1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2018 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence
36 from daCore import PlatformInfo
37 from daCore import Interfaces
38 from daCore import Templates
39 from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
42 class CacheManager(object):
44 Classe générale de gestion d'un cache de calculs
47 toleranceInRedundancy = 1.e-18,
48 lenghtOfRedundancy = -1,
51 Les caractéristiques de tolérance peuvent être modifées à la création.
53 self.__tolerBP = float(toleranceInRedundancy)
54 self.__lenghtOR = int(lenghtOfRedundancy)
55 self.__initlnOR = self.__lenghtOR
60 self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
61 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
63 def wasCalculatedIn(self, xValue ): #, info="" ):
64 "Vérifie l'existence d'un calcul correspondant à la valeur"
67 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
68 if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
69 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
71 if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
73 __HxV = self.__listOPCV[i][1]
74 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
78 def storeValueInX(self, xValue, HxValue ):
79 "Stocke un calcul correspondant à la valeur"
80 if self.__lenghtOR < 0:
81 self.__lenghtOR = 2 * xValue.size + 2
82 self.__initlnOR = self.__lenghtOR
83 while len(self.__listOPCV) > self.__lenghtOR:
84 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
85 self.__listOPCV.pop(0)
86 self.__listOPCV.append( (
87 copy.copy(numpy.ravel(xValue)),
89 numpy.linalg.norm(xValue),
94 self.__initlnOR = self.__lenghtOR
99 self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
102 class Operator(object):
104 Classe générale d'interface de type opérateur simple
111 def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True, inputAsMultiFunction = False):
113 On construit un objet de ce type en fournissant, à l'aide de l'un des
114 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
117 - fromMethod : argument de type fonction Python
118 - fromMatrix : argument adapté au constructeur numpy.matrix
119 - avoidingRedundancy : évite ou pas les calculs redondants
120 - inputAsMultiFunction : fonction explicitement définie ou pas en multi-fonction
122 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
123 self.__AvoidRC = bool( avoidingRedundancy )
124 self.__inputAsMF = bool( inputAsMultiFunction )
125 if fromMethod is not None and self.__inputAsMF:
126 self.__Method = fromMethod # logtimer(fromMethod)
128 self.__Type = "Method"
129 elif fromMethod is not None and not self.__inputAsMF:
130 self.__Method = partial( MultiFonction, _sFunction=fromMethod)
132 self.__Type = "Method"
133 elif fromMatrix is not None:
135 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
136 self.__Type = "Matrix"
142 def disableAvoidingRedundancy(self):
144 Operator.CM.disable()
146 def enableAvoidingRedundancy(self):
151 Operator.CM.disable()
157 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
159 Permet de restituer le résultat de l'application de l'opérateur à une
160 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
161 argument devant a priori être du bon type.
163 - les arguments par série sont :
164 - xValue : argument adapté pour appliquer l'opérateur
165 - HValue : valeur précalculée de l'opérateur en ce point
166 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
173 if HValue is not None:
177 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
179 if _HValue is not None:
180 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
182 for i in range(len(_HValue)):
183 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
185 Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
190 for i, xv in enumerate(_xValue):
192 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
194 __alreadyCalculated = False
196 if __alreadyCalculated:
197 self.__addOneCacheCall()
200 if self.__Matrix is not None:
201 self.__addOneMatrixCall()
202 _hv = self.__Matrix * xv
204 self.__addOneMethodCall()
208 HxValue.append( _hv )
210 if len(_xserie)>0 and self.__Matrix is None:
211 _hserie = self.__Method( _xserie ) # Calcul MF
212 if not hasattr(_hserie, "pop"):
213 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
219 Operator.CM.storeValueInX(_xv,_hv)
221 if argsAsSerie: return HxValue
222 else: return HxValue[-1]
224 def appliedControledFormTo(self, paires, argsAsSerie = False ):
226 Permet de restituer le résultat de l'application de l'opérateur à des
227 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
228 argument devant a priori être du bon type. Si la uValue est None,
229 on suppose que l'opérateur ne s'applique qu'à xValue.
231 - paires : les arguments par paire sont :
232 - xValue : argument X adapté pour appliquer l'opérateur
233 - uValue : argument U adapté pour appliquer l'opérateur
234 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
236 if argsAsSerie: _xuValue = paires
237 else: _xuValue = (paires,)
238 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
240 if self.__Matrix is not None:
242 for paire in _xuValue:
243 _xValue, _uValue = paire
244 self.__addOneMatrixCall()
245 HxValue.append( self.__Matrix * _xValue )
248 for paire in _xuValue:
250 _xValue, _uValue = paire
251 if _uValue is not None:
252 _xuValue.append( paire )
254 _xuValue.append( _xValue )
255 self.__addOneMethodCall( len(_xuValue) )
256 HxValue = self.__Method( _xuValue ) # Calcul MF
258 if argsAsSerie: return HxValue
259 else: return HxValue[-1]
261 def appliedInXTo(self, paires, argsAsSerie = False ):
263 Permet de restituer le résultat de l'application de l'opérateur à une
264 série d'arguments xValue, sachant que l'opérateur est valable en
265 xNominal. Cette méthode se contente d'appliquer, son argument devant a
266 priori être du bon type. Si l'opérateur est linéaire car c'est une
267 matrice, alors il est valable en tout point nominal et xNominal peut
268 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
269 permet d'indiquer que l'argument est multi-paires.
271 - paires : les arguments par paire sont :
272 - xNominal : série d'arguments permettant de donner le point où
273 l'opérateur est construit pour être ensuite appliqué
274 - xValue : série d'arguments adaptés pour appliquer l'opérateur
275 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
277 if argsAsSerie: _nxValue = paires
278 else: _nxValue = (paires,)
279 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
281 if self.__Matrix is not None:
283 for paire in _nxValue:
284 _xNominal, _xValue = paire
285 self.__addOneMatrixCall()
286 HxValue.append( self.__Matrix * _xValue )
288 self.__addOneMethodCall( len(_nxValue) )
289 HxValue = self.__Method( _nxValue ) # Calcul MF
291 if argsAsSerie: return HxValue
292 else: return HxValue[-1]
294 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
296 Permet de renvoyer l'opérateur sous la forme d'une matrice
298 if self.__Matrix is not None:
299 self.__addOneMatrixCall()
300 mValue = [self.__Matrix,]
301 elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None"
304 self.__addOneMethodCall( len(ValueForMethodForm) )
305 for _vfmf in ValueForMethodForm:
306 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
308 self.__addOneMethodCall()
309 mValue = self.__Method(((ValueForMethodForm, None),))
311 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
313 if argsAsSerie: return mValue
314 else: return mValue[-1]
318 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
319 la forme d'une matrice
321 if self.__Matrix is not None:
322 return self.__Matrix.shape
324 raise ValueError("Matrix form of the operator is not available, nor the shape")
326 def nbcalls(self, which=None):
328 Renvoie les nombres d'évaluations de l'opérateur
331 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
332 self.__NbCallsAsMatrix,
333 self.__NbCallsAsMethod,
334 self.__NbCallsOfCached,
335 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
336 Operator.NbCallsAsMatrix,
337 Operator.NbCallsAsMethod,
338 Operator.NbCallsOfCached,
340 if which is None: return __nbcalls
341 else: return __nbcalls[which]
343 def __addOneMatrixCall(self):
344 "Comptabilise un appel"
345 self.__NbCallsAsMatrix += 1 # Decompte local
346 Operator.NbCallsAsMatrix += 1 # Decompte global
348 def __addOneMethodCall(self, nb = 1):
349 "Comptabilise un appel"
350 self.__NbCallsAsMethod += nb # Decompte local
351 Operator.NbCallsAsMethod += nb # Decompte global
353 def __addOneCacheCall(self):
354 "Comptabilise un appel"
355 self.__NbCallsOfCached += 1 # Decompte local
356 Operator.NbCallsOfCached += 1 # Decompte global
358 # ==============================================================================
359 class FullOperator(object):
361 Classe générale d'interface de type opérateur complet
362 (Direct, Linéaire Tangent, Adjoint)
365 name = "GenericFullOperator",
367 asOneFunction = None, # 1 Fonction
368 asThreeFunctions = None, # 3 Fonctions in a dictionary
369 asScript = None, # 1 or 3 Fonction(s) by script
370 asDict = None, # Parameters
373 inputAsMF = False,# Fonction(s) as Multi-Functions
378 self.__name = str(name)
379 self.__check = bool(toBeChecked)
384 if (asDict is not None) and isinstance(asDict, dict):
385 __Parameters.update( asDict )
386 if "DifferentialIncrement" in asDict:
387 __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
388 if "CenteredFiniteDifference" in asDict:
389 __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
390 if "EnableMultiProcessing" in asDict:
391 __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
392 if "NumberOfProcesses" in asDict:
393 __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
395 if asScript is not None:
396 __Matrix, __Function = None, None
398 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
400 __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
401 __Function.update({"useApproximatedDerivatives":True})
402 __Function.update(__Parameters)
403 elif asThreeFunctions:
405 "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
406 "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
407 "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
409 __Function.update(__Parameters)
412 if asOneFunction is not None:
413 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
414 if asOneFunction["Direct"] is not None:
415 __Function = asOneFunction
417 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
419 __Function = { "Direct":asOneFunction }
420 __Function.update({"useApproximatedDerivatives":True})
421 __Function.update(__Parameters)
422 elif asThreeFunctions is not None:
423 if isinstance(asThreeFunctions, dict) and \
424 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
425 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
426 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
427 __Function = asThreeFunctions
428 elif isinstance(asThreeFunctions, dict) and \
429 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
430 __Function = asThreeFunctions
431 __Function.update({"useApproximatedDerivatives":True})
433 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
434 if "Direct" not in asThreeFunctions:
435 __Function["Direct"] = asThreeFunctions["Tangent"]
436 __Function.update(__Parameters)
440 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
441 # for k in ("Direct", "Tangent", "Adjoint"):
442 # if k in __Function and hasattr(__Function[k],"__class__"):
443 # if type(__Function[k]) is type(self.__init__):
444 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
446 if appliedInX is not None and isinstance(appliedInX, dict):
447 __appliedInX = appliedInX
448 elif appliedInX is not None:
449 __appliedInX = {"HXb":appliedInX}
453 if scheduledBy is not None:
454 self.__T = scheduledBy
456 if isinstance(__Function, dict) and \
457 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
458 ("Direct" in __Function) and (__Function["Direct"] is not None):
459 if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
460 if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
461 if "withdX" not in __Function: __Function["withdX"] = None
462 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
463 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
464 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
465 if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
466 if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
467 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
468 from daNumerics.ApproximatedDerivatives import FDApproximation
469 FDA = FDApproximation(
470 Function = __Function["Direct"],
471 centeredDF = __Function["withCenteredDF"],
472 increment = __Function["withIncrement"],
473 dX = __Function["withdX"],
474 avoidingRedundancy = __Function["withAvoidingRedundancy"],
475 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
476 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
477 mpEnabled = __Function["withmpEnabled"],
478 mpWorkers = __Function["withmpWorkers"],
479 mfEnabled = __Function["withmfEnabled"],
481 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF)
482 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
483 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
484 elif isinstance(__Function, dict) and \
485 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
486 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
487 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
488 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
489 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
490 elif asMatrix is not None:
491 __matrice = numpy.matrix( __Matrix, numpy.float )
492 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
493 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
494 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
497 raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
499 if __appliedInX is not None:
500 self.__FO["AppliedInX"] = {}
501 for key in list(__appliedInX.keys()):
502 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
503 # Pour le cas où l'on a une vraie matrice
504 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
505 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
506 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
507 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
509 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
511 self.__FO["AppliedInX"] = None
517 "x.__repr__() <==> repr(x)"
518 return repr(self.__V)
521 "x.__str__() <==> str(x)"
524 # ==============================================================================
525 class Algorithm(object):
527 Classe générale d'interface de type algorithme
529 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
530 d'assimilation, en fournissant un container (dictionnaire) de variables
531 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
533 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
535 def __init__(self, name):
537 L'initialisation présente permet de fabriquer des variables de stockage
538 disponibles de manière générique dans les algorithmes élémentaires. Ces
539 variables de stockage sont ensuite conservées dans un dictionnaire
540 interne à l'objet, mais auquel on accède par la méthode "get".
542 Les variables prévues sont :
543 - CostFunctionJ : fonction-cout globale, somme des deux parties suivantes
544 - CostFunctionJb : partie ébauche ou background de la fonction-cout
545 - CostFunctionJo : partie observations de la fonction-cout
546 - GradientOfCostFunctionJ : gradient de la fonction-cout globale
547 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-cout
548 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-cout
549 - CurrentState : état courant lors d'itérations
550 - Analysis : l'analyse Xa
551 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
552 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
553 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
554 - Innovation : l'innovation : d = Y - H(X)
555 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
556 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
557 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
558 - MahalanobisConsistency : indicateur de consistance des covariances
559 - OMA : Observation moins Analysis : Y - Xa
560 - OMB : Observation moins Background : Y - Xb
561 - AMB : Analysis moins Background : Xa - Xb
562 - APosterioriCovariance : matrice A
563 - APosterioriVariances : variances de la matrice A
564 - APosterioriStandardDeviations : écart-types de la matrice A
565 - APosterioriCorrelations : correlations de la matrice A
566 - Residu : dans le cas des algorithmes de vérification
567 On peut rajouter des variables à stocker dans l'initialisation de
568 l'algorithme élémentaire qui va hériter de cette classe
570 logging.debug("%s Initialisation", str(name))
571 self._m = PlatformInfo.SystemUsage()
573 self._name = str( name )
574 self._parameters = {"StoreSupplementaryCalculations":[]}
575 self.__required_parameters = {}
576 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
578 self.StoredVariables = {}
579 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
580 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
581 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
582 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
583 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
584 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
585 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
586 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
587 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
588 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
589 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
590 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
591 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
592 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
593 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
594 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
595 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
596 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
597 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
598 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
599 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
600 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
601 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
602 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
603 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
604 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
605 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
606 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
607 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
608 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
609 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
610 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
612 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
614 logging.debug("%s Lancement", self._name)
615 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
617 # Mise a jour de self._parameters avec Parameters
618 self.__setParameters(Parameters)
620 # Corrections et complements
621 def __test_vvalue(argument, variable, argname):
623 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
624 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
625 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
626 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
628 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
630 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
632 __test_vvalue( Xb, "Xb", "Background or initial state" )
633 __test_vvalue( Y, "Y", "Observation" )
635 def __test_cvalue(argument, variable, argname):
637 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
638 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
639 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
640 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
642 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
644 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
646 __test_cvalue( R, "R", "Observation" )
647 __test_cvalue( B, "B", "Background" )
648 __test_cvalue( Q, "Q", "Evolution" )
650 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
651 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
653 self._parameters["Bounds"] = None
655 if logging.getLogger().level < logging.WARNING:
656 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
657 if PlatformInfo.has_scipy:
658 import scipy.optimize
659 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
661 self._parameters["optmessages"] = 15
663 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
664 if PlatformInfo.has_scipy:
665 import scipy.optimize
666 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
668 self._parameters["optmessages"] = 15
672 def _post_run(self,_oH=None):
674 if ("StoreSupplementaryCalculations" in self._parameters) and \
675 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
676 for _A in self.StoredVariables["APosterioriCovariance"]:
677 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
678 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
679 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
680 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
681 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
682 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
683 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
684 self.StoredVariables["APosterioriCorrelations"].store( _C )
686 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
687 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
688 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
689 logging.debug("%s Terminé", self._name)
692 def _toStore(self, key):
693 "True if in StoreSupplementaryCalculations, else False"
694 return key in self._parameters["StoreSupplementaryCalculations"]
696 def get(self, key=None):
698 Renvoie l'une des variables stockées identifiée par la clé, ou le
699 dictionnaire de l'ensemble des variables disponibles en l'absence de
700 clé. Ce sont directement les variables sous forme objet qui sont
701 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
702 des classes de persistance.
705 return self.StoredVariables[key]
707 return self.StoredVariables
709 def __contains__(self, key=None):
710 "D.__contains__(k) -> True if D has a key k, else False"
711 return key in self.StoredVariables
714 "D.keys() -> list of D's keys"
715 if hasattr(self, "StoredVariables"):
716 return self.StoredVariables.keys()
721 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
722 if hasattr(self, "StoredVariables"):
723 return self.StoredVariables.pop(k, d)
728 raise TypeError("pop expected at least 1 arguments, got 0")
729 "If key is not found, d is returned if given, otherwise KeyError is raised"
735 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
737 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
738 sa forme mathématique la plus naturelle possible.
740 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
742 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
744 Permet de définir dans l'algorithme des paramètres requis et leurs
745 caractéristiques par défaut.
748 raise ValueError("A name is mandatory to define a required parameter.")
750 self.__required_parameters[name] = {
752 "typecast" : typecast,
758 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
760 def getRequiredParameters(self, noDetails=True):
762 Renvoie la liste des noms de paramètres requis ou directement le
763 dictionnaire des paramètres requis.
766 return sorted(self.__required_parameters.keys())
768 return self.__required_parameters
770 def setParameterValue(self, name=None, value=None):
772 Renvoie la valeur d'un paramètre requis de manière contrôlée
774 default = self.__required_parameters[name]["default"]
775 typecast = self.__required_parameters[name]["typecast"]
776 minval = self.__required_parameters[name]["minval"]
777 maxval = self.__required_parameters[name]["maxval"]
778 listval = self.__required_parameters[name]["listval"]
780 if value is None and default is None:
782 elif value is None and default is not None:
783 if typecast is None: __val = default
784 else: __val = typecast( default )
786 if typecast is None: __val = value
787 else: __val = typecast( value )
789 if minval is not None and (numpy.array(__val, float) < minval).any():
790 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
791 if maxval is not None and (numpy.array(__val, float) > maxval).any():
792 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
793 if listval is not None:
794 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
797 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
798 elif __val not in listval:
799 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
    def requireInputArguments(self, mandatory=(), optional=()):
        # Record which input arguments the algorithm requires (mandatory)
        # and which it merely accepts (optional); stored as tuples so the
        # declaration is immutable afterwards.
        # NOTE(review): docstring delimiters appear elided in this excerpt.
        Permet d'imposer des arguments requises en entrée
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    def __setParameters(self, fromDico={}):
        # Store the received parameters into the internal dictionary,
        # pushing every registered (required) parameter through
        # setParameterValue for validation/typecasting.
        # NOTE(review): mutable default argument `fromDico={}` — harmless
        # here since it is only read, but worth confirming upstream.
        # NOTE(review): docstring delimiters and an `else:` line appear
        # elided in this excerpt; the second assignment presumably applies
        # the registered default value.
        Permet de stocker les paramètres reçus dans le dictionnaire interne.
        self._parameters.update( fromDico )
        for k in self.__required_parameters.keys():
            if k in fromDico.keys():
                self._parameters[k] = self.setParameterValue(k,fromDico[k])
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
821 # ==============================================================================
822 class AlgorithmAndParameters(object):
824 Classe générale d'interface d'action pour l'algorithme et ses paramètres
827 name = "GenericAlgorithm",
834 self.__name = str(name)
838 self.__algorithm = {}
839 self.__algorithmFile = None
840 self.__algorithmName = None
842 self.updateParameters( asDict, asScript )
844 if asAlgorithm is None and asScript is not None:
845 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
849 if __Algo is not None:
850 self.__A = str(__Algo)
851 self.__P.update( {"Algorithm":self.__A} )
853 self.__setAlgorithm( self.__A )
    def updateParameters(self,
        # Merge algorithm parameters coming either from an explicit dict
        # (asDict) or from a user script (asScript) into self.__P.
        # NOTE(review): the remainder of the signature and some branch
        # lines appear elided in this excerpt.
        "Mise a jour des parametres"
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Permet de lancer le calcul d'assimilation"
        # Run the assimilation study in pure Python: unpack the case
        # objects from the dictionary, validate shape consistency, then
        # run the selected algorithm.
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Each entry may be a wrapper exposing getO() or a raw object.
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # All operand shapes are cross-checked before the run.
        self.__shape_validate()
        # NOTE(review): most keyword arguments of run() appear elided in
        # this excerpt.
        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Permet de lancer le calcul d'assimilation"
        # Run the assimilation study through a YACS XML scheme within a
        # SALOME session: load the scheme, report parsing/validity/
        # consistency problems, then execute it.
        # NOTE(review): several lines (`try:` statements, the imports of
        # SALOMERuntime/pilot/loader, the executor run call) appear elided
        # in this excerpt.
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        __file = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
        # The SALOME/YACS/ADAO environment must have been loaded beforehand.
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        # Problems are reported on stdout without raising.
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Vérifie l'existence d'une clé de variable ou de paramètres"
        # Look the key up first among the algorithm's stored variables,
        # then among the parameters.
        # NOTE(review): the `elif` body (and a possible no-key fallback)
        # appears elided in this excerpt.
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
972 "Necessaire pour le pickling"
973 return self.__algorithm.pop(k, d)
975 def getAlgorithmRequiredParameters(self, noDetails=True):
976 "Renvoie la liste des paramètres requis selon l'algorithme"
977 return self.__algorithm.getRequiredParameters(noDetails)
    def setObserver(self, __V, __O, __I, __S):
        # Attach observer callable __O (with info __I and scheduling __S)
        # to the algorithm's stored variable named __V.
        # NOTE(review): the docstring and part of the setDataObserver()
        # call arguments appear elided in this excerpt.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach observer __O from the stored variable named __V; __A
        # presumably selects removal of all matching observers — TODO
        # confirm against the full file.
        # NOTE(review): part of the removeDataObserver() call arguments
        # appears elided in this excerpt.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
    def hasObserver(self, __V):
        # Tell whether the stored variable named __V currently has a data
        # observer attached.
        # NOTE(review): the early-return lines of both guards appear
        # elided in this excerpt (presumably `return False`).
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1016 return list(self.__algorithm.keys()) + list(self.__P.keys())
1018 def __contains__(self, key=None):
1019 "D.__contains__(k) -> True if D has a key k, else False"
1020 return key in self.__algorithm or key in self.__P
1023 "x.__repr__() <==> repr(x)"
1024 return repr(self.__A)+", "+repr(self.__P)
1027 "x.__str__() <==> str(x)"
1028 return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        # Select and import the elementary algorithm module named `choice`
        # from a "daAlgorithms" directory found on sys.path, then
        # instantiate its ElementaryAlgorithm class.
        # NOTE(review): docstring delimiters, the `module_path = None`
        # initialisation and the `try:` line appear elided in this excerpt.
        Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
        d'assimilation. L'argument est un champ caractère se rapportant au nom
        d'un algorithme réalisant l'opération sur les arguments fixes.
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        # Explicitly search the full module file on sys.path
        # --------------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        # Import the complete file as a module
        # ------------------------------------
            # sys.path is temporarily prepended with the module directory,
            # then restored after the import.
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        # Instantiate an object of the file's elementary algorithm type
        # -------------------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        # Validate that the shapes/sizes of all study operands (Xb, Y, U,
        # covariances B/R/Q, operators H/EM/CM, and optional Bounds) are
        # mutually consistent before running the algorithm.
        # NOTE(review): docstring delimiters and a few `else:` lines
        # appear elided in this excerpt.
        Validation de la correspondance correcte des tailles des variables et
        des matrices s'il y en a.
        # Each operand may expose `size`, a `shape` tuple, or a callable
        # shape() method; normalise all of them to plain tuples.
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        # Operators are dict-like; the "Direct" entry carries the shape.
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        # Checking the cross-consistency conditions
        # -----------------------------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        # Covariance and evolution matrices must be square.
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        # Special case: EnsembleBlue accepts a background made of an
        # ensemble of members, which is reshaped into a persistent vector.
        # NOTE(review): an `else:` line before the raise appears elided.
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
                __Xb_shape = min(__B_shape)
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        # One bound pair is expected per state component.
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1185 # ==============================================================================
class RegulationAndParameters(object):
    # General action interface for a regulation algorithm and its
    # parameters: collects the algorithm name and the parameter dict
    # either from explicit arguments or from a user script.
    # NOTE(review): the `def __init__` line, docstring delimiters and
    # several branch/`else:` lines appear elided in this excerpt.
    Classe générale d'interface d'action pour la régulation et ses paramètres
        name = "GenericRegulation",
        self.__name = str(name)
        if asAlgorithm is None and asScript is not None:
            __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            # NOTE(review): self.__A is presumably assigned from __Algo on
            # an elided line just before this update — confirm.
            self.__P.update( {"Algorithm":self.__A} )
    def get(self, key = None):
        "Vérifie l'existence d'une clé de variable ou de paramètres"
        # NOTE(review): the guard line (presumably `if key in self.__P:`)
        # appears elided in this excerpt.
        return self.__P[key]
1224 # ==============================================================================
class DataObserver(object):
    # General observer interface: binds a callable (given directly, as a
    # source string, as a named template, or via a script) to one or more
    # stored variables of an algorithm.
    # NOTE(review): the `def __init__` line, docstring delimiters, the
    # `def __repr__`/`def __str__` lines and several `else:` lines appear
    # elided in this excerpt.
    Classe générale d'interface de type observer
        name = "GenericObserver",
        self.__name = str(name)
        # Normalise the observed variable name(s) and their info strings
        # into parallel tuples self.__V / self.__I.
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
                self.__I = (str(withInfo),)
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        # Resolve the observer function body from the possible sources.
        if asString is not None:
            __FunctionText = asString
        elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            __FunctionText = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            __FunctionText = ImportFromScript(asScript).getstring()
        # ObserverF builds a callable from the source text — NOTE(review):
        # this executes user-provided code, so the text must be trusted.
        __Function = ObserverF(__FunctionText)
        if asObsObject is not None:
            self.__O = asObsObject
            self.__O = __Function.getfunc()
        # Register the observer on each requested variable of the algorithm.
        for k in range(len(self.__V)):
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1295 # ==============================================================================
class State(object):
    # General state-vector interface: wraps either a single vector or a
    # series of vectors (Persistence object), loadable from an explicit
    # value, a user script, or column-organised data files.
    # NOTE(review): the `def __init__` line, docstring delimiters, several
    # `else:` lines and the predicate/`__repr__`/`__str__` def lines
    # appear elided in this excerpt.
    Classe générale d'interface de type état
        name = "GenericVector",
        asPersistentVector = None,
        toBeChecked = False,
        Permet de définir un vecteur :
        - asVector : entrée des données, comme un vecteur compatible avec le
        constructeur de numpy.matrix, ou "True" si entrée par script.
        - asPersistentVector : entrée des données, comme une série de vecteurs
        compatible avec le constructeur de numpy.matrix, ou comme un objet de
        type Persistence, ou "True" si entrée par script.
        - asScript : si un script valide est donné contenant une variable
        nommée "name", la variable est de type "asVector" (par défaut) ou
        "asPersistentVector" selon que l'une de ces variables est placée à
        - asDataFile : si un ou plusieurs fichiers valides sont donnés
        contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
        (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
        nommée "name"), on récupère les colonnes et on les range ligne après
        ligne (colMajor=False) ou colonne après colonne (colMajor=True). La
        variable résultante est de type "asVector" (par défaut) ou
        "asPersistentVector" selon que l'une de ces variables est placée à
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__is_vector = False
        self.__is_series = False
        # Resolve the raw data from script / data file / direct arguments.
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = ImportFromScript(asScript).getvalue( self.__name )
                __Vector = ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                # Columns come either from an explicit colNames list or
                # from a column named after this object.
                if colNames is not None:
                    __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
                    __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz files are already column-major, hence the asymmetry.
                if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                if colNames is not None:
                    __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
                    __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                    __Vector = numpy.ravel(__Vector, order = "F")
                    __Vector = numpy.ravel(__Vector, order = "C")
            __Vector, __Series = asVector, asPersistentVector
        # Store as a column numpy.matrix (vector) or a Persistence series.
        if __Vector is not None:
            self.__is_vector = True
            self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # NOTE(review): eval() on a string series executes arbitrary
                # code — the input must be trusted; consider ast.literal_eval.
                if isinstance(__Series, str): __Series = eval(__Series)
                for member in __Series:
                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
                self.shape = self.__V.shape()
            # Normalise to a 2D (n,1) shape for a 1D series member.
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
            raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        if scheduledBy is not None:
            self.__T = scheduledBy
    def getO(self, withScheduler=False):
        # Return the stored value, optionally together with its scheduler.
        # NOTE(review): the `if withScheduler:` guard and the remaining
        # return branches appear elided in this excerpt.
            return self.__V, self.__T
        elif self.__T is None:
        "Vérification du type interne"
        return self.__is_vector
        "Vérification du type interne"
        return self.__is_series
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)
        "x.__str__() <==> str(x)"
        return str(self.__V)
1419 # ==============================================================================
1420 class Covariance(object):
1422 Classe générale d'interface de type covariance
1425 name = "GenericCovariance",
1426 asCovariance = None,
1427 asEyeByScalar = None,
1428 asEyeByVector = None,
1431 toBeChecked = False,
1434 Permet de définir une covariance :
1435 - asCovariance : entrée des données, comme une matrice compatible avec
1436 le constructeur de numpy.matrix
1437 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1438 multiplicatif d'une matrice de corrélation identité, aucune matrice
1439 n'étant donc explicitement à donner
1440 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1441 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1442 n'étant donc explicitement à donner
1443 - asCovObject : entrée des données comme un objet python, qui a les
1444 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1445 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1446 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1447 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1448 pleine doit être vérifié
1450 self.__name = str(name)
1451 self.__check = bool(toBeChecked)
1454 self.__is_scalar = False
1455 self.__is_vector = False
1456 self.__is_matrix = False
1457 self.__is_object = False
1459 if asScript is not None:
1460 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1462 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1464 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1466 __Object = ImportFromScript(asScript).getvalue( self.__name )
1468 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1470 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1472 if __Scalar is not None:
1473 if numpy.matrix(__Scalar).size != 1:
1474 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1475 self.__is_scalar = True
1476 self.__C = numpy.abs( float(__Scalar) )
1479 elif __Vector is not None:
1480 self.__is_vector = True
1481 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1482 self.shape = (self.__C.size,self.__C.size)
1483 self.size = self.__C.size**2
1484 elif __Matrix is not None:
1485 self.__is_matrix = True
1486 self.__C = numpy.matrix( __Matrix, float )
1487 self.shape = self.__C.shape
1488 self.size = self.__C.size
1489 elif __Object is not None:
1490 self.__is_object = True
1492 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1493 if not hasattr(self.__C,at):
1494 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1495 if hasattr(self.__C,"shape"):
1496 self.shape = self.__C.shape
1499 if hasattr(self.__C,"size"):
1500 self.size = self.__C.size
1505 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
    def __validate(self):
        # Check that the stored covariance is set, square, and — when full
        # checking is enabled or logging is verbose — symmetric positive-
        # definite, via an attempted Cholesky factorisation.
        # NOTE(review): docstring delimiters and the try/except lines
        # around the Cholesky calls appear elided in this excerpt.
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
                L = numpy.linalg.cholesky( self.__C )
                raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
                L = self.__C.cholesky()
                raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1533 "Vérification du type interne"
1534 return self.__is_scalar
1537 "Vérification du type interne"
1538 return self.__is_vector
1541 "Vérification du type interne"
1542 return self.__is_matrix
1545 "Vérification du type interne"
1546 return self.__is_object
1551 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1552 elif self.isvector():
1553 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1554 elif self.isscalar():
1555 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1556 elif self.isobject():
1557 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1559 return None # Indispensable
1564 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1565 elif self.isvector():
1566 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1567 elif self.isscalar():
1568 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1569 elif self.isobject():
1570 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1573 "Décomposition de Cholesky"
1575 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1576 elif self.isvector():
1577 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1578 elif self.isscalar():
1579 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1580 elif self.isobject() and hasattr(self.__C,"cholesky"):
1581 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
    def choleskyI(self):
        "Inversion de la décomposition de Cholesky"
        # Return the inverse of the Cholesky factor, dispatched on the
        # internal representation (matrix / vector / scalar / object).
        # NOTE(review): the leading `if self.ismatrix():` line appears
        # elided in this excerpt.
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
    def diag(self, msize=None):
        "Diagonale de la matrice"
        # Return the covariance diagonal; for a scalar representation,
        # msize gives the matrix dimension.
        # NOTE(review): the `if self.ismatrix():` guard and some branch
        # bodies/`else:` lines appear elided in this excerpt.
            return numpy.diag(self.__C)
        elif self.isvector():
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return self.__C * numpy.ones(int(msize))
        elif self.isobject():
            return self.__C.diag()
    def asfullmatrix(self, msize=None):
        # Materialise the covariance as a dense numpy.matrix; for a scalar
        # representation, msize gives the matrix dimension.
        # NOTE(review): the docstring, the `if self.ismatrix():` branch
        # and the msize guard appear elided in this excerpt.
        elif self.isvector():
            return numpy.matrix( numpy.diag(self.__C), float )
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()
    def trace(self, msize=None):
        "Trace de la matrice"
        # Trace of the covariance; for a scalar representation, msize
        # gives the matrix dimension.
        # NOTE(review): the `if self.ismatrix():` guard and the msize
        # guard appear elided in this excerpt.
            return numpy.trace(self.__C)
        elif self.isvector():
            return float(numpy.sum(self.__C))
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
1640 "x.__repr__() <==> repr(x)"
1641 return repr(self.__C)
1644 "x.__str__() <==> str(x)"
1645 return str(self.__C)
1647 def __add__(self, other):
1648 "x.__add__(y) <==> x+y"
1649 if self.ismatrix() or self.isobject():
1650 return self.__C + numpy.asmatrix(other)
1651 elif self.isvector() or self.isscalar():
1652 _A = numpy.asarray(other)
1653 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1654 return numpy.asmatrix(_A)
1656 def __radd__(self, other):
1657 "x.__radd__(y) <==> y+x"
1658 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1660 def __sub__(self, other):
1661 "x.__sub__(y) <==> x-y"
1662 if self.ismatrix() or self.isobject():
1663 return self.__C - numpy.asmatrix(other)
1664 elif self.isvector() or self.isscalar():
1665 _A = numpy.asarray(other)
1666 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1667 return numpy.asmatrix(_A)
1669 def __rsub__(self, other):
1670 "x.__rsub__(y) <==> y-x"
1671 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1674 "x.__neg__() <==> -x"
    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        # Multiply the covariance (matrix / vector-diagonal / scalar-
        # diagonal / object form) by a scalar, a vector or a matrix,
        # returning a numpy matrix result where applicable.
        # NOTE(review): the `else:` lines preceding some raise statements
        # appear elided in this excerpt.
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            # Column-like inputs are flattened to a column vector first.
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
                return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
            raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
def __rmul__(self, other):
    """
    x.__rmul__(y) <==> y*x

    Right multiplication of the covariance by "other", mirroring __mul__:
    dispatch on the internal storage, then on the operand kind/shape.
    """
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return other * self.__C
    elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
        _flat = numpy.ravel(other)
        if _flat.size == self.shape[1]: # Vector operand
            return numpy.asmatrix(_flat) * self.__C
        elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix operand
            return numpy.asmatrix(other) * self.__C
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
    elif self.isvector() and isinstance(other, numpy.matrix):
        _flat = numpy.ravel(other)
        if _flat.size == self.shape[0]: # Vector operand: element-wise product with the diagonal
            return numpy.asmatrix(_flat * self.__C)
        elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix operand: scale each column by the diagonal
            return numpy.asmatrix(numpy.array(other) * self.__C)
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
    elif self.isscalar() and isinstance(other, numpy.matrix):
        return other * self.__C
    elif self.isobject():
        return self.__C.__rmul__(other)
    else:
        raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# NOTE(review): fragment — the enclosing "def __len__(self):" header is not
# visible in this extract; only the docstring and the return line are shown.
# The length of the covariance is its first dimension.
"x.__len__() <==> len(x)"
return self.shape[0]
1736 # ==============================================================================
class ObserverF(object):
    """
    Build an observer function from the source text of its body.

    NOTE(review): the stored text is run through exec() without any
    sandboxing, so it must come from a trusted source. This mirrors the
    original behaviour and is only flagged here, not changed.
    """
    def __init__(self, corps=""):
        # Raw Python source executed each time the observer is triggered.
        self.__corps = corps
    def func(self, var, info):
        "Observation function: executes the stored body with var/info in scope"
        exec(self.__corps)
    def getfunc(self):
        "Return the function pointer held by the object"
        return self.func
1750 # ==============================================================================
class CaseLogger(object):
    """
    Keep a record of the commands used to build a case, and dump or reload
    that record through pluggable viewer/loader formatters.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []     # Ordered list of registered commands
        self.__switchoff = False  # When True, registration is suspended
        # Output formatters, indexed by format name
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            }
        # Input formatters, indexed by format name
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))
    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False
    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalised dump of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)
    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalised loading of the recorded commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1800 # ==============================================================================
def MultiFonction( __xserie, _sFunction = lambda x: x ):
    """
    For an ordered list of input vectors, return the ordered list of the
    values of the function "_sFunction" evaluated on each of them.

    Raises ValueError if the input is not iterable.
    """
    if not PlatformInfo.isIterable( __xserie ):
        # Message typo fixed: "unkown" -> "unknown"
        raise ValueError("MultiFonction not iterable unknown input type: %s"%(type(__xserie),))
    # Sequential evaluation, one result per input, preserving the ordering
    return [ _sFunction( __xvalue ) for __xvalue in __xserie ]
1815 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,  # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,  # Already available simulation of Hm(x)
                   _arg = None,  # Extra arguments for Hm, as a tuple
                   _BI  = None,  # Inverse of the background error covariance
                   _RI  = None,  # Inverse of the observation error covariance
                   _Xb  = None,  # Background state
                   _Y   = None,  # Observations
                   _SIV = False, # To be removed for 8.0
                   _SSC = None,  # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,     # nbPreviousSteps
                   _QM  = "DA",  # QualityMeasure
                   _SSV = None,  # In and/or out: self.StoredVariables
                   _fRt = False, # Whether to return the extended output
                   _sSc = True,  # Whether to store the SSC
                  ):
    """
    General cost function for the static/3D algorithms: 3DVAR, BLUE and
    derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO,
    SA, Tabu, DFO, QuantileRegression.

    Returns J (a float), or _HX for the "QR" quality measure, or the full
    storage dictionary _SSV when _fRt is True.

    Bug fix: the defaults of _SSC/_SSV used to be the mutable literals []
    and {}; since _SSV is mutated (and possibly returned), results silently
    accumulated across calls relying on the default. None sentinels now give
    each call a fresh container; explicit callers are unaffected.
    """
    if _SSC is None: _SSC = []
    if _SSV is None: _SSV = {}
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        # Make sure every storage key exists and exposes an "append" method.
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Use the pre-computed simulation if given, otherwise call the operator.
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    if numpy.any(numpy.isnan(_HX)):
        # A failed simulation propagates NaN costs instead of raising.
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb  = 0.
            Jo  = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb  = 0.
            Jo  = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb  = 0.
            Jo  = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J   = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    # Optional tracking of the running optimum over the stored history.
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For QuantileRegression
            return _HX
        else:
            return J
# ==============================================================================
if __name__ == "__main__":
    # Minimal self-test entry point: only prints a banner when the module is
    # executed directly.
    print('\n AUTODIAGNOSTIC \n')