1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2019 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence
36 from daCore import PlatformInfo
37 from daCore import Interfaces
38 from daCore import Templates
39 from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
42 class CacheManager(object):
44 Classe générale de gestion d'un cache de calculs
47 toleranceInRedundancy = 1.e-18,
48 lenghtOfRedundancy = -1,
51 Les caractéristiques de tolérance peuvent être modifées à la création.
53 self.__tolerBP = float(toleranceInRedundancy)
54 self.__lenghtOR = int(lenghtOfRedundancy)
55 self.__initlnOR = self.__lenghtOR
60 self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
61 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
63 def wasCalculatedIn(self, xValue ): #, info="" ):
64 "Vérifie l'existence d'un calcul correspondant à la valeur"
67 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
68 if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
69 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
71 if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
73 __HxV = self.__listOPCV[i][1]
74 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
78 def storeValueInX(self, xValue, HxValue ):
79 "Stocke un calcul correspondant à la valeur"
80 if self.__lenghtOR < 0:
81 self.__lenghtOR = 2 * xValue.size + 2
82 self.__initlnOR = self.__lenghtOR
83 while len(self.__listOPCV) > self.__lenghtOR:
84 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
85 self.__listOPCV.pop(0)
86 self.__listOPCV.append( (
87 copy.copy(numpy.ravel(xValue)),
89 numpy.linalg.norm(xValue),
94 self.__initlnOR = self.__lenghtOR
99 self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
102 class Operator(object):
104 Classe générale d'interface de type opérateur simple
111 def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True, inputAsMultiFunction = False):
113 On construit un objet de ce type en fournissant, à l'aide de l'un des
114 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
117 - fromMethod : argument de type fonction Python
118 - fromMatrix : argument adapté au constructeur numpy.matrix
119 - avoidingRedundancy : évite ou pas les calculs redondants
120 - inputAsMultiFunction : fonction explicitement définie ou pas en multi-fonction
122 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
123 self.__AvoidRC = bool( avoidingRedundancy )
124 self.__inputAsMF = bool( inputAsMultiFunction )
125 if fromMethod is not None and self.__inputAsMF:
126 self.__Method = fromMethod # logtimer(fromMethod)
128 self.__Type = "Method"
129 elif fromMethod is not None and not self.__inputAsMF:
130 self.__Method = partial( MultiFonction, _sFunction=fromMethod)
132 self.__Type = "Method"
133 elif fromMatrix is not None:
135 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
136 self.__Type = "Matrix"
    def disableAvoidingRedundancy(self):
        # Disable the calculation cache shared by all Operator instances
        # (Operator.CM is a class-level CacheManager — defined outside this view)
        Operator.CM.disable()
146 def enableAvoidingRedundancy(self):
151 Operator.CM.disable()
157 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
159 Permet de restituer le résultat de l'application de l'opérateur à une
160 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
161 argument devant a priori être du bon type.
163 - les arguments par série sont :
164 - xValue : argument adapté pour appliquer l'opérateur
165 - HValue : valeur précalculée de l'opérateur en ce point
166 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
173 if HValue is not None:
177 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
179 if _HValue is not None:
180 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
182 for i in range(len(_HValue)):
183 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
185 Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
190 for i, xv in enumerate(_xValue):
192 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
194 __alreadyCalculated = False
196 if __alreadyCalculated:
197 self.__addOneCacheCall()
200 if self.__Matrix is not None:
201 self.__addOneMatrixCall()
202 _hv = self.__Matrix * xv
204 self.__addOneMethodCall()
208 HxValue.append( _hv )
210 if len(_xserie)>0 and self.__Matrix is None:
211 _hserie = self.__Method( _xserie ) # Calcul MF
212 if not hasattr(_hserie, "pop"):
213 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
219 Operator.CM.storeValueInX(_xv,_hv)
221 if argsAsSerie: return HxValue
222 else: return HxValue[-1]
224 def appliedControledFormTo(self, paires, argsAsSerie = False ):
226 Permet de restituer le résultat de l'application de l'opérateur à des
227 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
228 argument devant a priori être du bon type. Si la uValue est None,
229 on suppose que l'opérateur ne s'applique qu'à xValue.
231 - paires : les arguments par paire sont :
232 - xValue : argument X adapté pour appliquer l'opérateur
233 - uValue : argument U adapté pour appliquer l'opérateur
234 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
236 if argsAsSerie: _xuValue = paires
237 else: _xuValue = (paires,)
238 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
240 if self.__Matrix is not None:
242 for paire in _xuValue:
243 _xValue, _uValue = paire
244 self.__addOneMatrixCall()
245 HxValue.append( self.__Matrix * _xValue )
248 for paire in _xuValue:
250 _xValue, _uValue = paire
251 if _uValue is not None:
252 _xuValue.append( paire )
254 _xuValue.append( _xValue )
255 self.__addOneMethodCall( len(_xuValue) )
256 HxValue = self.__Method( _xuValue ) # Calcul MF
258 if argsAsSerie: return HxValue
259 else: return HxValue[-1]
261 def appliedInXTo(self, paires, argsAsSerie = False ):
263 Permet de restituer le résultat de l'application de l'opérateur à une
264 série d'arguments xValue, sachant que l'opérateur est valable en
265 xNominal. Cette méthode se contente d'appliquer, son argument devant a
266 priori être du bon type. Si l'opérateur est linéaire car c'est une
267 matrice, alors il est valable en tout point nominal et xNominal peut
268 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
269 permet d'indiquer que l'argument est multi-paires.
271 - paires : les arguments par paire sont :
272 - xNominal : série d'arguments permettant de donner le point où
273 l'opérateur est construit pour être ensuite appliqué
274 - xValue : série d'arguments adaptés pour appliquer l'opérateur
275 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
277 if argsAsSerie: _nxValue = paires
278 else: _nxValue = (paires,)
279 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
281 if self.__Matrix is not None:
283 for paire in _nxValue:
284 _xNominal, _xValue = paire
285 self.__addOneMatrixCall()
286 HxValue.append( self.__Matrix * _xValue )
288 self.__addOneMethodCall( len(_nxValue) )
289 HxValue = self.__Method( _nxValue ) # Calcul MF
291 if argsAsSerie: return HxValue
292 else: return HxValue[-1]
294 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
296 Permet de renvoyer l'opérateur sous la forme d'une matrice
298 if self.__Matrix is not None:
299 self.__addOneMatrixCall()
300 mValue = [self.__Matrix,]
301 elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None"
304 self.__addOneMethodCall( len(ValueForMethodForm) )
305 for _vfmf in ValueForMethodForm:
306 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
308 self.__addOneMethodCall()
309 mValue = self.__Method(((ValueForMethodForm, None),))
311 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
313 if argsAsSerie: return mValue
314 else: return mValue[-1]
318 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
319 la forme d'une matrice
321 if self.__Matrix is not None:
322 return self.__Matrix.shape
324 raise ValueError("Matrix form of the operator is not available, nor the shape")
326 def nbcalls(self, which=None):
328 Renvoie les nombres d'évaluations de l'opérateur
331 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
332 self.__NbCallsAsMatrix,
333 self.__NbCallsAsMethod,
334 self.__NbCallsOfCached,
335 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
336 Operator.NbCallsAsMatrix,
337 Operator.NbCallsAsMethod,
338 Operator.NbCallsOfCached,
340 if which is None: return __nbcalls
341 else: return __nbcalls[which]
343 def __addOneMatrixCall(self):
344 "Comptabilise un appel"
345 self.__NbCallsAsMatrix += 1 # Decompte local
346 Operator.NbCallsAsMatrix += 1 # Decompte global
348 def __addOneMethodCall(self, nb = 1):
349 "Comptabilise un appel"
350 self.__NbCallsAsMethod += nb # Decompte local
351 Operator.NbCallsAsMethod += nb # Decompte global
353 def __addOneCacheCall(self):
354 "Comptabilise un appel"
355 self.__NbCallsOfCached += 1 # Decompte local
356 Operator.NbCallsOfCached += 1 # Decompte global
358 # ==============================================================================
359 class FullOperator(object):
361 Classe générale d'interface de type opérateur complet
362 (Direct, Linéaire Tangent, Adjoint)
365 name = "GenericFullOperator",
367 asOneFunction = None, # 1 Fonction
368 asThreeFunctions = None, # 3 Fonctions in a dictionary
369 asScript = None, # 1 or 3 Fonction(s) by script
370 asDict = None, # Parameters
373 inputAsMF = False,# Fonction(s) as Multi-Functions
378 self.__name = str(name)
379 self.__check = bool(toBeChecked)
384 if (asDict is not None) and isinstance(asDict, dict):
385 __Parameters.update( asDict )
386 if "DifferentialIncrement" in asDict:
387 __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
388 if "CenteredFiniteDifference" in asDict:
389 __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
390 if "EnableMultiProcessing" in asDict:
391 __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
392 if "NumberOfProcesses" in asDict:
393 __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
395 if asScript is not None:
396 __Matrix, __Function = None, None
398 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
400 __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
401 __Function.update({"useApproximatedDerivatives":True})
402 __Function.update(__Parameters)
403 elif asThreeFunctions:
405 "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
406 "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
407 "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
409 __Function.update(__Parameters)
412 if asOneFunction is not None:
413 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
414 if asOneFunction["Direct"] is not None:
415 __Function = asOneFunction
417 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
419 __Function = { "Direct":asOneFunction }
420 __Function.update({"useApproximatedDerivatives":True})
421 __Function.update(__Parameters)
422 elif asThreeFunctions is not None:
423 if isinstance(asThreeFunctions, dict) and \
424 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
425 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
426 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
427 __Function = asThreeFunctions
428 elif isinstance(asThreeFunctions, dict) and \
429 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
430 __Function = asThreeFunctions
431 __Function.update({"useApproximatedDerivatives":True})
433 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
434 if "Direct" not in asThreeFunctions:
435 __Function["Direct"] = asThreeFunctions["Tangent"]
436 __Function.update(__Parameters)
440 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
441 # for k in ("Direct", "Tangent", "Adjoint"):
442 # if k in __Function and hasattr(__Function[k],"__class__"):
443 # if type(__Function[k]) is type(self.__init__):
444 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
446 if appliedInX is not None and isinstance(appliedInX, dict):
447 __appliedInX = appliedInX
448 elif appliedInX is not None:
449 __appliedInX = {"HXb":appliedInX}
453 if scheduledBy is not None:
454 self.__T = scheduledBy
456 if isinstance(__Function, dict) and \
457 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
458 ("Direct" in __Function) and (__Function["Direct"] is not None):
459 if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
460 if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
461 if "withdX" not in __Function: __Function["withdX"] = None
462 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
463 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
464 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
465 if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
466 if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
467 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
468 from daNumerics.ApproximatedDerivatives import FDApproximation
469 FDA = FDApproximation(
470 Function = __Function["Direct"],
471 centeredDF = __Function["withCenteredDF"],
472 increment = __Function["withIncrement"],
473 dX = __Function["withdX"],
474 avoidingRedundancy = __Function["withAvoidingRedundancy"],
475 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
476 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
477 mpEnabled = __Function["withmpEnabled"],
478 mpWorkers = __Function["withmpWorkers"],
479 mfEnabled = __Function["withmfEnabled"],
481 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF)
482 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
483 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
484 elif isinstance(__Function, dict) and \
485 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
486 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
487 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
488 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
489 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
490 elif asMatrix is not None:
491 __matrice = numpy.matrix( __Matrix, numpy.float )
492 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
493 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
494 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
497 raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
499 if __appliedInX is not None:
500 self.__FO["AppliedInX"] = {}
501 for key in list(__appliedInX.keys()):
502 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
503 # Pour le cas où l'on a une vraie matrice
504 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
505 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
506 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
507 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
509 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
511 self.__FO["AppliedInX"] = None
517 "x.__repr__() <==> repr(x)"
518 return repr(self.__V)
521 "x.__str__() <==> str(x)"
524 # ==============================================================================
525 class Algorithm(object):
527 Classe générale d'interface de type algorithme
529 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
530 d'assimilation, en fournissant un container (dictionnaire) de variables
531 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
533 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
535 def __init__(self, name):
537 L'initialisation présente permet de fabriquer des variables de stockage
538 disponibles de manière générique dans les algorithmes élémentaires. Ces
539 variables de stockage sont ensuite conservées dans un dictionnaire
540 interne à l'objet, mais auquel on accède par la méthode "get".
542 Les variables prévues sont :
543 - APosterioriCorrelations : matrice de corrélations de la matrice A
544 - APosterioriCovariance : matrice de covariances a posteriori : A
545 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
546 - APosterioriVariances : vecteur des variances de la matrice A
547 - Analysis : vecteur d'analyse : Xa
548 - BMA : Background moins Analysis : Xa - Xb
549 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
550 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
551 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
552 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
553 - CostFunctionJo : partie observations de la fonction-coût : Jo
554 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
555 - CurrentOptimum : état optimal courant lors d'itérations
556 - CurrentState : état courant lors d'itérations
557 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
558 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
559 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
560 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
561 - Innovation : l'innovation : d = Y - H(X)
562 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
563 - JacobianMatrixAtBackground : matrice jacobienne à l'ébauche
564 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
565 - MahalanobisConsistency : indicateur de consistance des covariances
566 - OMA : Observation moins Analyse : Y - Xa
567 - OMB : Observation moins Background : Y - Xb
568 - PredictedState : état prédit courant lors d'itérations
569 - Residu : dans le cas des algorithmes de vérification
570 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
571 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
572 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
573 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
574 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
575 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
576 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
577 On peut rajouter des variables à stocker dans l'initialisation de
578 l'algorithme élémentaire qui va hériter de cette classe
580 logging.debug("%s Initialisation", str(name))
581 self._m = PlatformInfo.SystemUsage()
583 self._name = str( name )
584 self._parameters = {"StoreSupplementaryCalculations":[]}
585 self.__required_parameters = {}
586 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
588 self.StoredVariables = {}
589 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
590 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
591 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
592 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
593 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
594 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
595 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
596 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
597 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
598 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
599 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
600 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
601 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
602 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
603 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
604 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
605 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
606 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
607 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
608 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
609 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
610 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
611 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
612 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
613 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
614 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
615 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
616 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
617 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
618 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
619 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
620 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
621 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
622 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
624 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
626 logging.debug("%s Lancement", self._name)
627 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
629 # Mise a jour de self._parameters avec Parameters
630 self.__setParameters(Parameters)
632 # Corrections et complements
633 def __test_vvalue(argument, variable, argname):
635 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
636 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
637 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
638 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
640 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
642 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
644 __test_vvalue( Xb, "Xb", "Background or initial state" )
645 __test_vvalue( Y, "Y", "Observation" )
647 def __test_cvalue(argument, variable, argname):
649 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
650 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
651 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
652 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
654 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
656 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
658 __test_cvalue( R, "R", "Observation" )
659 __test_cvalue( B, "B", "Background" )
660 __test_cvalue( Q, "Q", "Evolution" )
662 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
663 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
665 self._parameters["Bounds"] = None
667 if logging.getLogger().level < logging.WARNING:
668 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
669 if PlatformInfo.has_scipy:
670 import scipy.optimize
671 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
673 self._parameters["optmessages"] = 15
675 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
676 if PlatformInfo.has_scipy:
677 import scipy.optimize
678 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
680 self._parameters["optmessages"] = 15
684 def _post_run(self,_oH=None):
686 if ("StoreSupplementaryCalculations" in self._parameters) and \
687 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
688 for _A in self.StoredVariables["APosterioriCovariance"]:
689 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
690 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
691 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
692 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
693 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
694 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
695 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
696 self.StoredVariables["APosterioriCorrelations"].store( _C )
698 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
699 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
700 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
701 logging.debug("%s Terminé", self._name)
704 def _toStore(self, key):
705 "True if in StoreSupplementaryCalculations, else False"
706 return key in self._parameters["StoreSupplementaryCalculations"]
708 def get(self, key=None):
710 Renvoie l'une des variables stockées identifiée par la clé, ou le
711 dictionnaire de l'ensemble des variables disponibles en l'absence de
712 clé. Ce sont directement les variables sous forme objet qui sont
713 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
714 des classes de persistance.
717 return self.StoredVariables[key]
719 return self.StoredVariables
721 def __contains__(self, key=None):
722 "D.__contains__(k) -> True if D has a key k, else False"
723 return key in self.StoredVariables
726 "D.keys() -> list of D's keys"
727 if hasattr(self, "StoredVariables"):
728 return self.StoredVariables.keys()
733 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
734 if hasattr(self, "StoredVariables"):
735 return self.StoredVariables.pop(k, d)
740 raise TypeError("pop expected at least 1 arguments, got 0")
741 "If key is not found, d is returned if given, otherwise KeyError is raised"
747 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
749 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
750 sa forme mathématique la plus naturelle possible.
752 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
754 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
756 Permet de définir dans l'algorithme des paramètres requis et leurs
757 caractéristiques par défaut.
760 raise ValueError("A name is mandatory to define a required parameter.")
762 self.__required_parameters[name] = {
764 "typecast" : typecast,
770 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
772 def getRequiredParameters(self, noDetails=True):
774 Renvoie la liste des noms de paramètres requis ou directement le
775 dictionnaire des paramètres requis.
778 return sorted(self.__required_parameters.keys())
780 return self.__required_parameters
def setParameterValue(self, name=None, value=None):
    """
    Return the value of a required parameter in a controlled way: apply the
    declared typecast, then enforce minval/maxval bounds and the listval
    whitelist declared through defineRequiredParameter().

    Raises ValueError when a bound or whitelist constraint is violated.
    Raises KeyError when the parameter was never declared.
    """
    default  = self.__required_parameters[name]["default"]
    typecast = self.__required_parameters[name]["typecast"]
    minval   = self.__required_parameters[name]["minval"]
    maxval   = self.__required_parameters[name]["maxval"]
    listval  = self.__required_parameters[name]["listval"]
    #
    # Explicit value wins over the declared default; the typecast (when
    # given) is applied to whichever of the two is retained.
    if value is None and default is None:
        __val = None
    elif value is None and default is not None:
        if typecast is None: __val = default
        else:                __val = typecast( default )
    else:
        if typecast is None: __val = value
        else:                __val = typecast( value )
    #
    # Bounds are checked element-wise so vector-valued parameters work too.
    if minval is not None and (numpy.array(__val, float) < minval).any():
        raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
    if maxval is not None and (numpy.array(__val, float) > maxval).any():
        raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
    if listval is not None:
        if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
            for v in __val:
                if v not in listval:
                    raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
        elif __val not in listval:
            raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
    return __val
def requireInputArguments(self, mandatory=(), optional=()):
    """
    Register the names of the input arguments the algorithm requires
    (mandatory) or accepts (optional). Both are normalized to tuples.
    """
    self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
    self.__required_inputs["RequiredInputValues"]["optional"]  = tuple( optional )
def __setParameters(self, fromDico=None):
    """
    Store the received parameters in the internal dictionary: every value
    of fromDico is kept, and every declared required parameter is passed
    through setParameterValue() (validating it, or materializing its
    default when absent from fromDico).
    """
    # None default instead of a mutable {} default; behavior is unchanged
    # since the dictionary was only ever read, never mutated.
    fromDico = {} if fromDico is None else fromDico
    self._parameters.update( fromDico )
    for k in self.__required_parameters.keys():
        if k in fromDico.keys():
            self._parameters[k] = self.setParameterValue(k,fromDico[k])
        else:
            self._parameters[k] = self.setParameterValue(k)
        logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
833 # ==============================================================================
834 class AlgorithmAndParameters(object):
# NOTE(review): corrupted paste — the embedded original line numbers jump
# (835, 837-845, 847-849, ... are absent), so docstring delimiters, the
# __init__ signature and several statements are missing here. Code kept
# byte-identical; do not run as-is.
836 Classe générale d'interface d'action pour l'algorithme et ses paramètres
# Constructor fragment: record the name, reset the algorithm slots, then
# load parameters and, when available, the algorithm choice.
839 name = "GenericAlgorithm",
846 self.__name = str(name)
850 self.__algorithm = {}
851 self.__algorithmFile = None
852 self.__algorithmName = None
854 self.updateParameters( asDict, asScript )
# The algorithm name may come from a script variable "Algorithm" when no
# explicit algorithm is supplied.
856 if asAlgorithm is None and asScript is not None:
857 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
861 if __Algo is not None:
862 self.__A = str(__Algo)
863 self.__P.update( {"Algorithm":self.__A} )
# Loads and instantiates the chosen elementary algorithm module.
865 self.__setAlgorithm( self.__A )
def updateParameters(self, asDict=None, asScript=None):
    """
    Update the internal parameter dictionary, either from an explicit
    dictionary (asDict) or from a script file exposing a "Parameters"
    variable (asScript). asDict takes precedence when both are given.
    """
    if asDict is None and asScript is not None:
        __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
    else:
        __Dict = asDict
    #
    if __Dict is not None:
        self.__P.update( dict(__Dict) )
880 def executePythonScheme(self, asDictAO = None):
881 "Permet de lancer le calcul d'assimilation"
# Clears the operator evaluation cache before a new run.
882 Operator.CM.clearCache()
884 if not isinstance(asDictAO, dict):
885 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Unwrap each case object through getO() when present; otherwise the raw
# entry is used (the Background/CheckingPoint pair falls back to None).
886 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
887 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
888 else: self.__Xb = None
889 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
890 else: self.__Y = asDictAO["Observation"]
891 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
892 else: self.__U = asDictAO["ControlInput"]
893 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
894 else: self.__HO = asDictAO["ObservationOperator"]
895 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
896 else: self.__EM = asDictAO["EvolutionModel"]
897 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
898 else: self.__CM = asDictAO["ControlModel"]
899 self.__B = asDictAO["BackgroundError"]
900 self.__R = asDictAO["ObservationError"]
901 self.__Q = asDictAO["EvolutionError"]
# Size consistency is checked before launching the computation.
903 self.__shape_validate()
# NOTE(review): most keyword arguments of this run(...) call (orig. lines
# 906-914, 916-918) are missing from this paste; only Parameters survives.
905 self.__algorithm.run(
915 Parameters = self.__P,
919 def executeYACSScheme(self, FileName=None):
920 "Permet de lancer le calcul d'assimilation"
# Runs an assimilation study described by a YACS XML schema file inside a
# SALOME session. NOTE(review): the imports of SALOMERuntime/pilot/loader
# (orig. lines ~932-935) and several try/except/return lines are missing
# from this paste; code kept byte-identical.
921 if FileName is None or not os.path.exists(FileName):
922 raise ValueError("a YACS file name has to be given for YACS execution.\n")
924 __file = os.path.abspath(FileName)
925 logging.debug("The YACS file name is \"%s\"."%__file)
926 if not PlatformInfo.has_salome or \
927 not PlatformInfo.has_yacs or \
928 not PlatformInfo.has_adao:
929 raise ImportError("\n\n"+\
930 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
931 "Please load the right environnement before trying to use it.\n")
936 SALOMERuntime.RuntimeSALOME_setRuntime()
938 r = pilot.getRuntime()
939 xmlLoader = loader.YACSLoader()
940 xmlLoader.registerProcCataLoader()
# The schema file itself is registered as a catalog of procedures.
942 catalogAd = r.loadCatalog("proc", __file)
943 r.addCatalog(catalogAd)
948 p = xmlLoader.load(__file)
949 except IOError as ex:
950 print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Parsing, validity and consistency diagnostics are printed, not raised.
952 logger = p.getLogger("parser")
953 if not logger.isEmpty():
954 print("The imported YACS XML schema has errors on parsing:")
955 print(logger.getStr())
958 print("The YACS XML schema is not valid and will not be executed:")
959 print(p.getErrorReport())
961 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
962 p.checkConsistency(info)
963 if info.areWarningsOrErrors():
964 print("The YACS XML schema is not coherent and will not be executed:")
965 print(info.getGlobalRepr())
967 e = pilot.ExecutorSwig()
969 if p.getEffectiveState() != pilot.DONE:
970 print(p.getErrorReport())
974 def get(self, key = None):
975 "Vérifie l'existence d'une clé de variable ou de paramètres"
# Looks the key up first among the algorithm's stored variables, then in
# the parameter dictionary. NOTE(review): the return of the parameter
# branch (orig. lines 979-983, incl. the following pop() def line) is
# missing from this paste.
976 if key in self.__algorithm:
977 return self.__algorithm.get( key )
978 elif key in self.__P:
# Fragment of pop(k, d), whose def line (orig. 983) is missing: it simply
# delegates to the algorithm's pop, needed for pickling support.
984 "Necessaire pour le pickling"
985 return self.__algorithm.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    """
    Return the required parameters as declared by the selected algorithm,
    delegating to its getRequiredParameters() (name list by default, full
    dictionary when noDetails is False).
    """
    return self.__algorithm.getRequiredParameters(noDetails)
991 def setObserver(self, __V, __O, __I, __S):
# Attach observer __O (with info __I, scheduler __S) to stored variable
# __V of the selected algorithm. NOTE(review): the trailing keyword
# arguments of setDataObserver (orig. lines 998, 1000-1004) are missing
# from this paste; code kept byte-identical.
992 if self.__algorithm is None \
993 or isinstance(self.__algorithm, dict) \
994 or not hasattr(self.__algorithm,"StoredVariables"):
995 raise ValueError("No observer can be build before choosing an algorithm.")
996 if __V not in self.__algorithm:
997 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
999 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1002 HookParameters = __I,
# Symmetric removal of a previously attached observer; same guards as
# setObserver. The removeDataObserver call arguments (orig. 1012-1017)
# are missing here.
1005 def removeObserver(self, __V, __O, __A = False):
1006 if self.__algorithm is None \
1007 or isinstance(self.__algorithm, dict) \
1008 or not hasattr(self.__algorithm,"StoredVariables"):
1009 raise ValueError("No observer can be removed before choosing an algorithm.")
1010 if __V not in self.__algorithm:
1011 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1013 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
def hasObserver(self, __V):
    """
    Return True when the stored variable named __V exists on the selected
    algorithm and currently has a data observer attached, False otherwise
    (including when no algorithm has been chosen yet).
    """
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        return False
    if __V not in self.__algorithm:
        return False
    return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1028 return list(self.__algorithm.keys()) + list(self.__P.keys())
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # A key exists when it is either a stored algorithm variable or a
    # parameter entry.
    return key in self.__algorithm or key in self.__P
1035 "x.__repr__() <==> repr(x)"
1036 return repr(self.__A)+", "+repr(self.__P)
1039 "x.__str__() <==> str(x)"
1040 return str(self.__A)+", "+str(self.__P)
1042 def __setAlgorithm(self, choice = None ):
# Select and instantiate the elementary algorithm named by `choice`.
# NOTE(review): the docstring delimiters, the initial None-guard body,
# `module_path = None`, and the try: lines (orig. 1043, 1047-1048, 1053,
# 1056, 1062, 1065, 1074, 1078-1079) are missing from this paste.
1044 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1045 d'assimilation. L'argument est un champ caractère se rapportant au nom
1046 d'un algorithme réalisant l'opération sur les arguments fixes.
1049 raise ValueError("Error: algorithm choice has to be given")
# The choice is final: a second call with a different name is an error.
1050 if self.__algorithmName is not None:
1051 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1052 daDirectory = "daAlgorithms"
1054 # Recherche explicitement le fichier complet
1055 # ------------------------------------------
1057 for directory in sys.path:
1058 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1059 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1060 if module_path is None:
1061 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1063 # Importe le fichier complet comme un module
1064 # ------------------------------------------
# sys.path is temporarily prepended with the algorithm directory, then
# restored after the import.
1066 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1067 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1068 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1069 raise ImportError("this module does not define a valid elementary algorithm.")
1070 self.__algorithmName = str(choice)
1071 sys.path = sys_path_tmp ; del sys_path_tmp
1072 except ImportError as e:
1073 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1075 # Instancie un objet du type élémentaire du fichier
1076 # -------------------------------------------------
1077 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
def __shape_validate(self):
    """
    Validate the mutual size consistency of the study variables (Xb, Y, U)
    and, when present, of the covariance matrices (B, R, Q) and operators
    (HO, EM, CM). Raises TypeError/ValueError on inconsistency.
    """
    # Normalize every object to a shape tuple; objects may expose .size,
    # a .shape attribute, or a .shape() callable.
    if self.__Xb is None: __Xb_shape = (0,)
    elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
    elif hasattr(self.__Xb,"shape"):
        if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
        else: __Xb_shape = self.__Xb.shape()
    else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
    #
    if self.__Y is None: __Y_shape = (0,)
    elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
    elif hasattr(self.__Y,"shape"):
        if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
        else: __Y_shape = self.__Y.shape()
    else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
    #
    if self.__U is None: __U_shape = (0,)
    elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
    elif hasattr(self.__U,"shape"):
        if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
        else: __U_shape = self.__U.shape()
    else: raise TypeError("The control (U) has no attribute of shape: problem !")
    #
    if self.__B is None: __B_shape = (0,0)
    elif hasattr(self.__B,"shape"):
        if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
        else: __B_shape = self.__B.shape()
    else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
    #
    if self.__R is None: __R_shape = (0,0)
    elif hasattr(self.__R,"shape"):
        if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
        else: __R_shape = self.__R.shape()
    else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
    #
    if self.__Q is None: __Q_shape = (0,0)
    elif hasattr(self.__Q,"shape"):
        if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
        else: __Q_shape = self.__Q.shape()
    else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
    #
    if len(self.__HO) == 0: __HO_shape = (0,0)
    elif isinstance(self.__HO, dict): __HO_shape = (0,0)
    elif hasattr(self.__HO["Direct"],"shape"):
        if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
        else: __HO_shape = self.__HO["Direct"].shape()
    else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
    #
    if len(self.__EM) == 0: __EM_shape = (0,0)
    elif isinstance(self.__EM, dict): __EM_shape = (0,0)
    elif hasattr(self.__EM["Direct"],"shape"):
        if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
        else: __EM_shape = self.__EM["Direct"].shape()
    else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
    #
    if len(self.__CM) == 0: __CM_shape = (0,0)
    elif isinstance(self.__CM, dict): __CM_shape = (0,0)
    elif hasattr(self.__CM["Direct"],"shape"):
        if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
        else: __CM_shape = self.__CM["Direct"].shape()
    else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
    #
    # Vérification des conditions
    # ---------------------------
    if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
        raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
    if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
        raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
    #
    if not( min(__B_shape) == max(__B_shape) ):
        raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
    if not( min(__R_shape) == max(__R_shape) ):
        raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
    if not( min(__Q_shape) == max(__Q_shape) ):
        raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
    if not( min(__EM_shape) == max(__EM_shape) ):
        raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
    #
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
    #
    if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
        # Special case: EnsembleBlue accepts a background given as a series
        # of members; it is re-wrapped into a persistent vector here.
        if self.__algorithmName in ["EnsembleBlue",]:
            asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
            self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
            for member in asPersistentVector:
                self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
            __Xb_shape = min(__B_shape)
        else:
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
    #
    if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
        raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
    #
    if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
        raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
    #
    if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
        raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
    #
    if ("Bounds" in self.__P) \
        and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
        and (len(self.__P["Bounds"]) != max(__Xb_shape)):
        raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
            %(len(self.__P["Bounds"]),max(__Xb_shape)))
    #
    # Success flag, kept for caller compatibility — presumably unused by
    # callers; TODO confirm against the full original file.
    return 1
1197 # ==============================================================================
1198 class RegulationAndParameters(object):
# NOTE(review): corrupted paste — the __init__ signature lines, docstring
# delimiters and several else: branches (orig. 1199, 1201-1209, 1211-1212,
# 1215, 1217, 1220-1222, 1225, 1228, 1231, 1233-1235) are missing.
1200 Classe générale d'interface d'action pour la régulation et ses paramètres
1203 name = "GenericRegulation",
1210 self.__name = str(name)
# The algorithm name and parameters may each come from a script file.
1213 if asAlgorithm is None and asScript is not None:
1214 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
1216 __Algo = asAlgorithm
1218 if asDict is None and asScript is not None:
1219 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1223 if __Dict is not None:
1224 self.__P.update( dict(__Dict) )
1226 if __Algo is not None:
1227 self.__P.update( {"Algorithm":self.__A} )
# Controlled access to a parameter entry (the whole-dict fallback branch,
# orig. 1231/1233-1234, is missing here).
1229 def get(self, key = None):
1230 "Vérifie l'existence d'une clé de variable ou de paramètres"
1232 return self.__P[key]
1236 # ==============================================================================
1237 class DataObserver(object):
# NOTE(review): corrupted paste — the __init__ signature (orig. 1243-1253),
# docstring delimiters and several else: lines are missing; code kept
# byte-identical.
1239 Classe générale d'interface de type observer
1242 name = "GenericObserver",
1254 self.__name = str(name)
# Normalize the observed variable name(s) and their info strings into
# parallel tuples __V / __I.
1259 if onVariable is None:
1260 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1261 elif type(onVariable) in (tuple, list):
1262 self.__V = tuple(map( str, onVariable ))
1263 if withInfo is None:
1266 self.__I = (str(withInfo),)*len(self.__V)
1267 elif isinstance(onVariable, str):
1268 self.__V = (onVariable,)
1269 if withInfo is None:
1270 self.__I = (onVariable,)
1272 self.__I = (str(withInfo),)
1274 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer body may be given as raw source, as a named template, or
# as a script file; it is compiled through ObserverF unless a ready-made
# observer object is supplied.
1276 if asString is not None:
1277 __FunctionText = asString
1278 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1279 __FunctionText = Templates.ObserverTemplates[asTemplate]
1280 elif asScript is not None:
1281 __FunctionText = ImportFromScript(asScript).getstring()
1284 __Function = ObserverF(__FunctionText)
1286 if asObsObject is not None:
1287 self.__O = asObsObject
1289 self.__O = __Function.getfunc()
# Register the observer on every requested variable of the algorithm.
1291 for k in range(len(self.__V)):
1294 if ename not in withAlgo:
1295 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1297 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1300 "x.__repr__() <==> repr(x)"
1301 return repr(self.__V)+"\n"+repr(self.__O)
1304 "x.__str__() <==> str(x)"
1305 return str(self.__V)+"\n"+str(self.__O)
1307 # ==============================================================================
1308 class State(object):
# NOTE(review): corrupted paste — the __init__ def line (orig. 1311-1312),
# docstring delimiters, several parameter lines and else: branches are
# missing; code kept byte-identical.
1310 Classe générale d'interface de type état
1313 name = "GenericVector",
1315 asPersistentVector = None,
1321 toBeChecked = False,
1324 Permet de définir un vecteur :
1325 - asVector : entrée des données, comme un vecteur compatible avec le
1326 constructeur de numpy.matrix, ou "True" si entrée par script.
1327 - asPersistentVector : entrée des données, comme une série de vecteurs
1328 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1329 type Persistence, ou "True" si entrée par script.
1330 - asScript : si un script valide est donné contenant une variable
1331 nommée "name", la variable est de type "asVector" (par défaut) ou
1332 "asPersistentVector" selon que l'une de ces variables est placée à
1334 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1335 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1336 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1337 nommée "name"), on récupère les colonnes et on les range ligne après
1338 ligne (colMajor=False) ou colonne après colonne (colMajor=True). La
1339 variable résultante est de type "asVector" (par défaut) ou
1340 "asPersistentVector" selon que l'une de ces variables est placée à
1343 self.__name = str(name)
1344 self.__check = bool(toBeChecked)
1348 self.__is_vector = False
1349 self.__is_series = False
# Input resolution: script file, data file, or direct value(s).
1351 if asScript is not None:
1352 __Vector, __Series = None, None
1353 if asPersistentVector:
1354 __Series = ImportFromScript(asScript).getvalue( self.__name )
1356 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1357 elif asDataFile is not None:
1358 __Vector, __Series = None, None
1359 if asPersistentVector:
1360 if colNames is not None:
1361 __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
1363 __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz archives are already column-major, hence the inverted transpose
# condition below.
1364 if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1365 __Series = numpy.transpose(__Series)
1366 elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1367 __Series = numpy.transpose(__Series)
1369 if colNames is not None:
1370 __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
1372 __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1374 __Vector = numpy.ravel(__Vector, order = "F")
1376 __Vector = numpy.ravel(__Vector, order = "C")
1378 __Vector, __Series = asVector, asPersistentVector
# Storage: a single column vector, or a Persistence series of vectors.
1380 if __Vector is not None:
1381 self.__is_vector = True
1382 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1383 self.shape = self.__V.shape
1384 self.size = self.__V.size
1385 elif __Series is not None:
1386 self.__is_series = True
1387 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1388 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string input — unsafe on untrusted data.
1389 if isinstance(__Series, str): __Series = eval(__Series)
1390 for member in __Series:
1391 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1394 if isinstance(self.__V.shape, (tuple, list)):
1395 self.shape = self.__V.shape
1397 self.shape = self.__V.shape()
1398 if len(self.shape) == 1:
1399 self.shape = (self.shape[0],1)
1400 self.size = self.shape[0] * self.shape[1]
1402 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1404 if scheduledBy is not None:
1405 self.__T = scheduledBy
# Accessors below; several def lines and return branches (orig. 1408,
# 1411-1415, 1418-1419, 1422-1423, 1426-1427) are missing from this paste.
1407 def getO(self, withScheduler=False):
1409 return self.__V, self.__T
1410 elif self.__T is None:
1416 "Vérification du type interne"
1417 return self.__is_vector
1420 "Vérification du type interne"
1421 return self.__is_series
1424 "x.__repr__() <==> repr(x)"
1425 return repr(self.__V)
1428 "x.__str__() <==> str(x)"
1429 return str(self.__V)
1431 # ==============================================================================
1432 class Covariance(object):
# NOTE(review): corrupted paste — the __init__ def line, docstring
# delimiters, the asScript dispatch conditions and several else:/try:
# lines are missing; code kept byte-identical.
1434 Classe générale d'interface de type covariance
1437 name = "GenericCovariance",
1438 asCovariance = None,
1439 asEyeByScalar = None,
1440 asEyeByVector = None,
1443 toBeChecked = False,
1446 Permet de définir une covariance :
1447 - asCovariance : entrée des données, comme une matrice compatible avec
1448 le constructeur de numpy.matrix
1449 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1450 multiplicatif d'une matrice de corrélation identité, aucune matrice
1451 n'étant donc explicitement à donner
1452 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1453 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1454 n'étant donc explicitement à donner
1455 - asCovObject : entrée des données comme un objet python, qui a les
1456 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1457 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1458 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1459 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1460 pleine doit être vérifié
1462 self.__name = str(name)
1463 self.__check = bool(toBeChecked)
1466 self.__is_scalar = False
1467 self.__is_vector = False
1468 self.__is_matrix = False
1469 self.__is_object = False
# Script-based input: the dispatch conditions selecting scalar/vector/
# object/matrix (orig. 1473, 1475, 1477, 1479) are missing here.
1471 if asScript is not None:
1472 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1474 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1476 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1478 __Object = ImportFromScript(asScript).getvalue( self.__name )
1480 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1482 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Internal storage __C: a scalar, a 1-D variance vector, a full matrix,
# or a user object exposing the mandatory covariance protocol.
1484 if __Scalar is not None:
1485 if numpy.matrix(__Scalar).size != 1:
1486 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1487 self.__is_scalar = True
1488 self.__C = numpy.abs( float(__Scalar) )
1491 elif __Vector is not None:
1492 self.__is_vector = True
1493 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1494 self.shape = (self.__C.size,self.__C.size)
1495 self.size = self.__C.size**2
1496 elif __Matrix is not None:
1497 self.__is_matrix = True
1498 self.__C = numpy.matrix( __Matrix, float )
1499 self.shape = self.__C.shape
1500 self.size = self.__C.size
1501 elif __Object is not None:
1502 self.__is_object = True
1504 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1505 if not hasattr(self.__C,at):
1506 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1507 if hasattr(self.__C,"shape"):
1508 self.shape = self.__C.shape
1511 if hasattr(self.__C,"size"):
1512 self.size = self.__C.size
1517 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Sanity checks: squareness, positivity, and (optionally) an SPD check by
# Cholesky factorization. The try:/except lines around both Cholesky
# attempts (orig. 1534, 1536, 1539, 1541) are missing from this paste.
1521 def __validate(self):
1523 if self.__C is None:
1524 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1525 if self.ismatrix() and min(self.shape) != max(self.shape):
1526 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1527 if self.isobject() and min(self.shape) != max(self.shape):
1528 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1529 if self.isscalar() and self.__C <= 0:
1530 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1531 if self.isvector() and (self.__C <= 0).any():
1532 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1533 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1535 L = numpy.linalg.cholesky( self.__C )
1537 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1538 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1540 L = self.__C.cholesky()
1542 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Type predicates and algebraic accessors of Covariance. NOTE(review):
# most def lines (orig. 1544, 1548, 1552, 1556, 1560-1562, 1574-1575,
# 1583-1584, ...) and several else:/raise branches are missing from this
# corrupted paste; code kept byte-identical.
1545 "Vérification du type interne"
1546 return self.__is_scalar
1549 "Vérification du type interne"
1550 return self.__is_vector
1553 "Vérification du type interne"
1554 return self.__is_matrix
1557 "Vérification du type interne"
1558 return self.__is_object
# getI fragment: inverse covariance, built according to the storage kind.
1563 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1564 elif self.isvector():
1565 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1566 elif self.isscalar():
1567 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1568 elif self.isobject():
1569 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1571 return None # Indispensable
# getT fragment: transpose (diagonal/scalar storages are self-transposed).
1576 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1577 elif self.isvector():
1578 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1579 elif self.isscalar():
1580 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1581 elif self.isobject():
1582 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1585 "Décomposition de Cholesky"
1587 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1588 elif self.isvector():
1589 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1590 elif self.isscalar():
1591 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1592 elif self.isobject() and hasattr(self.__C,"cholesky"):
1593 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1595 def choleskyI(self):
1596 "Inversion de la décomposition de Cholesky"
1598 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1599 elif self.isvector():
1600 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1601 elif self.isscalar():
1602 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1603 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1604 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
# diag/asfullmatrix/trace need an explicit msize for the scalar storage,
# since no dimension is recorded there.
1606 def diag(self, msize=None):
1607 "Diagonale de la matrice"
1609 return numpy.diag(self.__C)
1610 elif self.isvector():
1612 elif self.isscalar():
1614 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1616 return self.__C * numpy.ones(int(msize))
1617 elif self.isobject():
1618 return self.__C.diag()
1620 def asfullmatrix(self, msize=None):
1624 elif self.isvector():
1625 return numpy.matrix( numpy.diag(self.__C), float )
1626 elif self.isscalar():
1628 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1630 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1631 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1632 return self.__C.asfullmatrix()
1634 def trace(self, msize=None):
1635 "Trace de la matrice"
1637 return numpy.trace(self.__C)
1638 elif self.isvector():
1639 return float(numpy.sum(self.__C))
1640 elif self.isscalar():
1642 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1644 return self.__C * int(msize)
1645 elif self.isobject():
1646 return self.__C.trace()
1652 "x.__repr__() <==> repr(x)"
1653 return repr(self.__C)
1656 "x.__str__() <==> str(x)"
1657 return str(self.__C)
def __add__(self, other):
    "x.__add__(y) <==> x+y"
    if self.ismatrix() or self.isobject():
        return self.__C + numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Diagonal storage: add self.__C onto the diagonal of `other`.
        # NOTE(review): numpy.asarray does not copy when `other` is
        # already an ndarray, so the in-place += also mutates the caller's
        # array — confirm this is intended before changing.
        _A = numpy.asarray(other)
        _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
        return numpy.asmatrix(_A)
def __radd__(self, other):
    "x.__radd__(y) <==> y+x"
    # Right-addition is deliberately unsupported for every operand type.
    raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
def __sub__(self, other):
    "x.__sub__(y) <==> x-y"
    if self.ismatrix() or self.isobject():
        return self.__C - numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Diagonal storage: only the diagonal entries are replaced by
        # C - other; NOTE(review): off-diagonal entries keep the sign of
        # `other` (not negated), and `other` may be mutated in place when
        # it is already an ndarray — behavior preserved as found, confirm
        # against callers before changing.
        _A = numpy.asarray(other)
        _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
        return numpy.asmatrix(_A)
def __rsub__(self, other):
    "x.__rsub__(y) <==> y-x"
    # Right-hand subtraction is deliberately unsupported for covariance objects.
    raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1686 "x.__neg__() <==> -x"
def __mul__(self, other):
    """
    x.__mul__(y) <==> x*y
    Left multiplication by the covariance: the operand may be a scalar,
    a vector (returned as a column matrix) or a matrix, depending on the
    internal storage (matrix / diagonal vector / scalar / object).
    Raises ValueError on shape mismatch, NotImplementedError on an
    unsupported operand type.
    """
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return self.__C * other
    elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]: # Vector
            return self.__C * numpy.asmatrix(numpy.ravel(other)).T
        elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
            return self.__C * numpy.asmatrix(other)
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
    elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]: # Vector
            # Element-wise product of the stored diagonal with the vector
            return numpy.asmatrix(self.__C * numpy.ravel(other)).T
        elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
            # Diagonal scaling of each row, done via a transposed broadcast
            return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
    elif self.isscalar() and isinstance(other,numpy.matrix):
        return self.__C * other
    elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
        if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
            return self.__C * numpy.asmatrix(numpy.ravel(other)).T
        else:
            return self.__C * numpy.asmatrix(other)
    elif self.isobject():
        # Delegate to the user-provided covariance object
        return self.__C.__mul__(other)
    else:
        raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
def __rmul__(self, other):
    """
    x.__rmul__(y) <==> y*x
    Right multiplication by the covariance, mirror of __mul__: the operand
    may be a scalar, a vector (returned as a row matrix) or a matrix.
    Raises ValueError on shape mismatch, NotImplementedError on an
    unsupported operand type.
    """
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return other * self.__C
    elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]: # Vector
            return numpy.asmatrix(numpy.ravel(other)) * self.__C
        elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
            return numpy.asmatrix(other) * self.__C
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
    elif self.isvector() and isinstance(other,numpy.matrix):
        if numpy.ravel(other).size == self.shape[0]: # Vector
            # Element-wise product of the vector with the stored diagonal
            return numpy.asmatrix(numpy.ravel(other) * self.__C)
        elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
            # Diagonal scaling of each column via broadcasting
            return numpy.asmatrix(numpy.array(other) * self.__C)
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
    elif self.isscalar() and isinstance(other,numpy.matrix):
        return other * self.__C
    elif self.isobject():
        # Delegate to the user-provided covariance object
        return self.__C.__rmul__(other)
    else:
        raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
def __len__(self):
    "x.__len__() <==> len(x)"
    # The length is the first dimension of the covariance shape.
    return self.shape[0]
1748 # ==============================================================================
class ObserverF(object):
    """
    Build an observer function object from its source text.
    """
    def __init__(self, corps=""):
        # Raw Python source of the observer body, executed at each call
        self.__corps = corps
    def func(self,var,info):
        "Observer function: executes the stored source text with (var, info) in scope."
        # SECURITY NOTE: exec runs arbitrary code; "corps" must come from a
        # trusted user script, never from untrusted input.
        exec(self.__corps)
    def getfunc(self):
        "Return the function pointer stored in the object."
        return self.func
1762 # ==============================================================================
class CaseLogger(object):
    """
    Record the sequence of commands used to create a case, and dump or
    load it through pluggable viewers/loaders.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []     # Ordered list of registered commands
        self.__switchoff = False  # When True, registration is suspended
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            }
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Register a single command (skipped while registration is switched off)."
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized dump of the registered commands through the chosen viewer."
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized load of commands through the chosen loader."
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1812 # ==============================================================================
def MultiFonction( __xserie, _sFunction = lambda x: x ):
    """
    For an ordered list of input vectors, return the corresponding ordered
    list of values of the function given in argument (sequential evaluation).
    Raises ValueError when the input is not iterable.
    """
    if not PlatformInfo.isIterable( __xserie ):
        # BUGFIX: "unkown" -> "unknown" in the user-facing error message
        raise ValueError("MultiFonction not iterable unknown input type: %s"%(type(__xserie),))
    #
    __multiHX = []
    for __xvalue in __xserie:
        __multiHX.append( _sFunction( __xvalue ) )
    #
    return __multiHX
1827 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,  # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,  # Simulation of Hm(x) already done
                   _arg = None,  # Extra arguments for Hm, as a tuple
                   _BI  = None,  # Background error covariance inverse
                   _RI  = None,  # Observation error covariance inverse
                   _Xb  = None,  # Background state
                   _Y   = None,  # Observations
                   _SIV = False, # To be removed for 8.0
                   _SSC = [],    # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,     # nbPreviousSteps
                   _QM  = "DA",  # QualityMeasure
                   _SSV = {},    # Input and/or output : self.StoredVariables
                   _fRt = False, # Return (or not) the extended output
                   _sSc = True,  # Store (or not) the SSC
                  ):
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest,
    PSO, SA, Tabu, DFO, QuantileRegression.

    Returns the scalar cost J by default; returns _SSV when _fRt is True,
    or the simulation _HX for the "QR" quality measure.
    NOTE(review): _SSC and _SSV use shared mutable defaults on purpose
    (in/out parameters supplied by callers) — do not rely on the defaults
    across repeated bare calls.
    """
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Use the precomputed simulation when available, otherwise call the operator
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    # A NaN in the simulation invalidates the whole cost evaluation
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb  = 0.
            Jo  = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb  = 0.
            Jo  = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb  = 0.
            Jo  = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J   = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        # Best index over the current run only, hence the _nPS offset
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For the QuantileRegression
            return _HX
        else:
            return J
1942 # ==============================================================================
# Minimal self-test banner when the module is executed directly (not on import).
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC \n')