1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2019 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence
36 from daCore import PlatformInfo
37 from daCore import Interfaces
38 from daCore import Templates
39 from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
# NOTE(review): this block was reconstructed from a line-sampled / whitespace-
# mangled source; dropped lines (the __init__ header, clearCache, loop exits)
# were restored from the visible statements — confirm against version control.
class CacheManager(object):
    """
    General management class for a cache of calculation results.

    Each entry stores (input point, result, input norm); a lookup matches
    when the norm of the difference is below a relative tolerance.
    """
    def __init__(self,
                 toleranceInRedundancy = 1.e-18,  # relative tolerance declaring two points identical
                 lenghtOfRedundancy    = -1,      # max cache length; <0 means auto-sized on first store (sic: "lenght" kept, it is the public keyword)
                ):
        """
        The tolerance characteristics can be modified at creation time.
        """
        self.__tolerBP  = float(toleranceInRedundancy)
        self.__lenghtOR = int(lenghtOfRedundancy)
        self.__initlnOR = self.__lenghtOR  # remembered so enable() can restore after disable()
        self.clearCache()

    def clearCache(self):
        "Empty the cache."
        self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
        # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)

    def wasCalculatedIn(self, xValue ): #, info="" ):
        "Check whether a calculation corresponding to the value already exists."
        __alreadyCalculated = False
        __HxV = None
        # Scan backwards (most recent first), bounded by the allowed cache length
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
                # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
                continue
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __alreadyCalculated = True
                __HxV = self.__listOPCV[i][1]
                # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
                break
        return __alreadyCalculated, __HxV

    def storeValueInX(self, xValue, HxValue ):
        "Store a calculation corresponding to the value."
        if self.__lenghtOR < 0:
            # Auto-size the cache on first store, proportionally to the state size
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        while len(self.__listOPCV) > self.__lenghtOR:
            # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            copy.copy(HxValue),
            numpy.linalg.norm(xValue),
            ) )

    def disable(self):
        "Deactivate the cache: lookups find nothing until enable() is called."
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = 0

    def enable(self):
        "Reactivate the cache with its previous allowed length."
        self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
# NOTE(review): reconstructed from a line-sampled / whitespace-mangled source.
# Concrete fixes applied: the deprecated `numpy.float` alias is replaced by the
# builtin `float`; the `is not "UnknownVoidValue"` literal-identity test in
# asMatrix() is replaced by a proper equality test; appliedControledFormTo no
# longer rebinds the iterated series (only the last pair reached the method).
class Operator(object):
    """
    General interface class for a simple operator, usable either as a matrix
    or as a (multi-)function, with an optional shared redundancy cache.
    """
    #
    # Class-wide call counters and the shared calculation cache: these names
    # are referenced below as Operator.NbCalls* / Operator.CM.
    NbCallsAsMatrix = 0
    NbCallsAsMethod = 0
    NbCallsOfCached = 0
    CM = CacheManager()
    #
    def __init__(self,
                 fromMethod           = None,
                 fromMatrix           = None,
                 avoidingRedundancy   = True,
                 inputAsMultiFunction = False,
                 extraArguments       = None,
                ):
        """
        Build the operator from one of the two keywords, either a Python
        (multi-)function or a matrix:
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : boolean enabling (or not) the result cache
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        """
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC   = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap a mono-function into the common multi-function protocol
            self.__Method = partial( MultiFonction, _sFunction=fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            # FIX: numpy.float was a deprecated alias of the builtin float
            self.__Matrix = numpy.matrix( fromMatrix, float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None

    def disableAvoidingRedundancy(self):
        "Deactivate the shared calculation cache."
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        "Reactivate the shared calculation cache if this operator allows it."
        if self.__AvoidRC:
            Operator.CM.enable()
        else:
            Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
        """
        Return the result of applying the operator to a series of xValue
        arguments; each argument must already be of the proper type.
        - xValue : argument suitable for the operator
        - HValue : precomputed value of the operator at that point
        - argsAsSerie : whether the arguments are mono- or multi-valued
        """
        if argsAsSerie:
            _xValue = xValue
            _HValue = HValue
        else:
            _xValue = (xValue,)
            if HValue is not None:
                _HValue = (HValue,)
            else:
                _HValue = HValue
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        #
        if _HValue is not None:
            # Precomputed values: normalize to column matrices and (optionally) cache them
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            HxValue = []
            for i in range(len(_HValue)):
                HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
                if self.__AvoidRC:
                    Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
        else:
            HxValue = []
            _xserie = []  # points still to be evaluated by the multi-function
            _hindex = []  # their positions in HxValue
            for i, xv in enumerate(_xValue):
                if self.__AvoidRC:
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
                else:
                    __alreadyCalculated = False
                #
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    _hv = __HxV
                else:
                    if self.__Matrix is not None:
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix * xv
                    else:
                        self.__addOneMethodCall()
                        _xserie.append( xv )
                        _hindex.append(  i )
                        _hv = None  # placeholder, filled after the batched call below
                HxValue.append( _hv )
            #
            if len(_xserie)>0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # Calcul MF
                else:
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
                if not hasattr(_hserie, "pop"):
                    raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
                for i in _hindex:
                    _xv = _xserie.pop(0)
                    _hv = _hserie.pop(0)
                    HxValue[i] = _hv
                    if self.__AvoidRC:
                        Operator.CM.storeValueInX(_xv,_hv)
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False):
        """
        Return the result of applying the operator to (xValue, uValue) pairs;
        when uValue is None the operator is applied to xValue only.
        - paires : pairwise arguments:
          - xValue : X argument suitable for the operator
          - uValue : U argument suitable for the operator
        - argsAsSerie : whether the argument is mono- or multi-valued
        """
        if argsAsSerie: _xuValue = paires
        else:           _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        #
        if self.__Matrix is not None:
            HxValue = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                HxValue.append( self.__Matrix * _xValue )
        else:
            # FIX: the sampled original rebound the iterated name `_xuValue`
            # inside the loop, so only the last pair reached the multi-function
            # in series mode; collect the whole series in a separate list.
            _xuArgs = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                else:
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                HxValue = self.__Method( _xuArgs ) # Calcul MF
            else:
                HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False):
        """
        Return the result of applying the operator to a series of xValue
        arguments, the operator being valid at xNominal. If the operator is
        linear (a matrix) it is valid at any nominal point and xNominal may
        be anything. There is a single pair by default; argsAsSerie indicates
        a multi-pair argument.
        - paires : pairwise arguments:
          - xNominal : point where the operator is built before being applied
          - xValue : series of arguments suitable for the operator
        - argsAsSerie : whether the argument is mono- or multi-valued
        """
        if argsAsSerie: _nxValue = paires
        else:           _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        #
        if self.__Matrix is not None:
            HxValue = []
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                HxValue.append( self.__Matrix * _xValue )
        else:
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                HxValue = self.__Method( _nxValue ) # Calcul MF
            else:
                HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Return the operator in matrix form; a functional operator requires an
        operating point (ValueForMethodForm) to be linearized at.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        # FIX: `is not "UnknownVoidValue"` compared identities of string
        # literals (SyntaxWarning, implementation-dependent); test equality,
        # guarded so array-valued arguments are treated as "a point was given".
        elif not (isinstance(ValueForMethodForm, str) and ValueForMethodForm == "UnknownVoidValue"): # Ne pas utiliser "None"
            mValue = []
            if argsAsSerie:
                self.__addOneMethodCall( len(ValueForMethodForm) )
                for _vfmf in ValueForMethodForm:
                    mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
            else:
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
        else:
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        #
        if argsAsSerie: return mValue
        else:           return mValue[-1]

    def shape(self):
        """
        Return the numpy shape if the operator is available in matrix form.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        else:
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the operator evaluation counts: (local total, local matrix,
        local method, local cached, global total, global matrix, global
        method, global cached); `which` selects one entry.
        """
        __nbcalls = (
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
            )
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one matrix application."
        self.__NbCallsAsMatrix += 1 # Decompte local
        Operator.NbCallsAsMatrix += 1 # Decompte global

    def __addOneMethodCall(self, nb = 1):
        "Count nb method applications."
        self.__NbCallsAsMethod += nb # Decompte local
        Operator.NbCallsAsMethod += nb # Decompte global

    def __addOneCacheCall(self):
        "Count one cache hit."
        self.__NbCallsOfCached += 1 # Decompte local
        Operator.NbCallsOfCached += 1 # Decompte global
377 # ==============================================================================
# NOTE(review): reconstructed from a line-sampled / whitespace-mangled source;
# the __init__ parameter list and a few branch keywords were restored from the
# visible statements — confirm against version control.
class FullOperator(object):
    """
    General interface class for a full operator
    (Direct, Tangent Linear, Adjoint), assembled from a matrix, one function,
    three functions, or a script, optionally with approximated derivatives.
    """
    def __init__(self,
                 name             = "GenericFullOperator",
                 asMatrix         = None,
                 asOneFunction    = None, # 1 Fonction
                 asThreeFunctions = None, # 3 Fonctions in a dictionary
                 asScript         = None, # 1 or 3 Fonction(s) by script
                 asDict           = None, # Parameters
                 appliedInX       = None,
                 extraArguments   = None,
                 avoidRC          = True,
                 inputAsMF        = False,# Fonction(s) as Multi-Functions
                 scheduledBy      = None,
                 toBeChecked      = False,
                ):
        """
        Assemble the Direct/Tangent/Adjoint operators (stored in self.__FO)
        from one of the mutually exclusive definitions above. Raises
        ValueError when no usable definition is provided.
        """
        self.__name      = str(name)
        self.__check     = bool(toBeChecked)
        self.__extraArgs = extraArguments
        #
        self.__FO = {}
        #
        __Parameters = {}
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
            # Map the public keyword names onto the internal "with*" keys
            if "DifferentialIncrement" in asDict:
                __Parameters["withIncrement"]  = asDict["DifferentialIncrement"]
            if "CenteredFiniteDifference" in asDict:
                __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
            if "EnableMultiProcessing" in asDict:
                __Parameters["withmpEnabled"]  = asDict["EnableMultiProcessing"]
            if "NumberOfProcesses" in asDict:
                __Parameters["withmpWorkers"]  = asDict["NumberOfProcesses"]
        #
        if asScript is not None:
            # Definitions are loaded from a user script
            __Matrix, __Function = None, None
            if asMatrix:
                __Matrix = ImportFromScript(asScript).getvalue( self.__name )
            elif asOneFunction:
                __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                __Function = {
                    "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                    }
                __Function.update(__Parameters)
        else:
            __Matrix = asMatrix
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                    else:
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                else:
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                else:
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Tangent can stand in for the direct operator
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
            else:
                __Function = None
        #
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        #
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        else:
            __appliedInX = None
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Derivatives approximated by finite differences around the direct operator
            if "withCenteredDF"            not in __Function: __Function["withCenteredDF"]            = False
            if "withIncrement"             not in __Function: __Function["withIncrement"]             = 0.01
            if "withdX"                    not in __Function: __Function["withdX"]                    = None
            if "withAvoidingRedundancy"    not in __Function: __Function["withAvoidingRedundancy"]    = avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy"    not in __Function: __Function["withLenghtOfRedundancy"]    = -1
            if "withmpEnabled"             not in __Function: __Function["withmpEnabled"]             = False
            if "withmpWorkers"             not in __Function: __Function["withmpWorkers"]             = None
            if "withmfEnabled"             not in __Function: __Function["withmfEnabled"]             = inputAsMF
            from daNumerics.ApproximatedDerivatives import FDApproximation
            FDA = FDApproximation(
                Function              = __Function["Direct"],
                centeredDF            = __Function["withCenteredDF"],
                increment             = __Function["withIncrement"],
                dX                    = __Function["withdX"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy    = __Function["withLenghtOfRedundancy"],
                mpEnabled             = __Function["withmpEnabled"],
                mpWorkers             = __Function["withmpWorkers"],
                mfEnabled             = __Function["withmfEnabled"],
                )
            self.__FO["Direct"]  = Operator( fromMethod = FDA.DirectOperator,  avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"]  = Operator( fromMethod = __Function["Direct"],  avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # FIX: numpy.float was a deprecated alias of the builtin float
            __matrice = numpy.matrix( __Matrix, float )
            self.__FO["Direct"]  = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Tangent"] = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
        else:
            raise ValueError("Improperly defined operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
        #
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # True matrix case
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Vector represented as a 2-dimensional array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), float ).T
                else:
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], float ).T
        else:
            self.__FO["AppliedInX"] = None

    def getO(self):
        "Return the dictionary of assembled operators."
        # NOTE(review): accessor restored from sampled source — confirm name against VCS
        return self.__FO

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        # FIX: the original returned repr(self.__V) but no __V attribute is
        # ever assigned on FullOperator; use the operators container instead.
        return repr(self.__FO)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__FO)
545 # ==============================================================================
546 class Algorithm(object):
548 Classe générale d'interface de type algorithme
550 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
551 d'assimilation, en fournissant un container (dictionnaire) de variables
552 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
554 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
556 def __init__(self, name):
558 L'initialisation présente permet de fabriquer des variables de stockage
559 disponibles de manière générique dans les algorithmes élémentaires. Ces
560 variables de stockage sont ensuite conservées dans un dictionnaire
561 interne à l'objet, mais auquel on accède par la méthode "get".
563 Les variables prévues sont :
564 - APosterioriCorrelations : matrice de corrélations de la matrice A
565 - APosterioriCovariance : matrice de covariances a posteriori : A
566 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
567 - APosterioriVariances : vecteur des variances de la matrice A
568 - Analysis : vecteur d'analyse : Xa
569 - BMA : Background moins Analysis : Xa - Xb
570 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
571 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
572 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
573 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
574 - CostFunctionJo : partie observations de la fonction-coût : Jo
575 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
576 - CurrentOptimum : état optimal courant lors d'itérations
577 - CurrentState : état courant lors d'itérations
578 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
579 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
580 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
581 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
582 - Innovation : l'innovation : d = Y - H(X)
583 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
584 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
585 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
586 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
587 - KalmanGainAtOptimum : gain de Kalman à l'optimum
588 - MahalanobisConsistency : indicateur de consistance des covariances
589 - OMA : Observation moins Analyse : Y - Xa
590 - OMB : Observation moins Background : Y - Xb
591 - PredictedState : état prédit courant lors d'itérations
592 - Residu : dans le cas des algorithmes de vérification
593 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
594 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
595 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
596 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
597 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
598 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
599 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
600 On peut rajouter des variables à stocker dans l'initialisation de
601 l'algorithme élémentaire qui va hériter de cette classe
603 logging.debug("%s Initialisation", str(name))
604 self._m = PlatformInfo.SystemUsage()
606 self._name = str( name )
607 self._parameters = {"StoreSupplementaryCalculations":[]}
608 self.__required_parameters = {}
609 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
610 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
612 self.StoredVariables = {}
613 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
614 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
615 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
616 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
617 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
618 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
619 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
620 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
621 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
622 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
623 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
624 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
625 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
626 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
627 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
628 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
629 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
630 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
631 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
632 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
633 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
634 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
635 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
636 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
637 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
638 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
639 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
640 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
641 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
642 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
643 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
644 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
645 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
646 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
647 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
648 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
649 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
650 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
652 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
654 logging.debug("%s Lancement", self._name)
655 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
657 # Mise a jour de self._parameters avec Parameters
658 self.__setParameters(Parameters)
660 for k, v in self.__variable_names_not_public.items():
661 if k not in self._parameters: self.__setParameters( {k:v} )
663 # Corrections et complements
664 def __test_vvalue(argument, variable, argname):
666 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
667 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
668 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
669 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
671 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
673 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
675 __test_vvalue( Xb, "Xb", "Background or initial state" )
676 __test_vvalue( Y, "Y", "Observation" )
678 def __test_cvalue(argument, variable, argname):
680 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
681 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
682 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
683 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
685 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
687 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
689 __test_cvalue( R, "R", "Observation" )
690 __test_cvalue( B, "B", "Background" )
691 __test_cvalue( Q, "Q", "Evolution" )
693 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
694 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
696 self._parameters["Bounds"] = None
698 if logging.getLogger().level < logging.WARNING:
699 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
700 if PlatformInfo.has_scipy:
701 import scipy.optimize
702 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
704 self._parameters["optmessages"] = 15
706 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
707 if PlatformInfo.has_scipy:
708 import scipy.optimize
709 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
711 self._parameters["optmessages"] = 15
715 def _post_run(self,_oH=None):
717 if ("StoreSupplementaryCalculations" in self._parameters) and \
718 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
719 for _A in self.StoredVariables["APosterioriCovariance"]:
720 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
721 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
722 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
723 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
724 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
725 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
726 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
727 self.StoredVariables["APosterioriCorrelations"].store( _C )
729 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
730 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
731 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
732 logging.debug("%s Terminé", self._name)
735 def _toStore(self, key):
736 "True if in StoreSupplementaryCalculations, else False"
737 return key in self._parameters["StoreSupplementaryCalculations"]
739 def get(self, key=None):
741 Renvoie l'une des variables stockées identifiée par la clé, ou le
742 dictionnaire de l'ensemble des variables disponibles en l'absence de
743 clé. Ce sont directement les variables sous forme objet qui sont
744 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
745 des classes de persistance.
748 return self.StoredVariables[key]
750 return self.StoredVariables
752 def __contains__(self, key=None):
753 "D.__contains__(k) -> True if D has a key k, else False"
754 return key in self.StoredVariables
757 "D.keys() -> list of D's keys"
758 if hasattr(self, "StoredVariables"):
759 return self.StoredVariables.keys()
764 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
765 if hasattr(self, "StoredVariables"):
766 return self.StoredVariables.pop(k, d)
771 raise TypeError("pop expected at least 1 arguments, got 0")
772 "If key is not found, d is returned if given, otherwise KeyError is raised"
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
    """
    Abstract entry point: concrete algorithms must implement here the
    elementary assimilation computation, in its most natural mathematical
    form, from the background, observation, operators and covariances.
    """
    raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
    """
    Declare a required algorithm parameter and its default characteristics.

    name     : parameter identifier (mandatory)
    default  : value used when the user does not provide one
    typecast : callable applied to cast the incoming value
    message  : human-readable description, used for logging
    minval, maxval : optional numerical bounds checked at assignment
    listval  : optional list of allowed values
    """
    if name is None:
        raise ValueError("A name is mandatory to define a required parameter.")
    #
    self.__required_parameters[name] = {
        "default"  : default,
        "typecast" : typecast,
        "minval"   : minval,
        "maxval"   : maxval,
        "listval"  : listval,
        "message"  : message,
        }
    logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
    """
    Return the sorted list of the names of the required parameters, or
    directly the full dictionary of required parameters when noDetails
    is False.
    """
    if noDetails:
        return sorted(self.__required_parameters.keys())
    else:
        return self.__required_parameters
def setParameterValue(self, name=None, value=None):
    """
    Return the value of a required parameter in a controlled way: the
    given value (or the declared default when value is None) is cast
    with the declared typecast, then checked against the optional
    min/max bounds and the optional list of allowed values.

    Raises ValueError when a bound or an allowed-values check fails.
    """
    default  = self.__required_parameters[name]["default"]
    typecast = self.__required_parameters[name]["typecast"]
    minval   = self.__required_parameters[name]["minval"]
    maxval   = self.__required_parameters[name]["maxval"]
    listval  = self.__required_parameters[name]["listval"]
    #
    # Select and cast the effective value
    if value is None and default is None:
        __val = None
    elif value is None and default is not None:
        if typecast is None: __val = default
        else:                __val = typecast( default )
    else:
        if typecast is None: __val = value
        else:                __val = typecast( value )
    #
    # Bounds checks (element-wise, so vectors of values are supported)
    if minval is not None and (numpy.array(__val, float) < minval).any():
        raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
    if maxval is not None and (numpy.array(__val, float) > maxval).any():
        raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
    # Allowed-values check, element-wise for list/tuple values
    if listval is not None:
        if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
            for v in __val:
                if v not in listval:
                    raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
        elif __val not in listval:
            raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
    return __val
def requireInputArguments(self, mandatory=(), optional=()):
    """
    Register the input arguments that the algorithm requires (mandatory)
    or accepts (optional); both are stored as tuples of names.
    """
    __riv = self.__required_inputs["RequiredInputValues"]
    __riv["mandatory"] = tuple( mandatory )
    __riv["optional"]  = tuple( optional )
def __setParameters(self, fromDico=None):
    """
    Store the received parameters in the internal dictionary: every
    declared required parameter is (re)assigned through
    setParameterValue, using the user value when present in fromDico and
    the declared default otherwise.
    """
    # Avoid the shared-mutable-default pitfall of "fromDico={}"
    if fromDico is None:
        fromDico = {}
    self._parameters.update( fromDico )
    for k in self.__required_parameters.keys():
        if k in fromDico.keys():
            self._parameters[k] = self.setParameterValue(k,fromDico[k])
        else:
            self._parameters[k] = self.setParameterValue(k)
        logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
864 # ==============================================================================
865 class AlgorithmAndParameters(object):
867 Classe générale d'interface d'action pour l'algorithme et ses paramètres
870 name = "GenericAlgorithm",
877 self.__name = str(name)
881 self.__algorithm = {}
882 self.__algorithmFile = None
883 self.__algorithmName = None
885 self.updateParameters( asDict, asScript )
887 if asAlgorithm is None and asScript is not None:
888 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
892 if __Algo is not None:
893 self.__A = str(__Algo)
894 self.__P.update( {"Algorithm":self.__A} )
896 self.__setAlgorithm( self.__A )
898 self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
900 def updateParameters(self,
904 "Mise a jour des parametres"
905 if asDict is None and asScript is not None:
906 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
910 if __Dict is not None:
911 self.__P.update( dict(__Dict) )
913 def executePythonScheme(self, asDictAO = None):
914 "Permet de lancer le calcul d'assimilation"
915 Operator.CM.clearCache()
917 if not isinstance(asDictAO, dict):
918 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
919 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
920 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
921 else: self.__Xb = None
922 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
923 else: self.__Y = asDictAO["Observation"]
924 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
925 else: self.__U = asDictAO["ControlInput"]
926 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
927 else: self.__HO = asDictAO["ObservationOperator"]
928 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
929 else: self.__EM = asDictAO["EvolutionModel"]
930 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
931 else: self.__CM = asDictAO["ControlModel"]
932 self.__B = asDictAO["BackgroundError"]
933 self.__R = asDictAO["ObservationError"]
934 self.__Q = asDictAO["EvolutionError"]
936 self.__shape_validate()
938 self.__algorithm.run(
948 Parameters = self.__P,
952 def executeYACSScheme(self, FileName=None):
953 "Permet de lancer le calcul d'assimilation"
954 if FileName is None or not os.path.exists(FileName):
955 raise ValueError("a YACS file name has to be given for YACS execution.\n")
957 __file = os.path.abspath(FileName)
958 logging.debug("The YACS file name is \"%s\"."%__file)
959 if not PlatformInfo.has_salome or \
960 not PlatformInfo.has_yacs or \
961 not PlatformInfo.has_adao:
962 raise ImportError("\n\n"+\
963 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
964 "Please load the right environnement before trying to use it.\n")
969 SALOMERuntime.RuntimeSALOME_setRuntime()
971 r = pilot.getRuntime()
972 xmlLoader = loader.YACSLoader()
973 xmlLoader.registerProcCataLoader()
975 catalogAd = r.loadCatalog("proc", __file)
976 r.addCatalog(catalogAd)
981 p = xmlLoader.load(__file)
982 except IOError as ex:
983 print("The YACS XML schema file can not be loaded: %s"%(ex,))
985 logger = p.getLogger("parser")
986 if not logger.isEmpty():
987 print("The imported YACS XML schema has errors on parsing:")
988 print(logger.getStr())
991 print("The YACS XML schema is not valid and will not be executed:")
992 print(p.getErrorReport())
994 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
995 p.checkConsistency(info)
996 if info.areWarningsOrErrors():
997 print("The YACS XML schema is not coherent and will not be executed:")
998 print(info.getGlobalRepr())
1000 e = pilot.ExecutorSwig()
1002 if p.getEffectiveState() != pilot.DONE:
1003 print(p.getErrorReport())
1007 def get(self, key = None):
1008 "Vérifie l'existence d'une clé de variable ou de paramètres"
1009 if key in self.__algorithm:
1010 return self.__algorithm.get( key )
1011 elif key in self.__P:
1012 return self.__P[key]
1014 allvariables = self.__P
1015 for k in self.__variable_names_not_public: allvariables.pop(k, None)
def pop(self, k, d):
    "Required for pickling: remove key k from the internal algorithm storage and return its value (or d)."
    container = self.__algorithm
    return container.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the required parameters as declared by the selected algorithm."
    algorithm = self.__algorithm
    return algorithm.getRequiredParameters(noDetails)
1026 def setObserver(self, __V, __O, __I, __S):
1027 if self.__algorithm is None \
1028 or isinstance(self.__algorithm, dict) \
1029 or not hasattr(self.__algorithm,"StoredVariables"):
1030 raise ValueError("No observer can be build before choosing an algorithm.")
1031 if __V not in self.__algorithm:
1032 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1034 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1037 HookParameters = __I,
1040 def removeObserver(self, __V, __O, __A = False):
1041 if self.__algorithm is None \
1042 or isinstance(self.__algorithm, dict) \
1043 or not hasattr(self.__algorithm,"StoredVariables"):
1044 raise ValueError("No observer can be removed before choosing an algorithm.")
1045 if __V not in self.__algorithm:
1046 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1048 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
def hasObserver(self, __V):
    """
    Return True if a data observer is set on the stored variable named
    __V, False when no algorithm is selected or the variable is unknown.
    """
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        return False
    if __V not in self.__algorithm:
        return False
    return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1063 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1064 for k in self.__variable_names_not_public:
1065 if k in __allvariables: __allvariables.remove(k)
1066 return __allvariables
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    in_algorithm = key in self.__algorithm
    in_parameters = key in self.__P
    return in_algorithm or in_parameters
1073 "x.__repr__() <==> repr(x)"
1074 return repr(self.__A)+", "+repr(self.__P)
1077 "x.__str__() <==> str(x)"
1078 return str(self.__A)+", "+str(self.__P)
def __setAlgorithm(self, choice = None ):
    """
    Select the algorithm used to carry out the assimilation study. The
    argument is a character string naming an algorithm module located in
    a "daAlgorithms" sub-directory of the search path; the module must
    define an "ElementaryAlgorithm" class, which is instantiated here.
    """
    if choice is None:
        raise ValueError("Error: algorithm choice has to be given")
    if self.__algorithmName is not None:
        raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
    daDirectory = "daAlgorithms"
    #
    # Explicitly look for the complete module file in the search path
    # ---------------------------------------------------------------
    module_path = None
    for directory in sys.path:
        if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
            module_path = os.path.abspath(os.path.join(directory, daDirectory))
    if module_path is None:
        raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
    #
    # Import the complete file as a module
    # ------------------------------------
    sys_path_tmp = sys.path
    sys.path.insert(0,module_path)
    try:
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
    except ImportError as e:
        raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
    finally:
        # Always restore the search path, even when the import fails
        # (the original left module_path inserted on ImportError).
        sys.path = sys_path_tmp
    #
    # Instantiate an object of the elementary type of the module
    # ----------------------------------------------------------
    self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
def __shape_validate(self):
    """
    Validate the mutual consistency of the sizes of the vectors and of
    the shapes of the matrices/operators of the problem, when present.
    Raises TypeError when an object exposes no usable shape, and
    ValueError on any incompatibility.
    """
    # Collect a shape for each object; "shape" may be an attribute
    # (numpy) or a method (Covariance/Operator project objects).
    if self.__Xb is None: __Xb_shape = (0,)
    elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
    elif hasattr(self.__Xb,"shape"):
        if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
        else: __Xb_shape = self.__Xb.shape()
    else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
    #
    if self.__Y is None: __Y_shape = (0,)
    elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
    elif hasattr(self.__Y,"shape"):
        if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
        else: __Y_shape = self.__Y.shape()
    else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
    #
    if self.__U is None: __U_shape = (0,)
    elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
    elif hasattr(self.__U,"shape"):
        if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
        else: __U_shape = self.__U.shape()
    else: raise TypeError("The control (U) has no attribute of shape: problem !")
    #
    if self.__B is None: __B_shape = (0,0)
    elif hasattr(self.__B,"shape"):
        if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
        else: __B_shape = self.__B.shape()
    else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
    #
    if self.__R is None: __R_shape = (0,0)
    elif hasattr(self.__R,"shape"):
        if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
        else: __R_shape = self.__R.shape()
    else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
    #
    if self.__Q is None: __Q_shape = (0,0)
    elif hasattr(self.__Q,"shape"):
        if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
        else: __Q_shape = self.__Q.shape()
    else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
    #
    if len(self.__HO) == 0: __HO_shape = (0,0)
    elif isinstance(self.__HO, dict): __HO_shape = (0,0)
    elif hasattr(self.__HO["Direct"],"shape"):
        if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
        else: __HO_shape = self.__HO["Direct"].shape()
    else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
    #
    if len(self.__EM) == 0: __EM_shape = (0,0)
    elif isinstance(self.__EM, dict): __EM_shape = (0,0)
    elif hasattr(self.__EM["Direct"],"shape"):
        if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
        else: __EM_shape = self.__EM["Direct"].shape()
    else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
    #
    if len(self.__CM) == 0: __CM_shape = (0,0)
    elif isinstance(self.__CM, dict): __CM_shape = (0,0)
    elif hasattr(self.__CM["Direct"],"shape"):
        if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
        else: __CM_shape = self.__CM["Direct"].shape()
    else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
    #
    # Conditions checking
    # -------------------
    if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
        raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
    if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
        raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
    #
    # Covariance matrices and the evolution operator must be square
    if not( min(__B_shape) == max(__B_shape) ):
        raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
    if not( min(__R_shape) == max(__R_shape) ):
        raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
    if not( min(__Q_shape) == max(__Q_shape) ):
        raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
    if not( min(__EM_shape) == max(__EM_shape) ):
        raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
    #
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
    #
    if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
        if self.__algorithmName in ["EnsembleBlue",]:
            # EnsembleBlue: the background is an ensemble, split it into
            # a persistent series of member vectors of size min(B shape).
            asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
            self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
            for member in asPersistentVector:
                # "numpy.float" was removed in NumPy 1.24; the builtin
                # float is the documented equivalent.
                self.__Xb.store( numpy.matrix( numpy.ravel(member), float ).T )
            # NOTE(review): this stores a scalar, while the other
            # *_shape variables are tuples — confirm downstream usage.
            __Xb_shape = min(__B_shape)
        else:
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
    #
    if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
        raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
    #
    if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
        raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
    #
    if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
        raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
    #
    if ("Bounds" in self.__P) \
        and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
        and (len(self.__P["Bounds"]) != max(__Xb_shape)):
        raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
            %(len(self.__P["Bounds"]),max(__Xb_shape)))
1235 # ==============================================================================
1236 class RegulationAndParameters(object):
1238 Classe générale d'interface d'action pour la régulation et ses paramètres
1241 name = "GenericRegulation",
1248 self.__name = str(name)
1251 if asAlgorithm is None and asScript is not None:
1252 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
1254 __Algo = asAlgorithm
1256 if asDict is None and asScript is not None:
1257 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1261 if __Dict is not None:
1262 self.__P.update( dict(__Dict) )
1264 if __Algo is not None:
1265 self.__P.update( {"Algorithm":__Algo} )
1267 def get(self, key = None):
1268 "Vérifie l'existence d'une clé de variable ou de paramètres"
1270 return self.__P[key]
1274 # ==============================================================================
1275 class DataObserver(object):
1277 Classe générale d'interface de type observer
1280 name = "GenericObserver",
1292 self.__name = str(name)
1297 if onVariable is None:
1298 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1299 elif type(onVariable) in (tuple, list):
1300 self.__V = tuple(map( str, onVariable ))
1301 if withInfo is None:
1304 self.__I = (str(withInfo),)*len(self.__V)
1305 elif isinstance(onVariable, str):
1306 self.__V = (onVariable,)
1307 if withInfo is None:
1308 self.__I = (onVariable,)
1310 self.__I = (str(withInfo),)
1312 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1314 if asString is not None:
1315 __FunctionText = asString
1316 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1317 __FunctionText = Templates.ObserverTemplates[asTemplate]
1318 elif asScript is not None:
1319 __FunctionText = ImportFromScript(asScript).getstring()
1322 __Function = ObserverF(__FunctionText)
1324 if asObsObject is not None:
1325 self.__O = asObsObject
1327 self.__O = __Function.getfunc()
1329 for k in range(len(self.__V)):
1332 if ename not in withAlgo:
1333 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1335 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1338 "x.__repr__() <==> repr(x)"
1339 return repr(self.__V)+"\n"+repr(self.__O)
1342 "x.__str__() <==> str(x)"
1343 return str(self.__V)+"\n"+str(self.__O)
1345 # ==============================================================================
1346 class State(object):
1348 Classe générale d'interface de type état
1351 name = "GenericVector",
1353 asPersistentVector = None,
1359 toBeChecked = False,
1362 Permet de définir un vecteur :
1363 - asVector : entrée des données, comme un vecteur compatible avec le
1364 constructeur de numpy.matrix, ou "True" si entrée par script.
1365 - asPersistentVector : entrée des données, comme une série de vecteurs
1366 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1367 type Persistence, ou "True" si entrée par script.
1368 - asScript : si un script valide est donné contenant une variable
1369 nommée "name", la variable est de type "asVector" (par défaut) ou
1370 "asPersistentVector" selon que l'une de ces variables est placée à
1372 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1373 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1374 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1375 nommée "name"), on récupère les colonnes et on les range ligne après
1376 ligne (colMajor=False) ou colonne après colonne (colMajor=True). La
1377 variable résultante est de type "asVector" (par défaut) ou
1378 "asPersistentVector" selon que l'une de ces variables est placée à
1381 self.__name = str(name)
1382 self.__check = bool(toBeChecked)
1386 self.__is_vector = False
1387 self.__is_series = False
1389 if asScript is not None:
1390 __Vector, __Series = None, None
1391 if asPersistentVector:
1392 __Series = ImportFromScript(asScript).getvalue( self.__name )
1394 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1395 elif asDataFile is not None:
1396 __Vector, __Series = None, None
1397 if asPersistentVector:
1398 if colNames is not None:
1399 __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
1401 __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1402 if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1403 __Series = numpy.transpose(__Series)
1404 elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1405 __Series = numpy.transpose(__Series)
1407 if colNames is not None:
1408 __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
1410 __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1412 __Vector = numpy.ravel(__Vector, order = "F")
1414 __Vector = numpy.ravel(__Vector, order = "C")
1416 __Vector, __Series = asVector, asPersistentVector
1418 if __Vector is not None:
1419 self.__is_vector = True
1420 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1421 self.shape = self.__V.shape
1422 self.size = self.__V.size
1423 elif __Series is not None:
1424 self.__is_series = True
1425 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1426 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1427 if isinstance(__Series, str): __Series = eval(__Series)
1428 for member in __Series:
1429 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1432 if isinstance(self.__V.shape, (tuple, list)):
1433 self.shape = self.__V.shape
1435 self.shape = self.__V.shape()
1436 if len(self.shape) == 1:
1437 self.shape = (self.shape[0],1)
1438 self.size = self.shape[0] * self.shape[1]
1440 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1442 if scheduledBy is not None:
1443 self.__T = scheduledBy
1445 def getO(self, withScheduler=False):
1447 return self.__V, self.__T
1448 elif self.__T is None:
1454 "Vérification du type interne"
1455 return self.__is_vector
1458 "Vérification du type interne"
1459 return self.__is_series
1462 "x.__repr__() <==> repr(x)"
1463 return repr(self.__V)
1466 "x.__str__() <==> str(x)"
1467 return str(self.__V)
1469 # ==============================================================================
1470 class Covariance(object):
1472 Classe générale d'interface de type covariance
1475 name = "GenericCovariance",
1476 asCovariance = None,
1477 asEyeByScalar = None,
1478 asEyeByVector = None,
1481 toBeChecked = False,
1484 Permet de définir une covariance :
1485 - asCovariance : entrée des données, comme une matrice compatible avec
1486 le constructeur de numpy.matrix
1487 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1488 multiplicatif d'une matrice de corrélation identité, aucune matrice
1489 n'étant donc explicitement à donner
1490 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1491 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1492 n'étant donc explicitement à donner
1493 - asCovObject : entrée des données comme un objet python, qui a les
1494 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1495 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1496 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1497 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1498 pleine doit être vérifié
1500 self.__name = str(name)
1501 self.__check = bool(toBeChecked)
1504 self.__is_scalar = False
1505 self.__is_vector = False
1506 self.__is_matrix = False
1507 self.__is_object = False
1509 if asScript is not None:
1510 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1512 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1514 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1516 __Object = ImportFromScript(asScript).getvalue( self.__name )
1518 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1520 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1522 if __Scalar is not None:
1523 if numpy.matrix(__Scalar).size != 1:
1524 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1525 self.__is_scalar = True
1526 self.__C = numpy.abs( float(__Scalar) )
1529 elif __Vector is not None:
1530 self.__is_vector = True
1531 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1532 self.shape = (self.__C.size,self.__C.size)
1533 self.size = self.__C.size**2
1534 elif __Matrix is not None:
1535 self.__is_matrix = True
1536 self.__C = numpy.matrix( __Matrix, float )
1537 self.shape = self.__C.shape
1538 self.size = self.__C.size
1539 elif __Object is not None:
1540 self.__is_object = True
1542 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1543 if not hasattr(self.__C,at):
1544 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1545 if hasattr(self.__C,"shape"):
1546 self.shape = self.__C.shape
1549 if hasattr(self.__C,"size"):
1550 self.size = self.__C.size
1555 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1559 def __validate(self):
1561 if self.__C is None:
1562 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1563 if self.ismatrix() and min(self.shape) != max(self.shape):
1564 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1565 if self.isobject() and min(self.shape) != max(self.shape):
1566 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1567 if self.isscalar() and self.__C <= 0:
1568 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1569 if self.isvector() and (self.__C <= 0).any():
1570 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1571 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1573 L = numpy.linalg.cholesky( self.__C )
1575 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1576 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1578 L = self.__C.cholesky()
1580 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1583 "Vérification du type interne"
1584 return self.__is_scalar
1587 "Vérification du type interne"
1588 return self.__is_vector
1591 "Vérification du type interne"
1592 return self.__is_matrix
1595 "Vérification du type interne"
1596 return self.__is_object
1601 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1602 elif self.isvector():
1603 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1604 elif self.isscalar():
1605 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1606 elif self.isobject():
1607 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1609 return None # Indispensable
1614 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1615 elif self.isvector():
1616 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1617 elif self.isscalar():
1618 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1619 elif self.isobject():
1620 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1623 "Décomposition de Cholesky"
1625 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1626 elif self.isvector():
1627 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1628 elif self.isscalar():
1629 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1630 elif self.isobject() and hasattr(self.__C,"cholesky"):
1631 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1633 def choleskyI(self):
1634 "Inversion de la décomposition de Cholesky"
1636 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1637 elif self.isvector():
1638 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1639 elif self.isscalar():
1640 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1641 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1642 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1644 def diag(self, msize=None):
1645 "Diagonale de la matrice"
1647 return numpy.diag(self.__C)
1648 elif self.isvector():
1650 elif self.isscalar():
1652 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1654 return self.__C * numpy.ones(int(msize))
1655 elif self.isobject():
1656 return self.__C.diag()
1658 def asfullmatrix(self, msize=None):
1662 elif self.isvector():
1663 return numpy.matrix( numpy.diag(self.__C), float )
1664 elif self.isscalar():
1666 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1668 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1669 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1670 return self.__C.asfullmatrix()
1672 def trace(self, msize=None):
1673 "Trace de la matrice"
1675 return numpy.trace(self.__C)
1676 elif self.isvector():
1677 return float(numpy.sum(self.__C))
1678 elif self.isscalar():
1680 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1682 return self.__C * int(msize)
1683 elif self.isobject():
1684 return self.__C.trace()
1690 "x.__repr__() <==> repr(x)"
1691 return repr(self.__C)
1694 "x.__str__() <==> str(x)"
1695 return str(self.__C)
def __add__(self, other):
    """x.__add__(y) <==> x+y

    Returns the sum as a numpy matrix. In the vector/scalar cases the
    stored covariance is diagonal, so only the diagonal of y is shifted.
    """
    if self.ismatrix() or self.isobject():
        return self.__C + numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Copy the operand: numpy.asarray may return a *view* on the
        # caller's data, and the in-place update below must not mutate
        # the caller's array. float dtype also keeps the += from failing
        # on integer operands.
        _A = numpy.array(other, dtype=float)
        # A flat view strided by (ncols + 1) addresses exactly the
        # diagonal entries of a 2D array.
        _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
        return numpy.asmatrix(_A)
def __radd__(self, other):
    "x.__radd__(y) <==> y+x"
    # Right-hand addition is deliberately unsupported: fail loudly,
    # reporting the offending operand type.
    message = "%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
def __sub__(self, other):
    """x.__sub__(y) <==> x-y

    Returns the difference as a numpy matrix.
    """
    if self.ismatrix() or self.isobject():
        return self.__C - numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # The stored covariance is diagonal here, so x - y equals (-y)
        # everywhere, with the stored diagonal added on the diagonal.
        # The previous code replaced only the diagonal of y and left the
        # off-diagonal entries of y with a positive sign (sign error),
        # and could mutate the caller's array through the asarray view.
        _A = -numpy.array(other, dtype=float)
        # A flat view strided by (ncols + 1) addresses exactly the
        # diagonal entries of a 2D array.
        _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
        return numpy.asmatrix(_A)
def __rsub__(self, other):
    "x.__rsub__(y) <==> y-x"
    # Right-hand subtraction is deliberately unsupported: fail loudly,
    # reporting the offending operand type.
    message = "%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
1724 "x.__neg__() <==> -x"
1727 def __mul__(self, other):
1728 "x.__mul__(y) <==> x*y"
1729 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1730 return self.__C * other
1731 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1732 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1733 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1734 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1735 return self.__C * numpy.asmatrix(other)
1737 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1738 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1739 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1740 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1741 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1742 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1744 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1745 elif self.isscalar() and isinstance(other,numpy.matrix):
1746 return self.__C * other
1747 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1748 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1749 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1751 return self.__C * numpy.asmatrix(other)
1752 elif self.isobject():
1753 return self.__C.__mul__(other)
1755 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1757 def __rmul__(self, other):
1758 "x.__rmul__(y) <==> y*x"
1759 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1760 return other * self.__C
1761 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1762 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1763 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1764 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1765 return numpy.asmatrix(other) * self.__C
1767 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1768 elif self.isvector() and isinstance(other,numpy.matrix):
1769 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1770 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1771 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1772 return numpy.asmatrix(numpy.array(other) * self.__C)
1774 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1775 elif self.isscalar() and isinstance(other,numpy.matrix):
1776 return other * self.__C
1777 elif self.isobject():
1778 return self.__C.__rmul__(other)
1780 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1783 "x.__len__() <==> len(x)"
1784 return self.shape[0]
1786 # ==============================================================================
1787 class ObserverF(object):
1789 Creation d'une fonction d'observateur a partir de son texte
def __init__(self, corps=""):
    # Keep the observer function's source text for later use.
    self.__corps = corps
1793 def func(self,var,info):
1794 "Fonction d'observation"
1797 "Restitution du pointeur de fonction dans l'objet"
1800 # ==============================================================================
1801 class CaseLogger(object):
1803 Conservation des commandes de creation d'un cas
1805 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
1806 self.__name = str(__name)
1807 self.__objname = str(__objname)
1808 self.__logSerie = []
1809 self.__switchoff = False
1811 "TUI" :Interfaces._TUIViewer,
1812 "SCD" :Interfaces._SCDViewer,
1813 "YACS":Interfaces._YACSViewer,
1816 "TUI" :Interfaces._TUIViewer,
1817 "COM" :Interfaces._COMViewer,
1819 if __addViewers is not None:
1820 self.__viewers.update(dict(__addViewers))
1821 if __addLoaders is not None:
1822 self.__loaders.update(dict(__addLoaders))
1824 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
1825 "Enregistrement d'une commande individuelle"
1826 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
1827 if "self" in __keys: __keys.remove("self")
1828 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
1830 self.__switchoff = True
1832 self.__switchoff = False
1834 def dump(self, __filename=None, __format="TUI", __upa=""):
1835 "Restitution normalisée des commandes"
1836 if __format in self.__viewers:
1837 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
1839 raise ValueError("Dumping as \"%s\" is not available"%__format)
1840 return __formater.dump(__filename, __upa)
1842 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
1843 "Chargement normalisé des commandes"
1844 if __format in self.__loaders:
1845 __formater = self.__loaders[__format]()
1847 raise ValueError("Loading as \"%s\" is not available"%__format)
1848 return __formater.load(__filename, __content, __object)
1850 # ==============================================================================
1851 def MultiFonction( __xserie, _extraArguments = None, _sFunction = lambda x: x ):
1853 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
1854 correspondante de valeurs de la fonction en argument
1856 # Vérifications et définitions initiales
1857 if not PlatformInfo.isIterable( __xserie ):
1858 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
1862 if _extraArguments is None:
1863 for __xvalue in __xserie:
1864 __multiHX.append( _sFunction( __xvalue ) )
1865 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
1866 for __xvalue in __xserie:
1867 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
1868 elif _extraArguments is not None and isinstance(_extraArguments, dict):
1869 for __xvalue in __xserie:
1870 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
1872 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
1876 # ==============================================================================
1877 def CostFunction3D(_x,
1878 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
1879 _HmX = None, # Simulation déjà faite de Hm(x)
1880 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
1885 _SIV = False, # A résorber pour la 8.0
1886 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
1887 _nPS = 0, # nbPreviousSteps
1888 _QM = "DA", # QualityMeasure
1889 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
1890 _fRt = False, # Restitue ou pas la sortie étendue
1891 _sSc = True, # Stocke ou pas les SSC
1894 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
1895 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
1896 DFO, QuantileRegression
1902 for k in ["CostFunctionJ",
1908 "SimulatedObservationAtCurrentOptimum",
1909 "SimulatedObservationAtCurrentState",
1913 if hasattr(_SSV[k],"store"):
1914 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
1916 _X = numpy.asmatrix(numpy.ravel( _x )).T
1917 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
1918 _SSV["CurrentState"].append( _X )
1920 if _HmX is not None:
1924 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
1928 _HX = _Hm( _X, *_arg )
1929 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1931 if "SimulatedObservationAtCurrentState" in _SSC or \
1932 "SimulatedObservationAtCurrentOptimum" in _SSC:
1933 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
1935 if numpy.any(numpy.isnan(_HX)):
1936 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
1938 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
1939 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
1940 if _BI is None or _RI is None:
1941 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
1942 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
1943 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
1944 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
1945 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
1947 raise ValueError("Observation error covariance matrix has to be properly defined!")
1949 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
1950 elif _QM in ["LeastSquares", "LS", "L2"]:
1952 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
1953 elif _QM in ["AbsoluteValue", "L1"]:
1955 Jo = numpy.sum( numpy.abs(_Y - _HX) )
1956 elif _QM in ["MaximumError", "ME"]:
1958 Jo = numpy.max( numpy.abs(_Y - _HX) )
1959 elif _QM in ["QR", "Null"]:
1963 raise ValueError("Unknown asked quality measure!")
1965 J = float( Jb ) + float( Jo )
1968 _SSV["CostFunctionJb"].append( Jb )
1969 _SSV["CostFunctionJo"].append( Jo )
1970 _SSV["CostFunctionJ" ].append( J )
1972 if "IndexOfOptimum" in _SSC or \
1973 "CurrentOptimum" in _SSC or \
1974 "SimulatedObservationAtCurrentOptimum" in _SSC:
1975 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
1976 if "IndexOfOptimum" in _SSC:
1977 _SSV["IndexOfOptimum"].append( IndexMin )
1978 if "CurrentOptimum" in _SSC:
1979 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
1980 if "SimulatedObservationAtCurrentOptimum" in _SSC:
1981 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
1986 if _QM in ["QR"]: # Pour le QuantileRegression
1991 # ==============================================================================
# Minimal self-test banner printed when this module is run as a script.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')