1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2019 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence
36 from daCore import PlatformInfo
37 from daCore import Interfaces
38 from daCore import Templates
39 from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
# CacheManager: a small FIFO cache of previously computed operator
# evaluations, used to skip redundant calls. Each entry stored in
# self.__listOPCV is a triple (flattened input point, result, norm of point).
# NOTE(review): this extract embeds original line numbers and is missing
# interior lines (return statements, method headers around original lines
# 88-99); only comments were added here, no code token was changed.
42 class CacheManager(object):
44 Classe générale de gestion d'un cache de calculs
# toleranceInRedundancy: relative tolerance below which two points are
# considered identical. lenghtOfRedundancy: maximum number of cached
# entries; -1 means "auto-size on first store" (see storeValueInX).
# ("lenght" is the historical misspelling used consistently in this file.)
47 toleranceInRedundancy = 1.e-18,
48 lenghtOfRedundancy = -1,
51 Les caractéristiques de tolérance peuvent être modifées à la création.
53 self.__tolerBP = float(toleranceInRedundancy)
54 self.__lenghtOR = int(lenghtOfRedundancy)
# Remember the initial capacity so the (truncated) enable/disable methods
# near original lines 94-99 can restore it later.
55 self.__initlnOR = self.__lenghtOR
60 self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
61 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
# Scan the cache backwards (most recent entries first) for a point close
# enough to xValue; the return statements are missing from this extract,
# but the visible logic binds the cached result to __HxV on a hit.
63 def wasCalculatedIn(self, xValue ): #, info="" ):
64 "Vérifie l'existence d'un calcul correspondant à la valeur"
67 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# A size mismatch means this cannot be the same point; the skip/continue
# that presumably follows is missing from this extract — TODO confirm.
68 if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
69 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
# Relative-distance duplicate test: ||x - x_i|| < tol * ||x_i||.
71 if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
73 __HxV = self.__listOPCV[i][1]
74 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
# Store one (x, H(x)) pair, auto-sizing the cache on first use and
# evicting the oldest entries (FIFO) when over capacity.
78 def storeValueInX(self, xValue, HxValue ):
79 "Stocke un calcul correspondant à la valeur"
# Capacity < 0 means "not sized yet": use 2*n + 2 slots for an n-sized state.
80 if self.__lenghtOR < 0:
81 self.__lenghtOR = 2 * xValue.size + 2
82 self.__initlnOR = self.__lenghtOR
83 while len(self.__listOPCV) > self.__lenghtOR:
84 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
85 self.__listOPCV.pop(0)
# Append the (point, result, norm) triple; the HxValue element of the
# tuple (original line 88) is missing from this extract.
86 self.__listOPCV.append( (
87 copy.copy(numpy.ravel(xValue)),
89 numpy.linalg.norm(xValue),
# The two orphan lines below belong to truncated enable/disable methods
# that save/restore the cache capacity — their headers are not visible here.
94 self.__initlnOR = self.__lenghtOR
99 self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
# Operator: generic wrapper around either a user function/multi-function
# ("Method" type) or a matrix ("Matrix" type), with per-instance and
# class-wide call counters and an optional redundancy cache (Operator.CM,
# declared on a line missing from this extract).
# NOTE(review): this extract embeds original line numbers and is missing
# interior lines; only comments were added, no code token was changed.
102 class Operator(object):
104 Classe générale d'interface de type opérateur simple
# Constructor keyword defaults (the "def __init__" line itself and the
# fromMethod/fromMatrix parameters are on lines missing from this extract).
114 avoidingRedundancy = True,
115 inputAsMultiFunction = False,
116 extraArguments = None,
119 On construit un objet de ce type en fournissant, à l'aide de l'un des
120 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
123 - fromMethod : argument de type fonction Python
124 - fromMatrix : argument adapté au constructeur numpy.matrix
125 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
126 - inputAsMultiFunction : booléen indiquant une fonction explicitement
127 définie (ou pas) en multi-fonction
128 - extraArguments : arguments supplémentaires passés à la fonction de
129 base et ses dérivées (tuple ou dictionnaire)
# Per-instance call counters: matrix products, method calls, cache hits.
131 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
132 self.__AvoidRC = bool( avoidingRedundancy )
133 self.__inputAsMF = bool( inputAsMultiFunction )
134 self.__extraArgs = extraArguments
# Dispatch on how the operator was supplied: already a multi-function,
# a mono-function wrapped into one via MultiFonction, or a plain matrix.
135 if fromMethod is not None and self.__inputAsMF:
136 self.__Method = fromMethod # logtimer(fromMethod)
138 self.__Type = "Method"
139 elif fromMethod is not None and not self.__inputAsMF:
# Wrap the mono-function so it can be applied to a series of inputs.
140 self.__Method = partial( MultiFonction, _sFunction=fromMethod)
142 self.__Type = "Method"
143 elif fromMatrix is not None:
# NOTE(review): numpy.float is a deprecated alias removed in NumPy 1.24;
# under modern NumPy this line raises AttributeError — should be float
# or numpy.float64. Left unchanged here (comment-only pass).
145 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
146 self.__Type = "Matrix"
# Toggle the shared redundancy cache for ALL Operator instances
# (CM is a class attribute, its declaration is missing from this extract).
152 def disableAvoidingRedundancy(self):
154 Operator.CM.disable()
# NOTE(review): the visible body of enableAvoidingRedundancy also calls
# disable() — presumably this is the else-branch of a truncated
# "if self.__AvoidRC: enable() else: disable()" — TODO confirm upstream.
156 def enableAvoidingRedundancy(self):
161 Operator.CM.disable()
# Apply the operator to one value or a series of values, consulting the
# cache and batching all non-cached points into one multi-function call.
167 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
169 Permet de restituer le résultat de l'application de l'opérateur à une
170 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
171 argument devant a priori être du bon type.
173 - les arguments par série sont :
174 - xValue : argument adapté pour appliquer l'opérateur
175 - HValue : valeur précalculée de l'opérateur en ce point
176 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# Precomputed-results path: trust HValue instead of evaluating.
183 if HValue is not None:
187 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
189 if _HValue is not None:
190 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
# Store each supplied (x, H(x)) pair as a column matrix, and cache it.
192 for i in range(len(_HValue)):
193 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
195 Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
# Normal path: for each point, try the cache, else compute (matrix
# product immediately, or defer method calls into the _xserie batch —
# the lines building _xserie are missing from this extract).
200 for i, xv in enumerate(_xValue):
202 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
204 __alreadyCalculated = False
206 if __alreadyCalculated:
207 self.__addOneCacheCall()
210 if self.__Matrix is not None:
211 self.__addOneMatrixCall()
212 _hv = self.__Matrix * xv
214 self.__addOneMethodCall()
218 HxValue.append( _hv )
# Evaluate all deferred points in a single multi-function call.
220 if len(_xserie)>0 and self.__Matrix is None:
221 if self.__extraArgs is None:
222 _hserie = self.__Method( _xserie ) # Calcul MF
224 _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
# A multi-function must return a sequence (duck-typed via .pop).
225 if not hasattr(_hserie, "pop"):
226 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
# Back-fill the cache with the freshly computed pairs.
232 Operator.CM.storeValueInX(_xv,_hv)
# Return the whole series, or only the single result in mono-value mode.
234 if argsAsSerie: return HxValue
235 else: return HxValue[-1]
# Apply the operator to (x, u) control pairs; u is ignored when None.
237 def appliedControledFormTo(self, paires, argsAsSerie = False ):
239 Permet de restituer le résultat de l'application de l'opérateur à des
240 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
241 argument devant a priori être du bon type. Si la uValue est None,
242 on suppose que l'opérateur ne s'applique qu'à xValue.
244 - paires : les arguments par paire sont :
245 - xValue : argument X adapté pour appliquer l'opérateur
246 - uValue : argument U adapté pour appliquer l'opérateur
247 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
249 if argsAsSerie: _xuValue = paires
250 else: _xuValue = (paires,)
251 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix form applies to X only; the control U is ignored.
253 if self.__Matrix is not None:
255 for paire in _xuValue:
256 _xValue, _uValue = paire
257 self.__addOneMatrixCall()
258 HxValue.append( self.__Matrix * _xValue )
# Method form: build the argument list for the multi-function.
261 for paire in _xuValue:
263 _xValue, _uValue = paire
264 if _uValue is not None:
# NOTE(review): this appends to _xuValue while iterating over it, which
# would loop indefinitely if executed as shown; upstream versions append
# to a separate list — probably its initialisation and name were lost in
# this truncated extract. Verify against the upstream source.
265 _xuValue.append( paire )
267 _xuValue.append( _xValue )
268 self.__addOneMethodCall( len(_xuValue) )
269 if self.__extraArgs is None:
270 HxValue = self.__Method( _xuValue ) # Calcul MF
272 HxValue = self.__Method( _xuValue, self.__extraArgs ) # Calcul MF
274 if argsAsSerie: return HxValue
275 else: return HxValue[-1]
# Apply the operator defined at a nominal point to (xNominal, xValue)
# pairs; a matrix operator is linear and ignores the nominal point.
277 def appliedInXTo(self, paires, argsAsSerie = False ):
279 Permet de restituer le résultat de l'application de l'opérateur à une
280 série d'arguments xValue, sachant que l'opérateur est valable en
281 xNominal. Cette méthode se contente d'appliquer, son argument devant a
282 priori être du bon type. Si l'opérateur est linéaire car c'est une
283 matrice, alors il est valable en tout point nominal et xNominal peut
284 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
285 permet d'indiquer que l'argument est multi-paires.
287 - paires : les arguments par paire sont :
288 - xNominal : série d'arguments permettant de donner le point où
289 l'opérateur est construit pour être ensuite appliqué
290 - xValue : série d'arguments adaptés pour appliquer l'opérateur
291 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
293 if argsAsSerie: _nxValue = paires
294 else: _nxValue = (paires,)
295 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
297 if self.__Matrix is not None:
299 for paire in _nxValue:
300 _xNominal, _xValue = paire
301 self.__addOneMatrixCall()
302 HxValue.append( self.__Matrix * _xValue )
# Method form: delegate the whole pair series to the multi-function.
304 self.__addOneMethodCall( len(_nxValue) )
305 if self.__extraArgs is None:
306 HxValue = self.__Method( _nxValue ) # Calcul MF
308 HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
310 if argsAsSerie: return HxValue
311 else: return HxValue[-1]
# Return the operator in matrix form; a functional operator needs an
# operating point at which to evaluate it.
313 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
315 Permet de renvoyer l'opérateur sous la forme d'une matrice
317 if self.__Matrix is not None:
318 self.__addOneMatrixCall()
319 mValue = [self.__Matrix,]
# NOTE(review): `is not` with a str literal is an identity comparison on
# a sentinel string — it emits a SyntaxWarning on modern CPython and is
# only reliable through small-string interning. A dedicated sentinel
# object would be the robust fix. Left unchanged (comment-only pass).
320 elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None"
323 self.__addOneMethodCall( len(ValueForMethodForm) )
324 for _vfmf in ValueForMethodForm:
325 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
327 self.__addOneMethodCall()
328 mValue = self.__Method(((ValueForMethodForm, None),))
330 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
332 if argsAsSerie: return mValue
333 else: return mValue[-1]
# shape property/method (its "def" line is missing from this extract):
# only meaningful when the operator is stored as a matrix.
337 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
338 la forme d'une matrice
340 if self.__Matrix is not None:
341 return self.__Matrix.shape
343 raise ValueError("Matrix form of the operator is not available, nor the shape")
# Report call counts: indices 0-3 are this instance's totals, 4-7 are the
# class-wide totals accumulated across all Operator instances.
345 def nbcalls(self, which=None):
347 Renvoie les nombres d'évaluations de l'opérateur
350 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
351 self.__NbCallsAsMatrix,
352 self.__NbCallsAsMethod,
353 self.__NbCallsOfCached,
354 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
355 Operator.NbCallsAsMatrix,
356 Operator.NbCallsAsMethod,
357 Operator.NbCallsOfCached,
359 if which is None: return __nbcalls
360 else: return __nbcalls[which]
# Bump both the per-instance and the class-wide matrix-call counters.
362 def __addOneMatrixCall(self):
363 "Comptabilise un appel"
364 self.__NbCallsAsMatrix += 1 # Decompte local
365 Operator.NbCallsAsMatrix += 1 # Decompte global
# Bump the method-call counters by nb (a batch counts each point).
367 def __addOneMethodCall(self, nb = 1):
368 "Comptabilise un appel"
369 self.__NbCallsAsMethod += nb # Decompte local
370 Operator.NbCallsAsMethod += nb # Decompte global
# Bump the cache-hit counters.
372 def __addOneCacheCall(self):
373 "Comptabilise un appel"
374 self.__NbCallsOfCached += 1 # Decompte local
375 Operator.NbCallsOfCached += 1 # Decompte global
377 # ==============================================================================
# FullOperator: bundles the three faces of an observation/evolution
# operator — Direct, Tangent (linearized), Adjoint — into self.__FO,
# built from a matrix, one function (with approximated derivatives via
# finite differences), three functions, or a script providing them.
# NOTE(review): this extract embeds original line numbers and is missing
# interior lines; only comments were added, no code token was changed.
378 class FullOperator(object):
380 Classe générale d'interface de type opérateur complet
381 (Direct, Linéaire Tangent, Adjoint)
# Constructor keyword defaults (the "def __init__" line and several
# parameters such as asMatrix/appliedInX/avoidRC are on missing lines).
384 name = "GenericFullOperator",
386 asOneFunction = None, # 1 Fonction
387 asThreeFunctions = None, # 3 Fonctions in a dictionary
388 asScript = None, # 1 or 3 Fonction(s) by script
389 asDict = None, # Parameters
391 extraArguments = None,
393 inputAsMF = False,# Fonction(s) as Multi-Functions
398 self.__name = str(name)
399 self.__check = bool(toBeChecked)
400 self.__extraArgs = extraArguments
# Map user-facing parameter names onto the internal "with..." keys that
# the finite-difference machinery reads further below.
405 if (asDict is not None) and isinstance(asDict, dict):
406 __Parameters.update( asDict )
407 if "DifferentialIncrement" in asDict:
408 __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
409 if "CenteredFiniteDifference" in asDict:
410 __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
411 if "EnableMultiProcessing" in asDict:
412 __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
413 if "NumberOfProcesses" in asDict:
414 __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
# Script source: import either a matrix, one Direct function (with
# approximated derivatives), or the full Direct/Tangent/Adjoint trio.
416 if asScript is not None:
417 __Matrix, __Function = None, None
419 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
421 __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
422 __Function.update({"useApproximatedDerivatives":True})
423 __Function.update(__Parameters)
424 elif asThreeFunctions:
426 "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
427 "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
428 "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
430 __Function.update(__Parameters)
# Non-script sources: one function, or a dict of up to three functions.
433 if asOneFunction is not None:
434 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
435 if asOneFunction["Direct"] is not None:
436 __Function = asOneFunction
438 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
440 __Function = { "Direct":asOneFunction }
441 __Function.update({"useApproximatedDerivatives":True})
442 __Function.update(__Parameters)
443 elif asThreeFunctions is not None:
# Full trio given explicitly (and derivatives not approximated).
444 if isinstance(asThreeFunctions, dict) and \
445 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
446 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
447 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
448 __Function = asThreeFunctions
# Only Direct given: fall back to approximated derivatives.
449 elif isinstance(asThreeFunctions, dict) and \
450 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
451 __Function = asThreeFunctions
452 __Function.update({"useApproximatedDerivatives":True})
454 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Direct missing but Tangent present: use Tangent as the Direct operator.
455 if "Direct" not in asThreeFunctions:
456 __Function["Direct"] = asThreeFunctions["Tangent"]
457 __Function.update(__Parameters)
# Dead Python-2 compatibility check, kept commented out upstream.
461 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
462 # for k in ("Direct", "Tangent", "Adjoint"):
463 # if k in __Function and hasattr(__Function[k],"__class__"):
464 # if type(__Function[k]) is type(self.__init__):
465 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the optional precomputed H(Xb) into a dict keyed by "HXb".
467 if appliedInX is not None and isinstance(appliedInX, dict):
468 __appliedInX = appliedInX
469 elif appliedInX is not None:
470 __appliedInX = {"HXb":appliedInX}
474 if scheduledBy is not None:
475 self.__T = scheduledBy
# Branch 1: Direct-only function with approximated derivatives — fill in
# every "with..." default, then build Tangent/Adjoint by finite
# differences through FDApproximation.
477 if isinstance(__Function, dict) and \
478 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
479 ("Direct" in __Function) and (__Function["Direct"] is not None):
480 if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
481 if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
482 if "withdX" not in __Function: __Function["withdX"] = None
483 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
484 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
485 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
486 if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
487 if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
488 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
489 from daNumerics.ApproximatedDerivatives import FDApproximation
490 FDA = FDApproximation(
491 Function = __Function["Direct"],
492 centeredDF = __Function["withCenteredDF"],
493 increment = __Function["withIncrement"],
494 dX = __Function["withdX"],
495 avoidingRedundancy = __Function["withAvoidingRedundancy"],
496 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
497 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
498 mpEnabled = __Function["withmpEnabled"],
499 mpWorkers = __Function["withmpWorkers"],
500 mfEnabled = __Function["withmfEnabled"],
502 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
503 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
504 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Branch 2: all three functions supplied explicitly.
505 elif isinstance(__Function, dict) and \
506 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
507 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
508 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
509 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
510 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Branch 3: matrix operator — Tangent is the matrix itself, Adjoint its
# transpose. NOTE(review): numpy.float is a deprecated alias removed in
# NumPy 1.24 (same issue as in Operator.__init__).
511 elif asMatrix is not None:
512 __matrice = numpy.matrix( __Matrix, numpy.float )
513 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
514 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
515 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
518 raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
# Normalize each precomputed applied-in-X value into a float column matrix.
520 if __appliedInX is not None:
521 self.__FO["AppliedInX"] = {}
522 for key in list(__appliedInX.keys()):
523 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
524 # Pour le cas où l'on a une vraie matrice
525 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
526 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
527 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
528 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
530 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
532 self.__FO["AppliedInX"] = None
# NOTE(review): __repr__/__str__ reference self.__V, but no assignment to
# __V is visible anywhere in this class (only self.__FO is set) — name
# mangling would make this raise AttributeError at runtime. Looks like a
# copy-paste from a sibling class; verify against the upstream source.
538 "x.__repr__() <==> repr(x)"
539 return repr(self.__V)
542 "x.__str__() <==> str(x)"
545 # ==============================================================================
546 class Algorithm(object):
548 Classe générale d'interface de type algorithme
550 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
551 d'assimilation, en fournissant un container (dictionnaire) de variables
552 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
554 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
556 def __init__(self, name):
558 L'initialisation présente permet de fabriquer des variables de stockage
559 disponibles de manière générique dans les algorithmes élémentaires. Ces
560 variables de stockage sont ensuite conservées dans un dictionnaire
561 interne à l'objet, mais auquel on accède par la méthode "get".
563 Les variables prévues sont :
564 - APosterioriCorrelations : matrice de corrélations de la matrice A
565 - APosterioriCovariance : matrice de covariances a posteriori : A
566 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
567 - APosterioriVariances : vecteur des variances de la matrice A
568 - Analysis : vecteur d'analyse : Xa
569 - BMA : Background moins Analysis : Xa - Xb
570 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
571 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
572 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
573 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
574 - CostFunctionJo : partie observations de la fonction-coût : Jo
575 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
576 - CurrentOptimum : état optimal courant lors d'itérations
577 - CurrentState : état courant lors d'itérations
578 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
579 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
580 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
581 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
582 - Innovation : l'innovation : d = Y - H(X)
583 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
584 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
585 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
586 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
587 - KalmanGainAtOptimum : gain de Kalman à l'optimum
588 - MahalanobisConsistency : indicateur de consistance des covariances
589 - OMA : Observation moins Analyse : Y - Xa
590 - OMB : Observation moins Background : Y - Xb
591 - PredictedState : état prédit courant lors d'itérations
592 - Residu : dans le cas des algorithmes de vérification
593 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
594 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
595 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
596 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
597 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
598 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
599 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
600 On peut rajouter des variables à stocker dans l'initialisation de
601 l'algorithme élémentaire qui va hériter de cette classe
603 logging.debug("%s Initialisation", str(name))
604 self._m = PlatformInfo.SystemUsage()
606 self._name = str( name )
607 self._parameters = {"StoreSupplementaryCalculations":[]}
608 self.__required_parameters = {}
609 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
611 self.StoredVariables = {}
612 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
613 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
614 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
615 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
616 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
617 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
618 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
619 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
620 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
621 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
622 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
623 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
624 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
625 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
626 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
627 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
628 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
629 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
630 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
631 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
632 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
633 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
634 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
635 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
636 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
637 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
638 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
639 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
640 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
641 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
642 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
643 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
644 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
645 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
646 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
647 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
648 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
649 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
651 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
653 logging.debug("%s Lancement", self._name)
654 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
656 # Mise a jour de self._parameters avec Parameters
657 self.__setParameters(Parameters)
659 # Corrections et complements
660 def __test_vvalue(argument, variable, argname):
662 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
663 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
664 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
665 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
667 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
669 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
671 __test_vvalue( Xb, "Xb", "Background or initial state" )
672 __test_vvalue( Y, "Y", "Observation" )
674 def __test_cvalue(argument, variable, argname):
676 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
677 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
678 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
679 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
681 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
683 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
685 __test_cvalue( R, "R", "Observation" )
686 __test_cvalue( B, "B", "Background" )
687 __test_cvalue( Q, "Q", "Evolution" )
689 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
690 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
692 self._parameters["Bounds"] = None
694 if logging.getLogger().level < logging.WARNING:
695 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
696 if PlatformInfo.has_scipy:
697 import scipy.optimize
698 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
700 self._parameters["optmessages"] = 15
702 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
703 if PlatformInfo.has_scipy:
704 import scipy.optimize
705 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
707 self._parameters["optmessages"] = 15
711 def _post_run(self,_oH=None):
713 if ("StoreSupplementaryCalculations" in self._parameters) and \
714 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
715 for _A in self.StoredVariables["APosterioriCovariance"]:
716 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
717 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
718 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
719 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
720 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
721 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
722 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
723 self.StoredVariables["APosterioriCorrelations"].store( _C )
725 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
726 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
727 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
728 logging.debug("%s Terminé", self._name)
731 def _toStore(self, key):
732 "True if in StoreSupplementaryCalculations, else False"
733 return key in self._parameters["StoreSupplementaryCalculations"]
735 def get(self, key=None):
737 Renvoie l'une des variables stockées identifiée par la clé, ou le
738 dictionnaire de l'ensemble des variables disponibles en l'absence de
739 clé. Ce sont directement les variables sous forme objet qui sont
740 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
741 des classes de persistance.
744 return self.StoredVariables[key]
746 return self.StoredVariables
748 def __contains__(self, key=None):
749 "D.__contains__(k) -> True if D has a key k, else False"
750 return key in self.StoredVariables
753 "D.keys() -> list of D's keys"
754 if hasattr(self, "StoredVariables"):
755 return self.StoredVariables.keys()
760 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
761 if hasattr(self, "StoredVariables"):
762 return self.StoredVariables.pop(k, d)
767 raise TypeError("pop expected at least 1 arguments, got 0")
768 "If key is not found, d is returned if given, otherwise KeyError is raised"
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
    """
    Abstract elementary assimilation computation: every concrete algorithm
    must override this method with its most natural mathematical formulation.
    Calling the base version always fails.
    """
    __msg = "Mathematical assimilation calculation has not been implemented!"
    raise NotImplementedError(__msg)
# Declares one required algorithm parameter and its constraints (default,
# typecast, bounds, allowed values). Several lines of the stored dictionary
# (default/minval/maxval/listval/message entries) are missing in this extract.
781 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
783 Permet de définir dans l'algorithme des paramètres requis et leurs
784 caractéristiques par défaut.
787 raise ValueError("A name is mandatory to define a required parameter.")
789 self.__required_parameters[name] = {
791 "typecast" : typecast,
# NOTE(review): logs the default by evaluating setParameterValue(name).
797 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# Returns either the sorted names of the required parameters (noDetails=True)
# or the full description dictionary; the if/else lines are missing here.
799 def getRequiredParameters(self, noDetails=True):
801 Renvoie la liste des noms de paramètres requis ou directement le
802 dictionnaire des paramètres requis.
805 return sorted(self.__required_parameters.keys())
807 return self.__required_parameters
# Validates and returns the value of one required parameter: applies the
# declared typecast, falls back on the default, then enforces minval/maxval
# bounds and listval membership. Several control-flow lines (the value-is-None
# branch, the per-item loop over list values) are missing from this extract.
809 def setParameterValue(self, name=None, value=None):
811 Renvoie la valeur d'un paramètre requis de manière contrôlée
813 default  = self.__required_parameters[name]["default"]
814 typecast = self.__required_parameters[name]["typecast"]
815 minval   = self.__required_parameters[name]["minval"]
816 maxval   = self.__required_parameters[name]["maxval"]
817 listval  = self.__required_parameters[name]["listval"]
819 if value is None and default is None:
821 elif value is None and default is not None:
822 if typecast is None: __val = default
823 else: __val = typecast( default )
825 if typecast is None: __val = value
826 else: __val = typecast( value )
# Bound checks are vectorized so they also work on sequence-valued parameters.
828 if minval is not None and (numpy.array(__val, float) < minval).any():
829 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
830 if maxval is not None and (numpy.array(__val, float) > maxval).any():
831 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
832 if listval is not None:
833 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
836 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
837 elif __val not in listval:
838 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
def requireInputArguments(self, mandatory=(), optional=()):
    """
    Record the names of the input arguments the algorithm requires, split
    between mandatory and optional ones.
    """
    for family, names in (("mandatory", mandatory), ("optional", optional)):
        self.__required_inputs["RequiredInputValues"][family] = tuple( names )
# Merges the user-supplied dictionary into the internal parameters, then
# (re)validates every declared required parameter through setParameterValue.
# NOTE(review): mutable default argument `fromDico={}` is a known Python
# pitfall — harmless here only as long as the dict is never mutated; the
# `else:` line between inner lines 855 and 857 is missing from this extract.
848 def __setParameters(self, fromDico={}):
850 Permet de stocker les paramètres reçus dans le dictionnaire interne.
852 self._parameters.update( fromDico )
853 for k in self.__required_parameters.keys():
854 if k in fromDico.keys():
855 self._parameters[k] = self.setParameterValue(k,fromDico[k])
857 self._parameters[k] = self.setParameterValue(k)
858 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
860 # ==============================================================================
# Interface object binding one algorithm choice with its parameter set.
# The constructor below is heavily truncated in this extract (signature
# arguments, the asAlgorithm branch and self.__P initialisation are missing).
861 class AlgorithmAndParameters(object):
863 Classe générale d'interface d'action pour l'algorithme et ses paramètres
866 name                    = "GenericAlgorithm",
873 self.__name       = str(name)
877 self.__algorithm     = {}
878 self.__algorithmFile = None
879 self.__algorithmName = None
881 self.updateParameters( asDict, asScript )
# An algorithm may be supplied directly or read from a user script.
883 if asAlgorithm is None and asScript is not None:
884 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
888 if __Algo is not None:
889 self.__A = str(__Algo)
890 self.__P.update( {"Algorithm":self.__A} )
892 self.__setAlgorithm( self.__A )
# Updates the internal parameter dictionary, either from an explicit dict or
# from a "Parameters" variable found in a user script; the signature arguments
# and the asDict branch are missing from this lossy extract.
894 def updateParameters(self,
898 "Mise a jour des parametres"
899 if asDict is None and asScript is not None:
900 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
904 if __Dict is not None:
905 self.__P.update( dict(__Dict) )
# Runs the assimilation calculation in pure Python: unwraps every input object
# (via its getO() accessor when available), validates shapes, then calls the
# selected algorithm's run(). Some lines (shape-validate call context, the
# remaining run() keyword arguments) are missing from this extract.
907 def executePythonScheme(self, asDictAO = None):
908 "Permet de lancer le calcul d'assimilation"
# The operator calculation cache is reset before each new run.
909 Operator.CM.clearCache()
911 if not isinstance(asDictAO, dict):
912 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
913 if   hasattr(asDictAO["Background"],"getO"):        self.__Xb = asDictAO["Background"].getO()
914 elif hasattr(asDictAO["CheckingPoint"],"getO"):     self.__Xb = asDictAO["CheckingPoint"].getO()
915 else:                                               self.__Xb = None
916 if hasattr(asDictAO["Observation"],"getO"):         self.__Y  = asDictAO["Observation"].getO()
917 else:                                               self.__Y  = asDictAO["Observation"]
918 if hasattr(asDictAO["ControlInput"],"getO"):        self.__U  = asDictAO["ControlInput"].getO()
919 else:                                               self.__U  = asDictAO["ControlInput"]
920 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
921 else:                                               self.__HO = asDictAO["ObservationOperator"]
922 if hasattr(asDictAO["EvolutionModel"],"getO"):      self.__EM = asDictAO["EvolutionModel"].getO()
923 else:                                               self.__EM = asDictAO["EvolutionModel"]
924 if hasattr(asDictAO["ControlModel"],"getO"):        self.__CM = asDictAO["ControlModel"].getO()
925 else:                                               self.__CM = asDictAO["ControlModel"]
926 self.__B = asDictAO["BackgroundError"]
927 self.__R = asDictAO["ObservationError"]
928 self.__Q = asDictAO["EvolutionError"]
930 self.__shape_validate()
932 self.__algorithm.run(
942 Parameters    = self.__P,
# Runs the assimilation through a YACS XML scheme inside a SALOME session:
# checks the environment, loads and validates the schema, then executes it.
# Several lines (imports of pilot/SALOMERuntime/loader, try/except scaffolding,
# the RunW execution call) are missing from this lossy extract.
946 def executeYACSScheme(self, FileName=None):
947 "Permet de lancer le calcul d'assimilation"
948 if FileName is None or not os.path.exists(FileName):
949 raise ValueError("a YACS file name has to be given for YACS execution.\n")
951 __file    = os.path.abspath(FileName)
952 logging.debug("The YACS file name is \"%s\"."%__file)
# Abort early unless the full SALOME/YACS/ADAO environment is available.
953 if not PlatformInfo.has_salome or \
954 not PlatformInfo.has_yacs or \
955 not PlatformInfo.has_adao:
956 raise ImportError("\n\n"+\
957 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
958 "Please load the right environnement before trying to use it.\n")
963 SALOMERuntime.RuntimeSALOME_setRuntime()
965 r = pilot.getRuntime()
966 xmlLoader = loader.YACSLoader()
967 xmlLoader.registerProcCataLoader()
969 catalogAd = r.loadCatalog("proc", __file)
970 r.addCatalog(catalogAd)
975 p = xmlLoader.load(__file)
976 except IOError as ex:
977 print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Parsing, validity and coherence diagnostics are printed but do not raise.
979 logger = p.getLogger("parser")
980 if not logger.isEmpty():
981 print("The imported YACS XML schema has errors on parsing:")
982 print(logger.getStr())
985 print("The YACS XML schema is not valid and will not be executed:")
986 print(p.getErrorReport())
988 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
989 p.checkConsistency(info)
990 if info.areWarningsOrErrors():
991 print("The YACS XML schema is not coherent and will not be executed:")
992 print(info.getGlobalRepr())
994 e = pilot.ExecutorSwig()
996 if p.getEffectiveState() != pilot.DONE:
997 print(p.getErrorReport())
# Lookup by key, first among the algorithm's stored variables, then among the
# parameters; the no-key fallback branch is missing from this extract.
1001 def get(self, key = None):
1002 "Vérifie l'existence d'une clé de variable ou de paramètres"
1003 if key in self.__algorithm:
1004 return self.__algorithm.get( key )
1005 elif key in self.__P:
1006 return self.__P[key]
def pop(self, k, d):
    "Required for pickling: delegates removal to the underlying algorithm."
    container = self.__algorithm
    return container.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the required-parameter description as exposed by the algorithm."
    # Simple delegation to the selected algorithm object.
    algorithm = self.__algorithm
    return algorithm.getRequiredParameters(noDetails)
# Attaches the observer function __O (with info __I, scheduler __S) to the
# stored variable named __V; the docstring delimiters and the remaining
# keyword arguments of setDataObserver are missing from this extract.
1018 def setObserver(self, __V, __O, __I, __S):
1019 if self.__algorithm is None \
1020 or isinstance(self.__algorithm, dict) \
1021 or not hasattr(self.__algorithm,"StoredVariables"):
1022 raise ValueError("No observer can be build before choosing an algorithm.")
1023 if __V not in self.__algorithm:
1024 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1026 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1029 HookParameters  = __I,
# Detaches observer __O from the stored variable named __V; the arguments of
# removeDataObserver are missing from this lossy extract.
1032 def removeObserver(self, __V, __O, __A = False):
1033 if self.__algorithm is None \
1034 or isinstance(self.__algorithm, dict) \
1035 or not hasattr(self.__algorithm,"StoredVariables"):
1036 raise ValueError("No observer can be removed before choosing an algorithm.")
1037 if __V not in self.__algorithm:
1038 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1040 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# Tells whether the stored variable __V has an observer attached; the early
# `return False` lines of both guard branches are missing from this extract.
1045 def hasObserver(self, __V):
1046 if self.__algorithm is None \
1047 or isinstance(self.__algorithm, dict) \
1048 or not hasattr(self.__algorithm,"StoredVariables"):
1050 if __V not in self.__algorithm:
1052 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# Body of keys(): union of variable names and parameter names; the `def`
# line itself is missing from this lossy extract.
1055 return list(self.__algorithm.keys()) + list(self.__P.keys())
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # A key may live either among stored variables or among parameters.
    return (key in self.__algorithm) or (key in self.__P)
# repr/str fragments: both render the algorithm name and the parameter dict;
# the two `def` lines are missing from this lossy extract.
1062 "x.__repr__() <==> repr(x)"
1063 return repr(self.__A)+", "+repr(self.__P)
1066 "x.__str__() <==> str(x)"
1067 return str(self.__A)+", "+str(self.__P)
# Resolves the algorithm name to a module file under "daAlgorithms" on
# sys.path, imports it, and instantiates its ElementaryAlgorithm class.
# Guard lines (choice is None test, try: scaffolding) are missing here.
1069 def __setAlgorithm(self, choice = None ):
1071 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1072 d'assimilation. L'argument est un champ caractère se rapportant au nom
1073 d'un algorithme réalisant l'opération sur les arguments fixes.
1076 raise ValueError("Error: algorithm choice has to be given")
# An algorithm can only be chosen once per object.
1077 if self.__algorithmName is not None:
1078 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1079 daDirectory = "daAlgorithms"
1081 # Recherche explicitement le fichier complet
1082 # ------------------------------------------
1084 for directory in sys.path:
1085 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1086 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1087 if module_path is None:
1088 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1090 # Importe le fichier complet comme un module
1091 # ------------------------------------------
# sys.path is temporarily extended, then restored after the import.
1093 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1094 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1095 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1096 raise ImportError("this module does not define a valid elementary algorithm.")
1097 self.__algorithmName = str(choice)
1098 sys.path = sys_path_tmp ; del sys_path_tmp
1099 except ImportError as e:
1100 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1102 # Instancie un objet du type élémentaire du fichier
1103 # -------------------------------------------------
1104 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# Computes a canonical shape for every input (vectors, covariances, operators)
# and cross-checks their compatibility before running the algorithm. Objects
# may expose shape either as a tuple attribute or as a callable. Some lines
# are missing from this extract (notably an `else:` before inner line 1205).
1107 def __shape_validate(self):
1109 Validation de la correspondance correcte des tailles des variables et
1110 des matrices s'il y en a.
1112 if self.__Xb is None:                      __Xb_shape = (0,)
1113 elif hasattr(self.__Xb,"size"):            __Xb_shape = (self.__Xb.size,)
1114 elif hasattr(self.__Xb,"shape"):
1115 if isinstance(self.__Xb.shape, tuple):     __Xb_shape = self.__Xb.shape
1116 else:                                      __Xb_shape = self.__Xb.shape()
1117 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1119 if self.__Y is None:                       __Y_shape = (0,)
1120 elif hasattr(self.__Y,"size"):             __Y_shape = (self.__Y.size,)
1121 elif hasattr(self.__Y,"shape"):
1122 if isinstance(self.__Y.shape, tuple):      __Y_shape = self.__Y.shape
1123 else:                                      __Y_shape = self.__Y.shape()
1124 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1126 if self.__U is None:                       __U_shape = (0,)
1127 elif hasattr(self.__U,"size"):             __U_shape = (self.__U.size,)
1128 elif hasattr(self.__U,"shape"):
1129 if isinstance(self.__U.shape, tuple):      __U_shape = self.__U.shape
1130 else:                                      __U_shape = self.__U.shape()
1131 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1133 if self.__B is None:                       __B_shape = (0,0)
1134 elif hasattr(self.__B,"shape"):
1135 if isinstance(self.__B.shape, tuple):      __B_shape = self.__B.shape
1136 else:                                      __B_shape = self.__B.shape()
1137 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1139 if self.__R is None:                       __R_shape = (0,0)
1140 elif hasattr(self.__R,"shape"):
1141 if isinstance(self.__R.shape, tuple):      __R_shape = self.__R.shape
1142 else:                                      __R_shape = self.__R.shape()
1143 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1145 if self.__Q is None:                       __Q_shape = (0,0)
1146 elif hasattr(self.__Q,"shape"):
1147 if isinstance(self.__Q.shape, tuple):      __Q_shape = self.__Q.shape
1148 else:                                      __Q_shape = self.__Q.shape()
1149 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
# Operators are dictionaries holding a "Direct" entry whose shape is probed.
1151 if len(self.__HO) == 0:                              __HO_shape = (0,0)
1152 elif isinstance(self.__HO, dict):                    __HO_shape = (0,0)
1153 elif hasattr(self.__HO["Direct"],"shape"):
1154 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1155 else:                                            __HO_shape = self.__HO["Direct"].shape()
1156 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1158 if len(self.__EM) == 0:                              __EM_shape = (0,0)
1159 elif isinstance(self.__EM, dict):                    __EM_shape = (0,0)
1160 elif hasattr(self.__EM["Direct"],"shape"):
1161 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1162 else:                                            __EM_shape = self.__EM["Direct"].shape()
1163 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1165 if len(self.__CM) == 0:                              __CM_shape = (0,0)
1166 elif isinstance(self.__CM, dict):                    __CM_shape = (0,0)
1167 elif hasattr(self.__CM["Direct"],"shape"):
1168 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1169 else:                                            __CM_shape = self.__CM["Direct"].shape()
1170 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1172 # Vérification des conditions
1173 # ---------------------------
1174 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1175 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1176 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1177 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
# Covariance and evolution matrices must be square.
1179 if not( min(__B_shape) == max(__B_shape) ):
1180 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1181 if not( min(__R_shape) == max(__R_shape) ):
1182 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1183 if not( min(__Q_shape) == max(__Q_shape) ):
1184 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1185 if not( min(__EM_shape) == max(__EM_shape) ):
1186 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1188 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1189 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1190 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1191 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1192 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1193 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1194 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1195 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# Special case: EnsembleBlue accepts a background given as a series of
# vectors, reshaped here to a persistent vector collection.
1197 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1198 if self.__algorithmName in ["EnsembleBlue",]:
1199 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1200 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1201 for member in asPersistentVector:
1202 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1203 __Xb_shape = min(__B_shape)
1205 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1207 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1208 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1210 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1211 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1213 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1214 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1216 if ("Bounds" in self.__P) \
1217 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1218 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1219 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1220 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1224 # ==============================================================================
# Interface object binding a regulation algorithm name with its parameters:
# both can come from explicit arguments or from a user script. The extract is
# lossy (constructor signature arguments, self.__P init, get()'s guard lines
# are missing).
1225 class RegulationAndParameters(object):
1227 Classe générale d'interface d'action pour la régulation et ses paramètres
1230 name                    = "GenericRegulation",
1237 self.__name       = str(name)
1240 if asAlgorithm is None and asScript is not None:
1241 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
1243 __Algo = asAlgorithm
1245 if asDict is None and asScript is not None:
1246 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1250 if __Dict is not None:
1251 self.__P.update( dict(__Dict) )
1253 if __Algo is not None:
1254 self.__P.update( {"Algorithm":__Algo} )
1256 def get(self, key = None):
1257 "Vérifie l'existence d'une clé de variable ou de paramètres"
1259 return self.__P[key]
1263 # ==============================================================================
# Observer interface: normalizes the observed variable name(s) and info
# string(s), builds the observer callable (from a string, a template, a
# script or a ready object), and registers it on each algorithm variable.
# Constructor signature arguments and several branch lines are missing from
# this lossy extract.
1264 class DataObserver(object):
1266 Classe générale d'interface de type observer
1269 name        = "GenericObserver",
1281 self.__name       = str(name)
1286 if onVariable is None:
1287 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1288 elif type(onVariable) in (tuple, list):
1289 self.__V = tuple(map( str, onVariable ))
1290 if withInfo is None:
# A single info string is replicated for every observed variable.
1293 self.__I = (str(withInfo),)*len(self.__V)
1294 elif isinstance(onVariable, str):
1295 self.__V = (onVariable,)
1296 if withInfo is None:
1297 self.__I = (onVariable,)
1299 self.__I = (str(withInfo),)
1301 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer body may come from a raw string, a named template or a script.
1303 if asString is not None:
1304 __FunctionText = asString
1305 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1306 __FunctionText = Templates.ObserverTemplates[asTemplate]
1307 elif asScript is not None:
1308 __FunctionText = ImportFromScript(asScript).getstring()
1311 __Function = ObserverF(__FunctionText)
1313 if asObsObject is not None:
1314 self.__O = asObsObject
1316 self.__O = __Function.getfunc()
1318 for k in range(len(self.__V)):
1321 if ename not in withAlgo:
1322 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1324 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1327 "x.__repr__() <==> repr(x)"
1328 return repr(self.__V)+"\n"+repr(self.__O)
1331 "x.__str__() <==> str(x)"
1332 return str(self.__V)+"\n"+str(self.__O)
1334 # ==============================================================================
# State interface: wraps a single vector or a series of vectors (Persistence
# object), read from direct values, a script, or column data files. Several
# constructor arguments and branch lines are missing from this lossy extract.
1335 class State(object):
1337 Classe générale d'interface de type état
1340 name               = "GenericVector",
1342 asPersistentVector = None,
1348 toBeChecked        = False,
1351 Permet de définir un vecteur :
1352 - asVector : entrée des données, comme un vecteur compatible avec le
1353 constructeur de numpy.matrix, ou "True" si entrée par script.
1354 - asPersistentVector : entrée des données, comme une série de vecteurs
1355 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1356 type Persistence, ou "True" si entrée par script.
1357 - asScript : si un script valide est donné contenant une variable
1358 nommée "name", la variable est de type "asVector" (par défaut) ou
1359 "asPersistentVector" selon que l'une de ces variables est placée à
1361 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1362 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1363 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1364 nommée "name"), on récupère les colonnes et on les range ligne après
1365 ligne (colMajor=False) ou colonne après colonne (colMajor=True). La
1366 variable résultante est de type "asVector" (par défaut) ou
1367 "asPersistentVector" selon que l'une de ces variables est placée à
1370 self.__name       = str(name)
1371 self.__check      = bool(toBeChecked)
1375 self.__is_vector = False
1376 self.__is_series = False
# Input source selection: script, data file, or direct values.
1378 if asScript is not None:
1379 __Vector, __Series = None, None
1380 if asPersistentVector:
1381 __Series = ImportFromScript(asScript).getvalue( self.__name )
1383 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1384 elif asDataFile is not None:
1385 __Vector, __Series = None, None
1386 if asPersistentVector:
1387 if colNames is not None:
1388 __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
1390 __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are stored transposed with respect to text column files.
1391 if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1392 __Series = numpy.transpose(__Series)
1393 elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1394 __Series = numpy.transpose(__Series)
1396 if colNames is not None:
1397 __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
1399 __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1401 __Vector = numpy.ravel(__Vector, order = "F")
1403 __Vector = numpy.ravel(__Vector, order = "C")
1405 __Vector, __Series = asVector, asPersistentVector
1407 if __Vector is not None:
1408 self.__is_vector = True
1409 self.__V         = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1410 self.shape       = self.__V.shape
1411 self.size        = self.__V.size
1412 elif __Series is not None:
1413 self.__is_series  = True
1414 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1415 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string series — only safe for trusted input.
1416 if isinstance(__Series, str): __Series = eval(__Series)
1417 for member in __Series:
1418 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1421 if isinstance(self.__V.shape, (tuple, list)):
1422 self.shape       = self.__V.shape
1424 self.shape       = self.__V.shape()
1425 if len(self.shape) == 1:
1426 self.shape       = (self.shape[0],1)
1427 self.size        = self.shape[0] * self.shape[1]
1429 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1431 if scheduledBy is not None:
1432 self.__T = scheduledBy
1434 def getO(self, withScheduler=False):
1436 return self.__V, self.__T
1437 elif self.__T is None:
1443 "Vérification du type interne"
1444 return self.__is_vector
1447 "Vérification du type interne"
1448 return self.__is_series
1451 "x.__repr__() <==> repr(x)"
1452 return repr(self.__V)
1455 "x.__str__() <==> str(x)"
1456 return str(self.__V)
1458 # ==============================================================================
# Covariance interface: a covariance can be stored as a full matrix, a
# diagonal vector, a scalar multiplying the identity, or an arbitrary object
# implementing a minimal matrix protocol. Constructor signature arguments and
# some branch/assignment lines are missing from this lossy extract.
1459 class Covariance(object):
1461 Classe générale d'interface de type covariance
1464 name          = "GenericCovariance",
1465 asCovariance  = None,
1466 asEyeByScalar = None,
1467 asEyeByVector = None,
1470 toBeChecked   = False,
1473 Permet de définir une covariance :
1474 - asCovariance : entrée des données, comme une matrice compatible avec
1475 le constructeur de numpy.matrix
1476 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1477 multiplicatif d'une matrice de corrélation identité, aucune matrice
1478 n'étant donc explicitement à donner
1479 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1480 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1481 n'étant donc explicitement à donner
1482 - asCovObject : entrée des données comme un objet python, qui a les
1483 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1484 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1485 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1486 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1487 pleine doit être vérifié
1489 self.__name       = str(name)
1490 self.__check      = bool(toBeChecked)
1493 self.__is_scalar = False
1494 self.__is_vector = False
1495 self.__is_matrix = False
1496 self.__is_object = False
# Script input: the same named variable can define any of the four storages.
1498 if asScript is not None:
1499 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1501 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1503 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1505 __Object = ImportFromScript(asScript).getvalue( self.__name )
1507 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1509 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1511 if __Scalar is not None:
1512 if numpy.matrix(__Scalar).size != 1:
1513 raise ValueError('  The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n  Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1514 self.__is_scalar = True
# NOTE(review): abs() silently makes negative variances positive here.
1515 self.__C         = numpy.abs( float(__Scalar) )
1518 elif __Vector is not None:
1519 self.__is_vector = True
1520 self.__C         = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1521 self.shape       = (self.__C.size,self.__C.size)
1522 self.size        = self.__C.size**2
1523 elif __Matrix is not None:
1524 self.__is_matrix = True
1525 self.__C         = numpy.matrix( __Matrix, float )
1526 self.shape       = self.__C.shape
1527 self.size        = self.__C.size
1528 elif __Object is not None:
1529 self.__is_object = True
# The object storage must provide the full minimal matrix protocol.
1531 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1532 if not hasattr(self.__C,at):
1533 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1534 if hasattr(self.__C,"shape"):
1535 self.shape       = self.__C.shape
1538 if hasattr(self.__C,"size"):
1539 self.size        = self.__C.size
1544 #   raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Checks that the stored covariance is set, square, and positive-definite
# (Cholesky factorization attempted when checking is enabled or the log level
# is verbose); the try/except lines around both factorizations are missing
# from this lossy extract.
1548 def __validate(self):
1550 if self.__C is None:
1551 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1552 if self.ismatrix() and min(self.shape) != max(self.shape):
1553 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1554 if self.isobject() and min(self.shape) != max(self.shape):
1555 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1556 if self.isscalar() and self.__C <= 0:
1557 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1558 if self.isvector() and (self.__C <= 0).any():
1559 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1560 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1562 L = numpy.linalg.cholesky( self.__C )
1564 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1565 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1567 L = self.__C.cholesky()
1569 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Storage-kind predicates (isscalar/isvector/ismatrix/isobject); the four
# `def` lines are missing from this lossy extract.
1572 "Vérification du type interne"
1573 return self.__is_scalar
1576 "Vérification du type interne"
1577 return self.__is_vector
1580 "Vérification du type interne"
1581 return self.__is_matrix
1584 "Vérification du type interne"
1585 return self.__is_object
# Body of getI(): inverse covariance, dispatched on the storage kind; the
# `def getI` line and the leading `if self.ismatrix():` are missing here.
1590 return Covariance(self.__name+"I", asCovariance  = self.__C.I )
1591 elif self.isvector():
1592 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1593 elif self.isscalar():
1594 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1595 elif self.isobject():
1596 return Covariance(self.__name+"I", asCovObject   = self.__C.getI() )
1598 return None # Indispensable
# Body of getT(): transpose covariance (vector/scalar storages are symmetric,
# so they are returned unchanged); the `def getT` line and the leading
# `if self.ismatrix():` are missing from this extract.
1603 return Covariance(self.__name+"T", asCovariance  = self.__C.T )
1604 elif self.isvector():
1605 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1606 elif self.isscalar():
1607 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1608 elif self.isobject():
1609 return Covariance(self.__name+"T", asCovObject   = self.__C.getT() )
# Cholesky factor of the covariance, per storage kind (sqrt on the diagonal
# storages); the `def cholesky` line and the leading `if self.ismatrix():`
# are missing from this lossy extract.
1612 "Décomposition de Cholesky"
1614 return Covariance(self.__name+"C", asCovariance  = numpy.linalg.cholesky(self.__C) )
1615 elif self.isvector():
1616 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1617 elif self.isscalar():
1618 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1619 elif self.isobject() and hasattr(self.__C,"cholesky"):
1620 return Covariance(self.__name+"C", asCovObject   = self.__C.cholesky() )
# Inverse of the Cholesky factor, per storage kind; the leading
# `if self.ismatrix():` line is missing from this lossy extract.
1622 def choleskyI(self):
1623 "Inversion de la décomposition de Cholesky"
1625 return Covariance(self.__name+"H", asCovariance  = numpy.linalg.cholesky(self.__C).I )
1626 elif self.isvector():
1627 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1628 elif self.isscalar():
1629 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1630 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1631 return Covariance(self.__name+"H", asCovObject   = self.__C.choleskyI() )
# Diagonal of the covariance; scalar storage needs an explicit size. Several
# branch lines (ismatrix test, the vector return, the msize test) are missing
# from this lossy extract.
1633 def diag(self, msize=None):
1634 "Diagonale de la matrice"
1636 return numpy.diag(self.__C)
1637 elif self.isvector():
1639 elif self.isscalar():
1641 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1643 return self.__C * numpy.ones(int(msize))
1644 elif self.isobject():
1645 return self.__C.diag()
# Dense matrix form of the covariance; scalar storage needs an explicit size.
# The matrix branch and the msize test lines are missing from this extract.
1647 def asfullmatrix(self, msize=None):
1651 elif self.isvector():
1652 return numpy.matrix( numpy.diag(self.__C), float )
1653 elif self.isscalar():
1655 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1657 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1658 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1659 return self.__C.asfullmatrix()
1661 def trace(self, msize=None):
1662 "Trace de la matrice"
1664 return numpy.trace(self.__C)
1665 elif self.isvector():
1666 return float(numpy.sum(self.__C))
1667 elif self.isscalar():
1669 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1671 return self.__C * int(msize)
1672 elif self.isobject():
1673 return self.__C.trace()
1679 "x.__repr__() <==> repr(x)"
1680 return repr(self.__C)
1683 "x.__str__() <==> str(x)"
1684 return str(self.__C)
    def __add__(self, other):
        "x.__add__(y) <==> x+y"
        # Full matrix or object covariance: plain matrix addition.
        if self.ismatrix() or self.isobject():
            return self.__C + numpy.asmatrix(other)
        # Diagonal (vector) or scaled-identity (scalar) covariance: add the
        # stored coefficient(s) onto the main diagonal of "other" only.
        elif self.isvector() or self.isscalar():
            _A = numpy.asarray(other)
            # Stride shape[1]+1 over the flattened array walks the main
            # diagonal. NOTE(review): _A may be a *view* on "other", so this
            # in-place "+=" can mutate the caller's argument — confirm intended.
            _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
            return numpy.asmatrix(_A)
    def __radd__(self, other):
        "x.__radd__(y) <==> y+x"
        # Right-hand addition is deliberately unsupported for Covariance.
        raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
    def __sub__(self, other):
        "x.__sub__(y) <==> x-y"
        # Full matrix or object covariance: plain matrix subtraction.
        if self.ismatrix() or self.isobject():
            return self.__C - numpy.asmatrix(other)
        # Diagonal/scalar covariance: only the main diagonal of "other" is
        # replaced by (diag(self) - diag(other)); off-diagonal terms of
        # "other" are kept with their original sign.
        elif self.isvector() or self.isscalar():
            _A = numpy.asarray(other)
            # NOTE(review): _A may be a view on "other"; this writes the
            # diagonal in place and can mutate the caller's argument.
            _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
            return numpy.asmatrix(_A)
    def __rsub__(self, other):
        "x.__rsub__(y) <==> y-x"
        # Right-hand subtraction is deliberately unsupported for Covariance.
        raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1713 "x.__neg__() <==> -x"
1716 def __mul__(self, other):
1717 "x.__mul__(y) <==> x*y"
1718 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1719 return self.__C * other
1720 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1721 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1722 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1723 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1724 return self.__C * numpy.asmatrix(other)
1726 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1727 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1728 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1729 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1730 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1731 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1733 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1734 elif self.isscalar() and isinstance(other,numpy.matrix):
1735 return self.__C * other
1736 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1737 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1738 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1740 return self.__C * numpy.asmatrix(other)
1741 elif self.isobject():
1742 return self.__C.__mul__(other)
1744 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1746 def __rmul__(self, other):
1747 "x.__rmul__(y) <==> y*x"
1748 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1749 return other * self.__C
1750 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1751 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1752 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1753 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1754 return numpy.asmatrix(other) * self.__C
1756 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1757 elif self.isvector() and isinstance(other,numpy.matrix):
1758 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1759 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1760 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1761 return numpy.asmatrix(numpy.array(other) * self.__C)
1763 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1764 elif self.isscalar() and isinstance(other,numpy.matrix):
1765 return other * self.__C
1766 elif self.isobject():
1767 return self.__C.__rmul__(other)
1769 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1772 "x.__len__() <==> len(x)"
1773 return self.shape[0]
1775 # ==============================================================================
class ObserverF(object):
    """
    Creation of an observer function from its source text.

    The given text body is executed (with "var" and "info" in scope) each
    time the observer function is called.
    """
    def __init__(self, corps=""):
        # corps: Python source text of the observer body.
        self.__corps = corps
    def func(self,var,info):
        "Observation function: runs the stored source text."
        # SECURITY NOTE: exec of user-provided text — only feed trusted input.
        exec(self.__corps)
    def getfunc(self):
        "Return the function pointer held by the object."
        return self.func
1789 # ==============================================================================
class CaseLogger(object):
    """
    Record of the commands used to create a case, with pluggable
    viewers (for dumping) and loaders (for loading).
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name     = str(__name)
        self.__objname  = str(__objname)
        self.__logSerie = []          # ordered list of registered commands
        self.__switchoff = False      # when True, registration is suspended
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            }
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record an individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            # A command registered with switchoff=True suspends further logging
            # until a later call with switchoff=False re-enables it.
            if __switchoff:
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized restitution of the commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of the commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1839 # ==============================================================================
def MultiFonction( __xserie, _extraArguments = None, _sFunction = lambda x: x ):
    """
    For an ordered list of input vectors, return the corresponding ordered
    list of the values of the given function at these inputs.

    _extraArguments may be a sequence (passed as positional arguments) or a
    dict (passed as keyword arguments) forwarded to _sFunction.
    """
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    #
    # Sequential evaluation, forwarding the optional extra arguments.
    # (Reconstructed the lost accumulator initialisation and final return.)
    __multiHX = []
    if _extraArguments is None:
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue ) )
    elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
    elif _extraArguments is not None and isinstance(_extraArguments, dict):
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
    else:
        raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
    #
    return __multiHX
1863 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,  # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,  # Already computed simulation of Hm(x)
                   _arg = None,  # Extra arguments for Hm, as a tuple
                   _BI  = None,  # Inverse of the background error covariance B
                   _RI  = None,  # Inverse of the observation error covariance R
                   _Xb  = None,  # Background state
                   _Y   = None,  # Observations
                   _SIV = False, # To be removed for 8.0
                   _SSC = [],    # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,     # nbPreviousSteps
                   _QM  = "DA",  # QualityMeasure
                   _SSV = None,  # Input and/or output : self.StoredVariables
                   _fRt = False, # Whether to return the extended output
                   _sSc = True,  # Whether to store the SSC
                  ):
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO,
    SA, Tabu, DFO, QuantileRegression.

    Returns the scalar cost J, or _SSV when _fRt is True, or Hm(x) for the
    "QR" quality measure. Raises ValueError when a required operator or
    covariance is missing, or the quality measure is unknown.
    """
    if _SSV is None:
        # Fixed mutable-default pitfall: each call gets its own storage dict
        # unless one is explicitly provided (previous default was a shared {}).
        _SSV = {}
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        # Make sure every storage channel exists and supports "append".
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Observation operator value: reuse a precomputed simulation when given.
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    if numpy.any(numpy.isnan(_HX)):
        # A failed simulation propagates NaN costs instead of raising.
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb  = 0.
            Jo  = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb  = 0.
            Jo  = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb  = 0.
            Jo  = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J   = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For QuantileRegression
            return _HX
        else:
            return J
1978 # ==============================================================================
# Self-diagnostic entry point: prints a banner when the module is run directly.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC \n')