1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2019 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
import os
import sys
import logging
import copy
import numpy

from functools import partial
from daCore import Persistence
from daCore import PlatformInfo
from daCore import Interfaces
from daCore import Templates
from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
class CacheManager(object):
    """
    General class managing a cache of operator evaluations, used to avoid
    redundant computations of identical points.
    """
    def __init__(self,
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy    = -1,
                ):
        """
        The tolerance characteristics can be modified at creation.

        - toleranceInRedundancy : relative tolerance under which a point is
          considered a duplicate of a cached one
        - lenghtOfRedundancy : maximum number of cached entries; a negative
          value means auto-sizing at first store (2 * point size + 2)
          (the historical "lenght" spelling is kept for caller compatibility)
        """
        self.__tolerBP  = float(toleranceInRedundancy)
        self.__lenghtOR = int(lenghtOfRedundancy)
        self.__initlnOR = self.__lenghtOR  # remembered to restore after disable()
        self.clearCache()

    def clearCache(self):
        "Empty the cache content"
        self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
        # logging.debug("CM Tolerance for duplicate detection: %.2e", self.__tolerBP)

    def wasCalculatedIn(self, xValue ): #, info="" ):
        "Check the existence of a cached computation matching the given value"
        __alc = False
        __HxV = None
        # Search backwards: most recently stored points are tested first
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
                # Sizes differ: this cached point cannot match
                continue
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __alc = True
                __HxV = self.__listOPCV[i][1]
                # logging.debug("CM Case already computed, number %i", i)
                break
        return __alc, __HxV

    def storeValueInX(self, xValue, HxValue ):
        "Store a computation matching the given value"
        if self.__lenghtOR < 0:
            # Auto-size the cache on the first stored point
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        while len(self.__listOPCV) > self.__lenghtOR:
            # Cache full: drop the oldest entries first (FIFO)
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            copy.copy(HxValue),
            numpy.linalg.norm(xValue),
            ) )

    def disable(self):
        "Deactivate the cache (remember the current size to restore it later)"
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = 0

    def enable(self):
        "Reactivate the cache with its previous size"
        self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator, defined either by a
    Python (multi-)function or by a matrix.
    """
    # Class-wide evaluation counters and shared cache of computed values
    NbCallsAsMatrix = 0
    NbCallsAsMethod = 0
    NbCallsOfCached = 0
    CM = CacheManager()
    #
    def __init__(self,
                 fromMethod           = None,
                 fromMatrix           = None,
                 avoidingRedundancy   = True,
                 inputAsMultiFunction = False,
                 extraArguments       = None,
                 ):
        """
        Build an object of this type by giving, through one of the two
        keywords, either a Python function or multi-function, or a matrix.
        The arguments are:
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant computations
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : additional arguments passed to the base function
          and its derivatives (tuple or dictionary)
        """
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC   = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Mono-function wrapped into the internal multi-function protocol
            self.__Method = partial( MultiFonction, _sFunction=fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            # "float" replaces the "numpy.float" alias removed in NumPy 1.24
            self.__Matrix = numpy.matrix( fromMatrix, float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None
    #
    def disableAvoidingRedundancy(self):
        "Deactivate the cache of computed values"
        Operator.CM.disable()
    #
    def enableAvoidingRedundancy(self):
        "Activate the cache of computed values, if this operator allows it"
        if self.__AvoidRC:
            Operator.CM.enable()
        else:
            Operator.CM.disable()
    #
    def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
        """
        Return the result of applying the operator to a series of arguments
        xValue. This method simply applies the operator, each argument being
        a priori of the right type.
        The series arguments are:
        - xValue : argument suitable for applying the operator
        - HValue : precomputed value of the operator at this point
        - argsAsSerie : indicates whether the arguments are mono or multi-valued
        """
        if argsAsSerie:
            _xValue = xValue
            _HValue = HValue
        else:
            _xValue = (xValue,)
            if HValue is not None:
                _HValue = (HValue,)
            else:
                _HValue = HValue
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        #
        if _HValue is not None:
            # Precomputed values provided: only store them in the cache
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            HxValue = []
            for i in range(len(_HValue)):
                HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
                if self.__AvoidRC:
                    Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
        else:
            HxValue = []
            _xserie = []
            _hindex = []
            for i, xv in enumerate(_xValue):
                if self.__AvoidRC:
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
                else:
                    __alreadyCalculated = False
                #
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    _hv = __HxV
                else:
                    if self.__Matrix is not None:
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix * xv
                    else:
                        # Deferred: evaluated below in one multi-function call
                        self.__addOneMethodCall()
                        _xserie.append( xv )
                        _hindex.append(  i )
                        _hv = None
                HxValue.append( _hv )
            #
            if len(_xserie)>0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # MF computation
                else:
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
                if not hasattr(_hserie, "pop"):
                    raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
                for i in _hindex:
                    _xv = _xserie.pop(0)
                    _hv = _hserie.pop(0)
                    HxValue[i] = _hv
                    if self.__AvoidRC:
                        Operator.CM.storeValueInX(_xv,_hv)
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]
    #
    def appliedControledFormTo(self, paires, argsAsSerie = False ):
        """
        Return the result of applying the operator to pairs
        (xValue, uValue). This method simply applies the operator, its
        argument being a priori of the right type. If uValue is None, the
        operator is assumed to apply to xValue only.
        The pair arguments are:
        - paires : the arguments by pair are:
            - xValue : argument X suitable for applying the operator
            - uValue : argument U suitable for applying the operator
        - argsAsSerie : indicates whether the argument is mono or multi-valued
        """
        if argsAsSerie: _xuValue = paires
        else:           _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        #
        if self.__Matrix is not None:
            HxValue = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                HxValue.append( self.__Matrix * _xValue )
        else:
            # Accumulate all the arguments BEFORE the single multi-function
            # call (accumulating inside the loop previously kept only the
            # last pair in series mode)
            _xuArgs = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                else:
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                HxValue = self.__Method( _xuArgs ) # MF computation
            else:
                HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]
    #
    def appliedInXTo(self, paires, argsAsSerie = False ):
        """
        Return the result of applying the operator to a series of arguments
        xValue, knowing that the operator is valid at xNominal. This method
        simply applies the operator, its argument being a priori of the
        right type. If the operator is linear (a matrix) it is valid at any
        nominal point and xNominal can be anything. There is a single pair
        by default, and argsAsSerie indicates a multi-pair argument.
        The pair arguments are:
        - paires : the arguments by pair are:
            - xNominal : series of arguments giving the point where the
              operator is built before being applied
            - xValue : series of arguments suitable for applying the operator
        - argsAsSerie : indicates whether the argument is mono or multi-valued
        """
        if argsAsSerie: _nxValue = paires
        else:           _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        #
        if self.__Matrix is not None:
            HxValue = []
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                HxValue.append( self.__Matrix * _xValue )
        else:
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                HxValue = self.__Method( _nxValue ) # MF computation
            else:
                HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]
    #
    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Return the operator in matrix form (requires an operating point when
        the operator is defined by a function).
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not (isinstance(ValueForMethodForm, str) and ValueForMethodForm == "UnknownVoidValue"):
            # Explicit sentinel test: "is not <literal>" relied on string
            # interning and raises a SyntaxWarning; plain "!=" would be
            # ambiguous on array arguments. "None" is not usable as the
            # sentinel because None is a legitimate missing-point marker.
            mValue = []
            if argsAsSerie:
                self.__addOneMethodCall( len(ValueForMethodForm) )
                for _vfmf in ValueForMethodForm:
                    mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
            else:
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
        else:
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        #
        if argsAsSerie: return mValue
        else:           return mValue[-1]
    #
    def shape(self):
        """
        Return the size in numpy form if the operator is available in
        matrix form
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        else:
            raise ValueError("Matrix form of the operator is not available, nor the shape")
    #
    def nbcalls(self, which=None):
        """
        Return the numbers of evaluations of the operator: local counters
        (this instance) then global counters (all Operator instances).
        """
        __nbcalls = (
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
            )
        if which is None: return __nbcalls
        else:             return __nbcalls[which]
    #
    def __addOneMatrixCall(self):
        "Count one call in matrix form"
        self.__NbCallsAsMatrix   += 1 # Local counter
        Operator.NbCallsAsMatrix += 1 # Global counter
    #
    def __addOneMethodCall(self, nb = 1):
        "Count one (or nb) call(s) in function form"
        self.__NbCallsAsMethod   += nb # Local counter
        Operator.NbCallsAsMethod += nb # Global counter
    #
    def __addOneCacheCall(self):
        "Count one call answered from the cache"
        self.__NbCallsOfCached   += 1 # Local counter
        Operator.NbCallsOfCached += 1 # Global counter
377 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator
    (Direct, Linear Tangent, Adjoint)
    """
    def __init__(self,
                 name             = "GenericFullOperator",
                 asMatrix         = None,
                 asOneFunction    = None, # 1 Function
                 asThreeFunctions = None, # 3 Functions in a dictionary
                 asScript         = None, # 1 or 3 Function(s) by script
                 asDict           = None, # Parameters
                 appliedInX       = None,
                 extraArguments   = None,
                 avoidRC          = True,
                 inputAsMF        = False,# Function(s) as Multi-Functions
                 scheduledBy      = None,
                 toBeChecked      = False,
                 ):
        """
        Build the Direct/Tangent/Adjoint operator triple from one of:
        asMatrix (exact linear operator), asOneFunction (Direct only,
        derivatives approximated by finite differences), asThreeFunctions
        (explicit Direct/Tangent/Adjoint), or asScript (matrix or functions
        loaded from a script). Extra behaviour is driven by asDict
        (parameters), appliedInX (precomputed values at a point), avoidRC
        (computation cache), inputAsMF, scheduledBy and toBeChecked.
        """
        self.__name       = str(name)
        self.__check      = bool(toBeChecked)
        self.__extraArgs  = extraArguments
        #
        self.__FO         = {}
        #
        # Translate user-facing parameter names into internal ones
        __Parameters = {}
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
            if "DifferentialIncrement" in asDict:
                __Parameters["withIncrement"]  = asDict["DifferentialIncrement"]
            if "CenteredFiniteDifference" in asDict:
                __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
            if "EnableMultiProcessing" in asDict:
                __Parameters["withmpEnabled"]  = asDict["EnableMultiProcessing"]
            if "NumberOfProcesses" in asDict:
                __Parameters["withmpWorkers"]  = asDict["NumberOfProcesses"]
        #
        if asScript is not None:
            # Load the matrix or the function(s) from the user script
            __Matrix, __Function = None, None
            if asMatrix:
                __Matrix = ImportFromScript(asScript).getvalue( self.__name )
            elif asOneFunction:
                __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                __Function = {
                    "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                    }
                __Function.update(__Parameters)
        else:
            __Matrix = asMatrix
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                    else:
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                else:
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                   ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                   ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                   (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                   ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                else:
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # The Tangent at the nominal point stands in for Direct
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
            else:
                __Function = None
        #
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        #
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        else:
            __appliedInX = None
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Only the Direct function is given: Tangent and Adjoint are
            # built by finite-difference approximation
            if "withCenteredDF"            not in __Function: __Function["withCenteredDF"]            = False
            if "withIncrement"             not in __Function: __Function["withIncrement"]             = 0.01
            if "withdX"                    not in __Function: __Function["withdX"]                    = None
            if "withAvoidingRedundancy"    not in __Function: __Function["withAvoidingRedundancy"]    = avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy"    not in __Function: __Function["withLenghtOfRedundancy"]    = -1
            if "withmpEnabled"             not in __Function: __Function["withmpEnabled"]             = False
            if "withmpWorkers"             not in __Function: __Function["withmpWorkers"]             = None
            if "withmfEnabled"             not in __Function: __Function["withmfEnabled"]             = inputAsMF
            from daNumerics.ApproximatedDerivatives import FDApproximation
            FDA = FDApproximation(
                Function              = __Function["Direct"],
                centeredDF            = __Function["withCenteredDF"],
                increment             = __Function["withIncrement"],
                dX                    = __Function["withdX"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy    = __Function["withLenghtOfRedundancy"],
                mpEnabled             = __Function["withmpEnabled"],
                mpWorkers             = __Function["withmpWorkers"],
                mfEnabled             = __Function["withmfEnabled"],
                )
            self.__FO["Direct"]  = Operator( fromMethod = FDA.DirectOperator,  avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"]  = Operator( fromMethod = __Function["Direct"],  avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # "float" replaces the "numpy.float" alias removed in NumPy 1.24
            __matrice = numpy.matrix( __Matrix, float )
            self.__FO["Direct"]  = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Tangent"] = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
        else:
            raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
        #
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # True matrix case
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Vector represented as a 2-dimensional array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), float ).T
                else:
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], float ).T
        else:
            self.__FO["AppliedInX"] = None

    def getO(self):
        "Return the dictionary of the built operators"
        return self.__FO

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        # self.__FO is the operator container actually set in __init__
        # (the previous self.__V was never assigned -> AttributeError)
        return repr(self.__FO)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__FO)
545 # ==============================================================================
546 class Algorithm(object):
548 Classe générale d'interface de type algorithme
550 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
551 d'assimilation, en fournissant un container (dictionnaire) de variables
552 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
554 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
556 def __init__(self, name):
558 L'initialisation présente permet de fabriquer des variables de stockage
559 disponibles de manière générique dans les algorithmes élémentaires. Ces
560 variables de stockage sont ensuite conservées dans un dictionnaire
561 interne à l'objet, mais auquel on accède par la méthode "get".
563 Les variables prévues sont :
564 - APosterioriCorrelations : matrice de corrélations de la matrice A
565 - APosterioriCovariance : matrice de covariances a posteriori : A
566 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
567 - APosterioriVariances : vecteur des variances de la matrice A
568 - Analysis : vecteur d'analyse : Xa
569 - BMA : Background moins Analysis : Xa - Xb
570 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
571 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
572 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
573 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
574 - CostFunctionJo : partie observations de la fonction-coût : Jo
575 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
576 - CurrentOptimum : état optimal courant lors d'itérations
577 - CurrentState : état courant lors d'itérations
578 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
579 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
580 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
581 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
582 - Innovation : l'innovation : d = Y - H(X)
583 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
584 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
585 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
586 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
587 - KalmanGainAtOptimum : gain de Kalman à l'optimum
588 - MahalanobisConsistency : indicateur de consistance des covariances
589 - OMA : Observation moins Analyse : Y - Xa
590 - OMB : Observation moins Background : Y - Xb
591 - PredictedState : état prédit courant lors d'itérations
592 - Residu : dans le cas des algorithmes de vérification
593 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
594 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
595 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
596 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
597 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
598 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
599 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
600 On peut rajouter des variables à stocker dans l'initialisation de
601 l'algorithme élémentaire qui va hériter de cette classe
603 logging.debug("%s Initialisation", str(name))
604 self._m = PlatformInfo.SystemUsage()
606 self._name = str( name )
607 self._parameters = {"StoreSupplementaryCalculations":[]}
608 self.__required_parameters = {}
609 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
611 self.StoredVariables = {}
612 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
613 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
614 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
615 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
616 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
617 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
618 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
619 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
620 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
621 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
622 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
623 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
624 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
625 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
626 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
627 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
628 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
629 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
630 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
631 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
632 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
633 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
634 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
635 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
636 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
637 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
638 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
639 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
640 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
641 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
642 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
643 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
644 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
645 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
646 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
647 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
649 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
651 logging.debug("%s Lancement", self._name)
652 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
654 # Mise a jour de self._parameters avec Parameters
655 self.__setParameters(Parameters)
657 # Corrections et complements
658 def __test_vvalue(argument, variable, argname):
660 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
661 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
662 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
663 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
665 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
667 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
669 __test_vvalue( Xb, "Xb", "Background or initial state" )
670 __test_vvalue( Y, "Y", "Observation" )
672 def __test_cvalue(argument, variable, argname):
674 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
675 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
676 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
677 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
679 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
681 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
683 __test_cvalue( R, "R", "Observation" )
684 __test_cvalue( B, "B", "Background" )
685 __test_cvalue( Q, "Q", "Evolution" )
687 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
688 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
690 self._parameters["Bounds"] = None
692 if logging.getLogger().level < logging.WARNING:
693 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
694 if PlatformInfo.has_scipy:
695 import scipy.optimize
696 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
698 self._parameters["optmessages"] = 15
700 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
701 if PlatformInfo.has_scipy:
702 import scipy.optimize
703 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
705 self._parameters["optmessages"] = 15
709 def _post_run(self,_oH=None):
711 if ("StoreSupplementaryCalculations" in self._parameters) and \
712 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
713 for _A in self.StoredVariables["APosterioriCovariance"]:
714 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
715 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
716 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
717 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
718 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
719 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
720 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
721 self.StoredVariables["APosterioriCorrelations"].store( _C )
723 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
724 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
725 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
726 logging.debug("%s Terminé", self._name)
729 def _toStore(self, key):
730 "True if in StoreSupplementaryCalculations, else False"
731 return key in self._parameters["StoreSupplementaryCalculations"]
733 def get(self, key=None):
735 Renvoie l'une des variables stockées identifiée par la clé, ou le
736 dictionnaire de l'ensemble des variables disponibles en l'absence de
737 clé. Ce sont directement les variables sous forme objet qui sont
738 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
739 des classes de persistance.
742 return self.StoredVariables[key]
744 return self.StoredVariables
746 def __contains__(self, key=None):
747 "D.__contains__(k) -> True if D has a key k, else False"
748 return key in self.StoredVariables
751 "D.keys() -> list of D's keys"
752 if hasattr(self, "StoredVariables"):
753 return self.StoredVariables.keys()
758 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
759 if hasattr(self, "StoredVariables"):
760 return self.StoredVariables.pop(k, d)
765 raise TypeError("pop expected at least 1 arguments, got 0")
766 "If key is not found, d is returned if given, otherwise KeyError is raised"
772 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
774 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
775 sa forme mathématique la plus naturelle possible.
777 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# NOTE(review): elided numbered listing — leading integers are original-file
# line numbers; numbering gaps are elided lines.
# defineRequiredParameter(): register a named parameter with its default value,
# typecast, message and validity constraints in __required_parameters; the
# name-guard line (784) and most of the dict literal (788, 790-794) are elided.
779 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
781 Permet de définir dans l'algorithme des paramètres requis et leurs
782 caractéristiques par défaut.
785 raise ValueError("A name is mandatory to define a required parameter.")
787 self.__required_parameters[name] = {
789 "typecast" : typecast,
795 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# getRequiredParameters(): names only (sorted) or the full description dict,
# according to noDetails; the branching lines (802, 804) are elided.
797 def getRequiredParameters(self, noDetails=True):
799 Renvoie la liste des noms de paramètres requis ou directement le
800 dictionnaire des paramètres requis.
803 return sorted(self.__required_parameters.keys())
805 return self.__required_parameters
# setParameterValue(): compute the controlled value of a declared parameter —
# take the user value or the default, apply the typecast, then enforce
# minval/maxval (element-wise via numpy) and listval membership.  The branch
# bodies at 818 (value and default both None), 822 (value given) and 832-833
# (iteration over list values) are elided here.
807 def setParameterValue(self, name=None, value=None):
809 Renvoie la valeur d'un paramètre requis de manière contrôlée
811 default = self.__required_parameters[name]["default"]
812 typecast = self.__required_parameters[name]["typecast"]
813 minval = self.__required_parameters[name]["minval"]
814 maxval = self.__required_parameters[name]["maxval"]
815 listval = self.__required_parameters[name]["listval"]
817 if value is None and default is None:
819 elif value is None and default is not None:
820 if typecast is None: __val = default
821 else: __val = typecast( default )
823 if typecast is None: __val = value
824 else: __val = typecast( value )
826 if minval is not None and (numpy.array(__val, float) < minval).any():
827 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
828 if maxval is not None and (numpy.array(__val, float) > maxval).any():
829 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
830 if listval is not None:
831 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
834 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
835 elif __val not in listval:
836 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
def requireInputArguments(self, mandatory=(), optional=()):
    """Declare the algorithm's input requirements: *mandatory* arguments must
    be supplied by the caller, *optional* ones are merely accepted."""
    required = self.__required_inputs["RequiredInputValues"]
    required["mandatory"] = tuple(mandatory)
    required["optional"] = tuple(optional)
# __setParameters(): merge the received dict into the internal _parameters,
# then (re)validate every declared required parameter through
# setParameterValue(), logging each final value.
# NOTE(review): the mutable default "fromDico={}" is a classic Python pitfall;
# it is only read here, but a None default with an explicit guard would be
# safer.  The else-branch line between 853 and 855 is elided from this
# numbered listing (leading integers are original-file line numbers).
846 def __setParameters(self, fromDico={}):
848 Permet de stocker les paramètres reçus dans le dictionnaire interne.
850 self._parameters.update( fromDico )
851 for k in self.__required_parameters.keys():
852 if k in fromDico.keys():
853 self._parameters[k] = self.setParameterValue(k,fromDico[k])
855 self._parameters[k] = self.setParameterValue(k)
856 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
858 # ==============================================================================
# AlgorithmAndParameters: action interface binding a chosen algorithm to its
# parameter set.  This is an elided numbered listing — the constructor's
# signature tail (862-870), attribute initialisations (872-874, 878) and the
# else-branch taking asAlgorithm directly (883-885) are missing from this
# extract.
859 class AlgorithmAndParameters(object):
861 Classe générale d'interface d'action pour l'algorithme et ses paramètres
864 name = "GenericAlgorithm",
871 self.__name = str(name)
875 self.__algorithm = {}
876 self.__algorithmFile = None
877 self.__algorithmName = None
# Merge any parameters given as dict or script, then resolve the algorithm
# name (from script when not given directly) and load it.
879 self.updateParameters( asDict, asScript )
881 if asAlgorithm is None and asScript is not None:
882 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
886 if __Algo is not None:
887 self.__A = str(__Algo)
888 self.__P.update( {"Algorithm":self.__A} )
890 self.__setAlgorithm( self.__A )
# updateParameters(): refresh the internal parameter dict __P from an explicit
# dict or from a script variable "Parameters"; the remaining signature lines
# (893-895) and the direct-dict branch (899-901) are elided from this listing.
892 def updateParameters(self,
896 "Mise a jour des parametres"
897 if asDict is None and asScript is not None:
898 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
902 if __Dict is not None:
903 self.__P.update( dict(__Dict) )
# executePythonScheme(): run the assimilation computation in pure Python —
# unwrap every study object (via its getO() accessor when present), validate
# shapes, then call the algorithm's run().  Most of the run() keyword
# arguments (931-939, 941-943) are elided here.
905 def executePythonScheme(self, asDictAO = None):
906 "Permet de lancer le calcul d'assimilation"
# The operator cache is global: clear it so a new calculation starts fresh.
907 Operator.CM.clearCache()
909 if not isinstance(asDictAO, dict):
910 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
911 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
912 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
913 else: self.__Xb = None
914 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
915 else: self.__Y = asDictAO["Observation"]
916 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
917 else: self.__U = asDictAO["ControlInput"]
918 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
919 else: self.__HO = asDictAO["ObservationOperator"]
920 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
921 else: self.__EM = asDictAO["EvolutionModel"]
922 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
923 else: self.__CM = asDictAO["ControlModel"]
924 self.__B = asDictAO["BackgroundError"]
925 self.__R = asDictAO["ObservationError"]
926 self.__Q = asDictAO["EvolutionError"]
928 self.__shape_validate()
930 self.__algorithm.run(
940 Parameters = self.__P,
# executeYACSScheme(): run the assimilation through a YACS XML scheme inside
# SALOME.  Requires a readable file and the SALOME/YACS/ADAO environment; the
# SALOME module imports (around 957-960), several try: lines (972, …) and the
# executor invocation (993) are elided from this listing.
944 def executeYACSScheme(self, FileName=None):
945 "Permet de lancer le calcul d'assimilation"
946 if FileName is None or not os.path.exists(FileName):
947 raise ValueError("a YACS file name has to be given for YACS execution.\n")
949 __file = os.path.abspath(FileName)
950 logging.debug("The YACS file name is \"%s\"."%__file)
951 if not PlatformInfo.has_salome or \
952 not PlatformInfo.has_yacs or \
953 not PlatformInfo.has_adao:
954 raise ImportError("\n\n"+\
955 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
956 "Please load the right environnement before trying to use it.\n")
961 SALOMERuntime.RuntimeSALOME_setRuntime()
963 r = pilot.getRuntime()
964 xmlLoader = loader.YACSLoader()
965 xmlLoader.registerProcCataLoader()
967 catalogAd = r.loadCatalog("proc", __file)
968 r.addCatalog(catalogAd)
973 p = xmlLoader.load(__file)
974 except IOError as ex:
975 print("The YACS XML schema file can not be loaded: %s"%(ex,))
977 logger = p.getLogger("parser")
978 if not logger.isEmpty():
979 print("The imported YACS XML schema has errors on parsing:")
980 print(logger.getStr())
982 print("The YACS XML schema is not valid and will not be executed:")
983 print("The YACS XML schema is not valid and will not be executed:")
984 print(p.getErrorReport())
986 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
987 p.checkConsistency(info)
988 if info.areWarningsOrErrors():
989 print("The YACS XML schema is not coherent and will not be executed:")
990 print(info.getGlobalRepr())
992 e = pilot.ExecutorSwig()
994 if p.getEffectiveState() != pilot.DONE:
995 print(p.getErrorReport())
# get(): look the key up first among the algorithm's stored variables, then
# among the parameters dict __P.  The trailing fallback (lines 1005-1007,
# presumably the no-key / not-found behaviour) is elided from this numbered
# listing.
999 def get(self, key = None):
1000 "Vérifie l'existence d'une clé de variable ou de paramètres"
1001 if key in self.__algorithm:
1002 return self.__algorithm.get( key )
1003 elif key in self.__P:
1004 return self.__P[key]
def pop(self, k, d):
    """Remove entry *k* from the underlying algorithm storage and return its
    value, or *d* when absent.  Required for pickling support."""
    storage = self.__algorithm
    return storage.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    """Return the selected algorithm's required parameters, as a plain list of
    names (*noDetails* true) or with their full descriptions."""
    algorithm = self.__algorithm
    return algorithm.getRequiredParameters(noDetails)
# setObserver(): attach observer callable __O (with info __I, scheduler __S)
# to the stored variable named __V; only valid once an algorithm with
# StoredVariables has been selected.  Parts of the setDataObserver() call
# (lines 1023, 1025-1026, 1028-1029) are elided from this numbered listing.
1016 def setObserver(self, __V, __O, __I, __S):
1017 if self.__algorithm is None \
1018 or isinstance(self.__algorithm, dict) \
1019 or not hasattr(self.__algorithm,"StoredVariables"):
1020 raise ValueError("No observer can be build before choosing an algorithm.")
1021 if __V not in self.__algorithm:
1022 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1024 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1027 HookParameters = __I,
# removeObserver(): symmetric removal of an observer from variable __V; the
# removeDataObserver() arguments (1039-1042) are elided.
1030 def removeObserver(self, __V, __O, __A = False):
1031 if self.__algorithm is None \
1032 or isinstance(self.__algorithm, dict) \
1033 or not hasattr(self.__algorithm,"StoredVariables"):
1034 raise ValueError("No observer can be removed before choosing an algorithm.")
1035 if __V not in self.__algorithm:
1036 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1038 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# hasObserver(): non-raising variant — the False-returning branches at 1047
# and 1049 are elided here.
1043 def hasObserver(self, __V):
1044 if self.__algorithm is None \
1045 or isinstance(self.__algorithm, dict) \
1046 or not hasattr(self.__algorithm,"StoredVariables"):
1048 if __V not in self.__algorithm:
1050 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# keys(): union of stored-variable names and parameter names (the def line,
# 1052, is elided).
1053 return list(self.__algorithm.keys()) + list(self.__P.keys())
# __contains__(): membership over either namespace.
1055 def __contains__(self, key=None):
1056 "D.__contains__(k) -> True if D has a key k, else False"
1057 return key in self.__algorithm or key in self.__P
# __repr__/__str__ fragments (their def lines, 1059 and 1063, are elided).
1060 "x.__repr__() <==> repr(x)"
1061 return repr(self.__A)+", "+repr(self.__P)
1064 "x.__str__() <==> str(x)"
1065 return str(self.__A)+", "+str(self.__P)
# __setAlgorithm(): locate "<choice>.py" inside a "daAlgorithms" directory on
# sys.path, import it with sys.path temporarily prepended, check it defines
# ElementaryAlgorithm, and instantiate it.  The guard at 1073, the
# module_path initialisation (1081) and the try: lines (1090, …) are elided
# from this listing.
1067 def __setAlgorithm(self, choice = None ):
1069 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1070 d'assimilation. L'argument est un champ caractère se rapportant au nom
1071 d'un algorithme réalisant l'opération sur les arguments fixes.
1074 raise ValueError("Error: algorithm choice has to be given")
1075 if self.__algorithmName is not None:
1076 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1077 daDirectory = "daAlgorithms"
1079 # Recherche explicitement le fichier complet
1080 # ------------------------------------------
1082 for directory in sys.path:
1083 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1084 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1085 if module_path is None:
1086 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1088 # Importe le fichier complet comme un module
1089 # ------------------------------------------
1091 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1092 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1093 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1094 raise ImportError("this module does not define a valid elementary algorithm.")
1095 self.__algorithmName = str(choice)
# NOTE(review): restoring sys.path by assignment here is not exception-safe;
# a try/finally would guarantee restoration (the surrounding try: line is
# elided, so the actual protection cannot be judged from this extract).
1096 sys.path = sys_path_tmp ; del sys_path_tmp
1097 except ImportError as e:
1098 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1100 # Instancie un objet du type élémentaire du fichier
1101 # -------------------------------------------------
1102 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# __shape_validate(): normalise the shape of every study object (Xb, Y, U, B,
# R, Q, HO, EM, CM) to a tuple — accepting either a .size, a tuple .shape or
# a callable .shape() — then cross-check all dimensional compatibilities and
# raise a descriptive error on the first mismatch.  Elided numbered listing:
# leading integers are original-file line numbers, gaps are missing lines
# (mostly blanks between paragraphs).
1105 def __shape_validate(self):
1107 Validation de la correspondance correcte des tailles des variables et
1108 des matrices s'il y en a.
1110 if self.__Xb is None: __Xb_shape = (0,)
1111 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1112 elif hasattr(self.__Xb,"shape"):
1113 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1114 else: __Xb_shape = self.__Xb.shape()
1115 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1117 if self.__Y is None: __Y_shape = (0,)
1118 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1119 elif hasattr(self.__Y,"shape"):
1120 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1121 else: __Y_shape = self.__Y.shape()
1122 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1124 if self.__U is None: __U_shape = (0,)
1125 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1126 elif hasattr(self.__U,"shape"):
1127 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1128 else: __U_shape = self.__U.shape()
1129 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1131 if self.__B is None: __B_shape = (0,0)
1132 elif hasattr(self.__B,"shape"):
1133 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1134 else: __B_shape = self.__B.shape()
1135 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1137 if self.__R is None: __R_shape = (0,0)
1138 elif hasattr(self.__R,"shape"):
1139 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1140 else: __R_shape = self.__R.shape()
1141 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1143 if self.__Q is None: __Q_shape = (0,0)
1144 elif hasattr(self.__Q,"shape"):
1145 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1146 else: __Q_shape = self.__Q.shape()
1147 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
# Operators are stored as dicts of "Direct"/"Tangent"/"Adjoint": an empty or
# plain-dict operator contributes a neutral (0,0) shape.
1149 if len(self.__HO) == 0: __HO_shape = (0,0)
1150 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1151 elif hasattr(self.__HO["Direct"],"shape"):
1152 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1153 else: __HO_shape = self.__HO["Direct"].shape()
1154 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1156 if len(self.__EM) == 0: __EM_shape = (0,0)
1157 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1158 elif hasattr(self.__EM["Direct"],"shape"):
1159 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1160 else: __EM_shape = self.__EM["Direct"].shape()
1161 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1163 if len(self.__CM) == 0: __CM_shape = (0,0)
1164 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1165 elif hasattr(self.__CM["Direct"],"shape"):
1166 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1167 else: __CM_shape = self.__CM["Direct"].shape()
1168 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1170 # Vérification des conditions
1171 # ---------------------------
1172 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1173 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1174 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1175 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1177 if not( min(__B_shape) == max(__B_shape) ):
1178 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1179 if not( min(__R_shape) == max(__R_shape) ):
1180 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1181 if not( min(__Q_shape) == max(__Q_shape) ):
1182 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1183 if not( min(__EM_shape) == max(__EM_shape) ):
1184 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1186 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1187 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1188 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1189 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1190 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1191 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1192 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1193 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# EnsembleBlue special case: a B/Xb size mismatch is reinterpreted as an
# ensemble of background members, repacked into a Persistence.OneVector.
1195 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1196 if self.__algorithmName in ["EnsembleBlue",]:
1197 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1198 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1199 for member in asPersistentVector:
# NOTE(review): numpy.float is a deprecated alias removed in NumPy >= 1.24;
# the builtin float should be used here.
1200 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
# NOTE(review): this assigns a scalar where every other *_shape variable is a
# tuple; later uses go through max(__Xb_shape), which still works on an int in
# Python 2 style max — verify intent against the full file.
1201 __Xb_shape = min(__B_shape)
1203 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1205 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1206 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1208 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1209 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1211 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1212 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1214 if ("Bounds" in self.__P) \
1215 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1216 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1217 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1218 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1222 # ==============================================================================
# RegulationAndParameters: lightweight holder for a regulation algorithm name
# and its parameter dict, resolvable from a script.  Elided numbered listing:
# the constructor signature tail (1229-1234), __P initialisation (1236-1237)
# and several else-branches (1240, 1245-1247) are missing from this extract.
1223 class RegulationAndParameters(object):
1225 Classe générale d'interface d'action pour la régulation et ses paramètres
1228 name = "GenericRegulation",
1235 self.__name = str(name)
1238 if asAlgorithm is None and asScript is not None:
1239 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
1241 __Algo = asAlgorithm
1243 if asDict is None and asScript is not None:
1244 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1248 if __Dict is not None:
1249 self.__P.update( dict(__Dict) )
1250 if __Algo is not None:
# NOTE(review): self.__A is referenced here but its assignment is not visible
# in this extract (presumably on an elided line) — confirm against the full
# file.
1251 if __Algo is not None:
1252 self.__P.update( {"Algorithm":self.__A} )
# get(): parameter accessor; the no-key fallback (1256, 1258-1259) is elided.
1254 def get(self, key = None):
1255 "Vérifie l'existence d'une clé de variable ou de paramètres"
1257 return self.__P[key]
1261 # ==============================================================================
# DataObserver: builds an observer (from an object, source string, template or
# script) and attaches it to one or several algorithm variables.  Elided
# numbered listing: the constructor signature tail (1268-1278), several
# else/raise branches (1289-1290, 1296, 1298, 1307-1308) and the loop body
# unpacking ename/einfo (1317-1318) are missing from this extract.
1262 class DataObserver(object):
1264 Classe générale d'interface de type observer
1267 name = "GenericObserver",
1279 self.__name = str(name)
1284 if onVariable is None:
1285 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
# NOTE(review): "type(x) in (tuple, list)" rejects subclasses; isinstance
# would be the idiomatic check.
1286 elif type(onVariable) in (tuple, list):
1287 self.__V = tuple(map( str, onVariable ))
1288 if withInfo is None:
1291 self.__I = (str(withInfo),)*len(self.__V)
1292 elif isinstance(onVariable, str):
1293 self.__V = (onVariable,)
1294 if withInfo is None:
1295 self.__I = (onVariable,)
1297 self.__I = (str(withInfo),)
1299 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# Resolve the observer function text from, in priority order: an explicit
# string, a named template, or a script file.
1301 if asString is not None:
1302 __FunctionText = asString
1303 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1304 __FunctionText = Templates.ObserverTemplates[asTemplate]
1305 elif asScript is not None:
1306 __FunctionText = ImportFromScript(asScript).getstring()
1309 __Function = ObserverF(__FunctionText)
1311 if asObsObject is not None:
1312 self.__O = asObsObject
1314 self.__O = __Function.getfunc()
# Register the observer on every requested variable of the target algorithm.
1316 for k in range(len(self.__V)):
1319 if ename not in withAlgo:
1320 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1322 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
# __repr__/__str__ fragments (their def lines, 1324 and 1328, are elided).
1325 "x.__repr__() <==> repr(x)"
1326 return repr(self.__V)+"\n"+repr(self.__O)
1329 "x.__str__() <==> str(x)"
1330 return str(self.__V)+"\n"+str(self.__O)
1332 # ==============================================================================
# State: generic state-vector interface accepting a single vector or a series
# of vectors (Persistence), from direct values, a script or data files.
# Elided numbered listing: parts of the constructor signature (1339, 1341-1345,
# 1347), several else: lines (1380, 1387, 1393, 1396, 1398, 1400, 1402) and
# the Persistence-object branch (1417-1418, 1421, 1426) are missing.
1333 class State(object):
1335 Classe générale d'interface de type état
1338 name = "GenericVector",
1340 asPersistentVector = None,
1346 toBeChecked = False,
1349 Permet de définir un vecteur :
1350 - asVector : entrée des données, comme un vecteur compatible avec le
1351 constructeur de numpy.matrix, ou "True" si entrée par script.
1352 - asPersistentVector : entrée des données, comme une série de vecteurs
1353 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1354 type Persistence, ou "True" si entrée par script.
1355 - asScript : si un script valide est donné contenant une variable
1356 nommée "name", la variable est de type "asVector" (par défaut) ou
1357 "asPersistentVector" selon que l'une de ces variables est placée à
1359 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1360 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1361 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1362 nommée "name"), on récupère les colonnes et on les range ligne après
1363 ligne (colMajor=False) ou colonne après colonne (colMajor=True). La
1364 variable résultante est de type "asVector" (par défaut) ou
1365 "asPersistentVector" selon que l'une de ces variables est placée à
1368 self.__name = str(name)
1369 self.__check = bool(toBeChecked)
1373 self.__is_vector = False
1374 self.__is_series = False
# Resolve the raw data: from a script variable, from data file columns, or
# directly from the asVector/asPersistentVector arguments.
1376 if asScript is not None:
1377 __Vector, __Series = None, None
1378 if asPersistentVector:
1379 __Series = ImportFromScript(asScript).getvalue( self.__name )
1381 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1382 elif asDataFile is not None:
1383 __Vector, __Series = None, None
1384 if asPersistentVector:
1385 if colNames is not None:
1386 __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
1388 __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are stored transposed relative to text files, hence the XOR-like
# pair of transposition conditions on the file format.
1389 if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1390 __Series = numpy.transpose(__Series)
1391 elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1392 __Series = numpy.transpose(__Series)
1394 if colNames is not None:
1395 __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
1397 __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1399 __Vector = numpy.ravel(__Vector, order = "F")
1401 __Vector = numpy.ravel(__Vector, order = "C")
1403 __Vector, __Series = asVector, asPersistentVector
# Store as a single column matrix, or as a Persistence.OneVector of columns.
1405 if __Vector is not None:
1406 self.__is_vector = True
# NOTE(review): numpy.float is a deprecated alias removed in NumPy >= 1.24;
# the builtin float should be used here (also applies to line 1416 below).
1407 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1408 self.shape = self.__V.shape
1409 self.size = self.__V.size
1410 elif __Series is not None:
1411 self.__is_series = True
1412 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1413 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a user-supplied string — acceptable only for
# trusted study inputs; flag for a safer parser (e.g. ast.literal_eval) if
# the string can come from untrusted sources.
1414 if isinstance(__Series, str): __Series = eval(__Series)
1415 for member in __Series:
1416 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1419 if isinstance(self.__V.shape, (tuple, list)):
1420 self.shape = self.__V.shape
1422 self.shape = self.__V.shape()
1423 if len(self.shape) == 1:
1424 self.shape = (self.shape[0],1)
1425 self.size = self.shape[0] * self.shape[1]
1427 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1429 if scheduledBy is not None:
1430 self.__T = scheduledBy
# getO(): return the stored value, optionally with its scheduler; the
# remaining branches (1433, 1436-1439) are elided.
1432 def getO(self, withScheduler=False):
1434 return self.__V, self.__T
1435 elif self.__T is None:
# Type-inspection helpers and repr/str fragments (def lines elided).
1441 "Vérification du type interne"
1442 return self.__is_vector
1445 "Vérification du type interne"
1446 return self.__is_series
1449 "x.__repr__() <==> repr(x)"
1450 return repr(self.__V)
1453 "x.__str__() <==> str(x)"
1454 return str(self.__V)
1456 # ==============================================================================
# Covariance: generic covariance interface supporting four internal storage
# modes — full matrix, diagonal vector, scalar multiple of identity, or a
# user object implementing a matrix-like protocol.  Elided numbered listing:
# the def __init__ line (1461), asScript/asCovObject signature entries
# (1466-1467), the script-branch conditions (1498, 1500, 1502, 1504), the
# scalar shape/size assignments (1514-1515), __C assignment for objects
# (1528), and the closing else/validate lines (1534-1535, 1538-1545) are
# missing from this extract.
1457 class Covariance(object):
1459 Classe générale d'interface de type covariance
1462 name = "GenericCovariance",
1463 asCovariance = None,
1464 asEyeByScalar = None,
1465 asEyeByVector = None,
1468 toBeChecked = False,
1471 Permet de définir une covariance :
1472 - asCovariance : entrée des données, comme une matrice compatible avec
1473 le constructeur de numpy.matrix
1474 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1475 multiplicatif d'une matrice de corrélation identité, aucune matrice
1476 n'étant donc explicitement à donner
1477 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1478 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1479 n'étant donc explicitement à donner
1480 - asCovObject : entrée des données comme un objet python, qui a les
1481 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1482 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1483 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1484 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1485 pleine doit être vérifié
1487 self.__name = str(name)
1488 self.__check = bool(toBeChecked)
1491 self.__is_scalar = False
1492 self.__is_vector = False
1493 self.__is_matrix = False
1494 self.__is_object = False
# Script input: exactly one of the four representations is fetched from the
# script variable named after the covariance (the discriminating conditions
# are elided here).
1496 if asScript is not None:
1497 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1499 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1501 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1503 __Object = ImportFromScript(asScript).getvalue( self.__name )
1505 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1507 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar mode: |scalar| times identity; variance sign is forced positive.
1509 if __Scalar is not None:
1510 if numpy.matrix(__Scalar).size != 1:
1511 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1512 self.__is_scalar = True
1513 self.__C = numpy.abs( float(__Scalar) )
# Vector mode: |vector| stored as the diagonal; shape/size describe the
# implicit full matrix.
1516 elif __Vector is not None:
1517 self.__is_vector = True
1518 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1519 self.shape = (self.__C.size,self.__C.size)
1520 self.size = self.__C.size**2
1521 elif __Matrix is not None:
1522 self.__is_matrix = True
1523 self.__C = numpy.matrix( __Matrix, float )
1524 self.shape = self.__C.shape
1525 self.size = self.__C.size
# Object mode: duck-typed protocol check for the mandatory methods.
1526 elif __Object is not None:
1527 self.__is_object = True
1529 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1530 if not hasattr(self.__C,at):
1531 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1532 if hasattr(self.__C,"shape"):
1533 self.shape = self.__C.shape
1536 if hasattr(self.__C,"size"):
1537 self.size = self.__C.size
1542 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# __validate(): consistency checks after construction — squareness for matrix
# and object modes, strict positivity for scalar and vector modes, and an
# optional Cholesky-based SPD check when requested (or when logging is
# verbose).  Elided numbered listing: the try: lines around the Cholesky
# attempts (1559, 1561, 1564, 1566) are missing from this extract.
1546 def __validate(self):
1548 if self.__C is None:
1549 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1550 if self.ismatrix() and min(self.shape) != max(self.shape):
1551 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1552 if self.isobject() and min(self.shape) != max(self.shape):
1553 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1554 if self.isscalar() and self.__C <= 0:
1555 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1556 if self.isvector() and (self.__C <= 0).any():
1557 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1558 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1560 L = numpy.linalg.cholesky( self.__C )
1562 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1563 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1565 L = self.__C.cholesky()
1567 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Type-inspection helpers (their def lines, 1569/1573/1577/1581, are elided).
1570 "Vérification du type interne"
1571 return self.__is_scalar
1574 "Vérification du type interne"
1575 return self.__is_vector
1578 "Vérification du type interne"
1579 return self.__is_matrix
1582 "Vérification du type interne"
1583 return self.__is_object
# getI(): inverse, returned as a new Covariance in the same storage mode
# (matrix inverse, element-wise reciprocal for vector/scalar, delegated for
# objects).  The def line and the ismatrix() test (1585-1587) are elided.
1588 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1589 elif self.isvector():
1590 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1591 elif self.isscalar():
1592 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1593 elif self.isobject():
1594 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1596 return None # Indispensable
# getT(): transpose — a no-op payload for diagonal/scalar modes (def line and
# matrix test elided).
1601 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1602 elif self.isvector():
1603 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1604 elif self.isscalar():
1605 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1606 elif self.isobject():
1607 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
# cholesky(): lower-triangular factor; square root of the diagonal/scalar.
1610 "Décomposition de Cholesky"
1612 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1613 elif self.isvector():
1614 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1615 elif self.isscalar():
1616 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1617 elif self.isobject() and hasattr(self.__C,"cholesky"):
1618 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
# choleskyI(): inverse of the Cholesky factor.
1620 def choleskyI(self):
1621 "Inversion de la décomposition de Cholesky"
1623 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1624 elif self.isvector():
1625 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1626 elif self.isscalar():
1627 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1628 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1629 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
# diag(): diagonal as a 1D array; scalar mode needs an explicit msize.  The
# vector-mode return line (1636) and msize test (1638/1640) are elided.
1631 def diag(self, msize=None):
1632 "Diagonale de la matrice"
1634 return numpy.diag(self.__C)
1635 elif self.isvector():
1637 elif self.isscalar():
1639 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1641 return self.__C * numpy.ones(int(msize))
1642 elif self.isobject():
1643 return self.__C.diag()
# asfullmatrix(): materialise the dense matrix (the matrix-mode branch,
# 1647-1648, and msize test lines are elided).
1645 def asfullmatrix(self, msize=None):
1649 elif self.isvector():
1650 return numpy.matrix( numpy.diag(self.__C), float )
1651 elif self.isscalar():
1653 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1655 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1656 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1657 return self.__C.asfullmatrix()
# trace(): matrix trace; sum of the diagonal vector; msize * scalar.
1659 def trace(self, msize=None):
1660 "Trace de la matrice"
1662 return numpy.trace(self.__C)
1663 elif self.isvector():
1664 return float(numpy.sum(self.__C))
1665 elif self.isscalar():
1667 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1669 return self.__C * int(msize)
1670 elif self.isobject():
1671 return self.__C.trace()
# repr/str fragments (def lines 1676/1680 elided).
1677 "x.__repr__() <==> repr(x)"
1678 return repr(self.__C)
1681 "x.__str__() <==> str(x)"
1682 return str(self.__C)
# __add__(): full-matrix addition, or addition of the implicit diagonal onto
# the other operand's diagonal via a strided view.
1684 def __add__(self, other):
1685 "x.__add__(y) <==> x+y"
1686 if self.ismatrix() or self.isobject():
1687 return self.__C + numpy.asmatrix(other)
1688 elif self.isvector() or self.isscalar():
1689 _A = numpy.asarray(other)
# NOTE(review): this relies on reshape returning a *view* (true for the
# contiguous arrays expected here) and mutates _A in place — when asarray did
# not copy, the caller's "other" is modified too; confirm this aliasing is
# intended.
1690 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1691 return numpy.asmatrix(_A)
1693 def __radd__(self, other):
1694 "x.__radd__(y) <==> y+x"
1695 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
# __sub__(): same strided-diagonal technique as __add__, with the same
# aliasing caveat.
1697 def __sub__(self, other):
1698 "x.__sub__(y) <==> x-y"
1699 if self.ismatrix() or self.isobject():
1700 return self.__C - numpy.asmatrix(other)
1701 elif self.isvector() or self.isscalar():
1702 _A = numpy.asarray(other)
1703 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1704 return numpy.asmatrix(_A)
1706 def __rsub__(self, other):
1707 "x.__rsub__(y) <==> y-x"
1708 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
# __neg__ docstring fragment (def line 1710 and body 1712 are elided).
1711 "x.__neg__() <==> -x"
1714 def __mul__(self, other):
1715 "x.__mul__(y) <==> x*y"
1716 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1717 return self.__C * other
1718 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1719 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1720 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1721 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1722 return self.__C * numpy.asmatrix(other)
1724 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1725 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1726 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1727 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1728 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1729 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1731 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1732 elif self.isscalar() and isinstance(other,numpy.matrix):
1733 return self.__C * other
1734 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1735 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1736 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1738 return self.__C * numpy.asmatrix(other)
1739 elif self.isobject():
1740 return self.__C.__mul__(other)
1742 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1744 def __rmul__(self, other):
1745 "x.__rmul__(y) <==> y*x"
1746 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1747 return other * self.__C
1748 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1749 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1750 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1751 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1752 return numpy.asmatrix(other) * self.__C
1754 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1755 elif self.isvector() and isinstance(other,numpy.matrix):
1756 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1757 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1758 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1759 return numpy.asmatrix(numpy.array(other) * self.__C)
1761 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1762 elif self.isscalar() and isinstance(other,numpy.matrix):
1763 return other * self.__C
1764 elif self.isobject():
1765 return self.__C.__rmul__(other)
1767 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1770 "x.__len__() <==> len(x)"
1771 return self.shape[0]
1773 # ==============================================================================
1774 class ObserverF(object):
1776 Creation d'une fonction d'observateur a partir de son texte
1778 def __init__(self, corps=""):
1779 self.__corps = corps
1780 def func(self,var,info):
1781 "Fonction d'observation"
1784 "Restitution du pointeur de fonction dans l'objet"
1787 # ==============================================================================
1788 class CaseLogger(object):
1790 Conservation des commandes de creation d'un cas
1792 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
1793 self.__name = str(__name)
1794 self.__objname = str(__objname)
1795 self.__logSerie = []
1796 self.__switchoff = False
1798 "TUI" :Interfaces._TUIViewer,
1799 "SCD" :Interfaces._SCDViewer,
1800 "YACS":Interfaces._YACSViewer,
1803 "TUI" :Interfaces._TUIViewer,
1804 "COM" :Interfaces._COMViewer,
1806 if __addViewers is not None:
1807 self.__viewers.update(dict(__addViewers))
1808 if __addLoaders is not None:
1809 self.__loaders.update(dict(__addLoaders))
1811 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
1812 "Enregistrement d'une commande individuelle"
1813 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
1814 if "self" in __keys: __keys.remove("self")
1815 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
1817 self.__switchoff = True
1819 self.__switchoff = False
1821 def dump(self, __filename=None, __format="TUI", __upa=""):
1822 "Restitution normalisée des commandes"
1823 if __format in self.__viewers:
1824 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
1826 raise ValueError("Dumping as \"%s\" is not available"%__format)
1827 return __formater.dump(__filename, __upa)
1829 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
1830 "Chargement normalisé des commandes"
1831 if __format in self.__loaders:
1832 __formater = self.__loaders[__format]()
1834 raise ValueError("Loading as \"%s\" is not available"%__format)
1835 return __formater.load(__filename, __content, __object)
1837 # ==============================================================================
1838 def MultiFonction( __xserie, _extraArguments = None, _sFunction = lambda x: x ):
1840 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
1841 correspondante de valeurs de la fonction en argument
1843 if not PlatformInfo.isIterable( __xserie ):
1844 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
1847 if _extraArguments is None:
1848 for __xvalue in __xserie:
1849 __multiHX.append( _sFunction( __xvalue ) )
1850 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
1851 for __xvalue in __xserie:
1852 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
1853 elif _extraArguments is not None and isinstance(_extraArguments, dict):
1854 for __xvalue in __xserie:
1855 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
1857 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
1861 # ==============================================================================
1862 def CostFunction3D(_x,
1863 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
1864 _HmX = None, # Simulation déjà faite de Hm(x)
1865 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
1870 _SIV = False, # A résorber pour la 8.0
1871 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
1872 _nPS = 0, # nbPreviousSteps
1873 _QM = "DA", # QualityMeasure
1874 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
1875 _fRt = False, # Restitue ou pas la sortie étendue
1876 _sSc = True, # Stocke ou pas les SSC
1879 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
1880 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
1881 DFO, QuantileRegression
1887 for k in ["CostFunctionJ",
1893 "SimulatedObservationAtCurrentOptimum",
1894 "SimulatedObservationAtCurrentState",
1898 if hasattr(_SSV[k],"store"):
1899 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
1901 _X = numpy.asmatrix(numpy.ravel( _x )).T
1902 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
1903 _SSV["CurrentState"].append( _X )
1905 if _HmX is not None:
1909 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
1913 _HX = _Hm( _X, *_arg )
1914 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1916 if "SimulatedObservationAtCurrentState" in _SSC or \
1917 "SimulatedObservationAtCurrentOptimum" in _SSC:
1918 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
1920 if numpy.any(numpy.isnan(_HX)):
1921 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
1923 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
1924 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
1925 if _BI is None or _RI is None:
1926 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
1927 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
1928 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
1929 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
1930 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
1932 raise ValueError("Observation error covariance matrix has to be properly defined!")
1934 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
1935 elif _QM in ["LeastSquares", "LS", "L2"]:
1937 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
1938 elif _QM in ["AbsoluteValue", "L1"]:
1940 Jo = numpy.sum( numpy.abs(_Y - _HX) )
1941 elif _QM in ["MaximumError", "ME"]:
1943 Jo = numpy.max( numpy.abs(_Y - _HX) )
1944 elif _QM in ["QR", "Null"]:
1948 raise ValueError("Unknown asked quality measure!")
1950 J = float( Jb ) + float( Jo )
1953 _SSV["CostFunctionJb"].append( Jb )
1954 _SSV["CostFunctionJo"].append( Jo )
1955 _SSV["CostFunctionJ" ].append( J )
1957 if "IndexOfOptimum" in _SSC or \
1958 "CurrentOptimum" in _SSC or \
1959 "SimulatedObservationAtCurrentOptimum" in _SSC:
1960 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
1961 if "IndexOfOptimum" in _SSC:
1962 _SSV["IndexOfOptimum"].append( IndexMin )
1963 if "CurrentOptimum" in _SSC:
1964 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
1965 if "SimulatedObservationAtCurrentOptimum" in _SSC:
1966 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
1971 if _QM in ["QR"]: # Pour le QuantileRegression
1976 # ==============================================================================
# Print a short self-test banner when the module is executed directly.
if __name__ == "__main__":
    print("\n AUTODIAGNOSTIC \n")