1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2019 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence
36 from daCore import PlatformInfo
37 from daCore import Interfaces
38 from daCore import Templates
39 from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
class CacheManager(object):
    """
    Generic management of a cache of already computed evaluations.

    Each cache entry is a triple (flattened input point, result, input
    norm); a lookup declares a hit when a new point lies within a relative
    tolerance of a stored point.

    NOTE(review): the reviewed listing is a garbled excerpt (embedded line
    numbers, stripped indentation, elided lines); this block is a careful
    reconstruction of the visible logic.
    """
    def __init__(self,
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy    = -1,
                ):
        """
        The tolerance characteristics can be modified at creation time.

        - toleranceInRedundancy : relative tolerance below which two input
          points are considered identical
        - lenghtOfRedundancy : maximum number of kept entries; a negative
          value means "auto-size on first store" (2 * point size + 2)
        """
        self.__tolerBP  = float(toleranceInRedundancy)
        self.__lenghtOR = int(lenghtOfRedundancy)
        self.__initlnOR = self.__lenghtOR  # remembered so enable() can restore it
        self.clearCache()

    def clearCache(self):
        "Empty the cache"
        self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
        # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)

    def wasCalculatedIn(self, xValue ): #, info="" ):
        "Check whether a computation matching the given value already exists"
        __alc = False
        __HxV = None
        # Scan backwards so that the most recent matching entry wins
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
                # Different sizes can never match
                continue
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __alc = True
                __HxV = self.__listOPCV[i][1]
                break
        return __alc, __HxV

    def storeValueInX(self, xValue, HxValue ):
        "Store a computation matching the given value"
        if self.__lenghtOR < 0:
            # Auto-size the cache on first use, from the point dimension
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        while len(self.__listOPCV) > self.__lenghtOR:
            # Drop the oldest entries to respect the maximum length
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            copy.copy(HxValue),
            numpy.linalg.norm(xValue),
            ) )

    def disable(self):
        "Disable the cache (the configured length is kept for re-enabling)"
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = 0

    def enable(self):
        "Re-enable the cache with its previously configured length"
        self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
class Operator(object):
    """
    Generic interface class for a simple operator, defined either by a
    matrix or by a (multi-)function.

    NOTE(review): reconstructed from a garbled listing (embedded line
    numbers, stripped indentation, elided lines).
    """
    # Class-wide (global) call counters, shared by all instances
    NbCallsAsMatrix = 0
    NbCallsAsMethod = 0
    NbCallsOfCached = 0
    # Class-wide cache of already computed evaluations
    CM = CacheManager()

    def __init__(self,
                 fromMethod           = None,
                 fromMatrix           = None,
                 avoidingRedundancy   = True,
                 inputAsMultiFunction = False,
                 extraArguments       = None,
                ):
        """
        Build the operator from either a Python (multi-)function or a
        matrix, given through one of the keywords:
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : boolean enabling (or not) the calculation cache
        - inputAsMultiFunction : boolean telling whether the function is
          explicitly defined (or not) as a multi-function
        - extraArguments : extra arguments (tuple or dictionary) passed to
          the base function and its derivatives
        """
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC   = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap the mono-function into the internal multi-function protocol
            self.__Method = partial( MultiFonction, _sFunction=fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            # Fixed: deprecated "numpy.float" alias (removed in recent numpy
            # releases) replaced by the equivalent builtin "float"
            self.__Matrix = numpy.matrix( fromMatrix, float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None

    def disableAvoidingRedundancy(self):
        "Disable the calculation cache"
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        "Enable the calculation cache, only if the instance allows it"
        if self.__AvoidRC:
            Operator.CM.enable()
        else:
            Operator.CM.disable()

    def isType(self):
        # NOTE(review): restored from an elided span; returns the internal
        # operator kind ("Method", "Matrix" or None)
        "Return the internal type of the operator"
        return self.__Type

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
        """
        Return the result of applying the operator to a series of arguments
        xValue; each argument is assumed to already be of the proper type.

        Serie arguments:
        - xValue : argument suitable for applying the operator
        - HValue : pre-computed value of the operator at this point
        - argsAsSerie : tells whether the arguments are mono or multi-valued
        """
        if argsAsSerie:
            _xValue = xValue
            _HValue = HValue
        else:
            _xValue = (xValue,)
            if HValue is not None:
                _HValue = (HValue,)
            else:
                _HValue = HValue
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        #
        if _HValue is not None:
            # Pre-computed results: normalize to column matrices and cache them
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            HxValue = []
            for i in range(len(_HValue)):
                HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
                if self.__AvoidRC:
                    Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
        else:
            HxValue = []
            _xserie = []  # points still requiring a function evaluation
            _hindex = []  # their positions inside HxValue
            for i, xv in enumerate(_xValue):
                if self.__AvoidRC:
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
                else:
                    __alreadyCalculated = False
                #
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    _hv = __HxV
                else:
                    if self.__Matrix is not None:
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix * xv
                    else:
                        self.__addOneMethodCall()
                        _xserie.append( xv )
                        _hindex.append(  i )
                        _hv = None  # placeholder, filled after the batched call
                HxValue.append( _hv )
            #
            # One single batched multi-function call for all the new points
            if len(_xserie)>0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # Calcul MF
                else:
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
                if not hasattr(_hserie, "pop"):
                    raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
                for i in _hindex:
                    _xv = _xserie.pop(0)
                    _hv = _hserie.pop(0)
                    HxValue[i] = _hv
                    if self.__AvoidRC:
                        Operator.CM.storeValueInX(_xv,_hv)
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False ):
        """
        Return the result of applying the operator to pairs (xValue, uValue);
        if uValue is None, the operator is applied to xValue only.

        Pair arguments:
        - xValue : X argument suitable for applying the operator
        - uValue : U argument suitable for applying the operator
        - argsAsSerie : tells whether the argument is mono or multi-valued
        """
        if argsAsSerie: _xuValue = paires
        else:           _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        #
        if self.__Matrix is not None:
            HxValue = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                HxValue.append( self.__Matrix * _xValue )
        else:
            # Fixed: the listing appended to the very sequence being iterated
            # (AttributeError on a tuple, unbounded growth on a list); the
            # arguments are now accumulated in a separate list
            _xuArgs = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                else:
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                HxValue = self.__Method( _xuArgs ) # Calcul MF
            else:
                HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False ):
        """
        Return the result of applying the operator to a series of arguments
        xValue, the operator being valid at xNominal. If the operator is
        linear (a matrix), it is valid at any nominal point, so xNominal may
        be anything. There is a single pair by default, and argsAsSerie
        states that the argument is multi-pairs.

        Pair arguments:
        - xNominal : series of arguments giving the point where the operator
          is built in order to be applied afterwards
        - xValue : series of arguments suitable for applying the operator
        - argsAsSerie : tells whether the argument is mono or multi-valued
        """
        if argsAsSerie: _nxValue = paires
        else:           _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        #
        if self.__Matrix is not None:
            HxValue = []
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                HxValue.append( self.__Matrix * _xValue )
        else:
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                HxValue = self.__Method( _nxValue ) # Calcul MF
            else:
                HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Return the operator in matrix form; a function-based operator
        requires an operating point ValueForMethodForm.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        # Fixed: "is not" with a string literal is an identity test whose
        # result is implementation dependent; "!=" is the intended check
        elif ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
            mValue = []
            if argsAsSerie:
                self.__addOneMethodCall( len(ValueForMethodForm) )
                for _vfmf in ValueForMethodForm:
                    mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
            else:
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
        else:
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        #
        if argsAsSerie: return mValue
        else:           return mValue[-1]

    def shape(self):
        """
        Return the numpy shape if the operator is available in matrix form.

        Raises ValueError otherwise, since a function-based operator has no
        intrinsic shape.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        else:
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the numbers of operator evaluations as the tuple
        (local total, local matrix, local method, local cached,
         global total, global matrix, global method, global cached),
        or a single element of it when "which" is an index.
        """
        __nbcalls = (
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
            )
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one more matrix application"
        self.__NbCallsAsMatrix   += 1 # Decompte local
        Operator.NbCallsAsMatrix += 1 # Decompte global

    def __addOneMethodCall(self, nb = 1):
        "Count nb more method applications"
        self.__NbCallsAsMethod   += nb # Decompte local
        Operator.NbCallsAsMethod += nb # Decompte global

    def __addOneCacheCall(self):
        "Count one more cache hit"
        self.__NbCallsOfCached   += 1 # Decompte local
        Operator.NbCallsOfCached += 1 # Decompte global
377 # ==============================================================================
class FullOperator(object):
    """
    Generic interface class for a full operator
    (Direct, Linear Tangent, Adjoint).

    NOTE(review): reconstructed from a garbled listing (embedded line
    numbers, stripped indentation, elided lines); elided parts are restored
    to the minimum required for coherence.
    """
    def __init__(self,
                 name             = "GenericFullOperator",
                 asMatrix         = None,
                 asOneFunction    = None, # 1 Fonction
                 asThreeFunctions = None, # 3 Fonctions in a dictionary
                 asScript         = None, # 1 or 3 Fonction(s) by script
                 asDict           = None, # Parameters
                 appliedInX       = None,
                 extraArguments   = None,
                 avoidRC          = True,
                 inputAsMF        = False,# Fonction(s) as Multi-Functions
                 scheduledBy      = None,
                 toBeChecked      = False,
                ):
        "Build the Direct/Tangent/Adjoint triple from one of the input forms"
        self.__name      = str(name)
        self.__check     = bool(toBeChecked)
        self.__extraArgs = extraArguments
        #
        self.__FO        = {}  # holds the "Direct"/"Tangent"/"Adjoint" operators
        #
        # Translate user-level parameter names into internal "with..." keys
        __Parameters = {}
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
            if "DifferentialIncrement" in asDict:
                __Parameters["withIncrement"]  = asDict["DifferentialIncrement"]
            if "CenteredFiniteDifference" in asDict:
                __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
            if "EnableMultiProcessing" in asDict:
                __Parameters["withmpEnabled"]  = asDict["EnableMultiProcessing"]
            if "NumberOfProcesses" in asDict:
                __Parameters["withmpWorkers"]  = asDict["NumberOfProcesses"]
        #
        if asScript is not None:
            # The matrix or the function(s) are read from a user script file
            __Matrix, __Function = None, None
            if asMatrix:
                __Matrix = ImportFromScript(asScript).getvalue( self.__name )
            elif asOneFunction:
                __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                __Function = {
                    "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                    }
                __Function.update(__Parameters)
        else:
            __Matrix = asMatrix
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                    else:
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                else:
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                else:
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
            else:
                __Function = None
        #
        # Optional pre-computed value(s) of the operator at a given point
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        else:
            __appliedInX = None
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Only the direct function is given: the tangent and adjoint are
            # approximated by finite differences
            if "withCenteredDF"            not in __Function: __Function["withCenteredDF"]            = False
            if "withIncrement"             not in __Function: __Function["withIncrement"]             = 0.01
            if "withdX"                    not in __Function: __Function["withdX"]                    = None
            if "withAvoidingRedundancy"    not in __Function: __Function["withAvoidingRedundancy"]    = avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy"    not in __Function: __Function["withLenghtOfRedundancy"]    = -1
            if "withmpEnabled"             not in __Function: __Function["withmpEnabled"]             = False
            if "withmpWorkers"             not in __Function: __Function["withmpWorkers"]             = None
            if "withmfEnabled"             not in __Function: __Function["withmfEnabled"]             = inputAsMF
            from daNumerics.ApproximatedDerivatives import FDApproximation
            FDA = FDApproximation(
                Function              = __Function["Direct"],
                centeredDF            = __Function["withCenteredDF"],
                increment             = __Function["withIncrement"],
                dX                    = __Function["withdX"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy    = __Function["withLenghtOfRedundancy"],
                mpEnabled             = __Function["withmpEnabled"],
                mpWorkers             = __Function["withmpWorkers"],
                mfEnabled             = __Function["withmfEnabled"],
                )
            self.__FO["Direct"]  = Operator( fromMethod = FDA.DirectOperator,  avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"]  = Operator( fromMethod = __Function["Direct"],  avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # Fixed: deprecated "numpy.float" alias replaced by builtin "float"
            __matrice = numpy.matrix( __Matrix, float )
            self.__FO["Direct"]  = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Tangent"] = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
        else:
            raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
        #
        # Normalize the optional pre-computed values as column matrices
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # True matrix case
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Vector stored as a 2-dimensional array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), float ).T
                else:
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], float ).T
        else:
            self.__FO["AppliedInX"] = None

    def getO(self):
        # NOTE(review): restored from an elided span; accessor for the
        # dictionary of built operators
        "Return the dictionary of the three operators (and AppliedInX)"
        return self.__FO

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        # Fixed: the listing showed repr(self.__V), but no __V attribute is
        # ever assigned in this class, which would raise AttributeError;
        # the actual payload is self.__FO
        return repr(self.__FO)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__FO)
545 # ==============================================================================
546 class Algorithm(object):
548 Classe générale d'interface de type algorithme
550 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
551 d'assimilation, en fournissant un container (dictionnaire) de variables
552 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
554 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
556 def __init__(self, name):
558 L'initialisation présente permet de fabriquer des variables de stockage
559 disponibles de manière générique dans les algorithmes élémentaires. Ces
560 variables de stockage sont ensuite conservées dans un dictionnaire
561 interne à l'objet, mais auquel on accède par la méthode "get".
563 Les variables prévues sont :
564 - APosterioriCorrelations : matrice de corrélations de la matrice A
565 - APosterioriCovariance : matrice de covariances a posteriori : A
566 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
567 - APosterioriVariances : vecteur des variances de la matrice A
568 - Analysis : vecteur d'analyse : Xa
569 - BMA : Background moins Analysis : Xa - Xb
570 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
571 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
572 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
573 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
574 - CostFunctionJo : partie observations de la fonction-coût : Jo
575 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
576 - CurrentOptimum : état optimal courant lors d'itérations
577 - CurrentState : état courant lors d'itérations
578 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
579 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
580 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
581 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
582 - Innovation : l'innovation : d = Y - H(X)
583 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
584 - JacobianMatrixAtBackground : matrice jacobienne à l'ébauche
585 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
586 - MahalanobisConsistency : indicateur de consistance des covariances
587 - OMA : Observation moins Analyse : Y - Xa
588 - OMB : Observation moins Background : Y - Xb
589 - PredictedState : état prédit courant lors d'itérations
590 - Residu : dans le cas des algorithmes de vérification
591 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
592 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
593 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
594 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
595 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
596 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
597 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
598 On peut rajouter des variables à stocker dans l'initialisation de
599 l'algorithme élémentaire qui va hériter de cette classe
601 logging.debug("%s Initialisation", str(name))
602 self._m = PlatformInfo.SystemUsage()
604 self._name = str( name )
605 self._parameters = {"StoreSupplementaryCalculations":[]}
606 self.__required_parameters = {}
607 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
609 self.StoredVariables = {}
610 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
611 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
612 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
613 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
614 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
615 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
616 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
617 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
618 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
619 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
620 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
621 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
622 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
623 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
624 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
625 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
626 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
627 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
628 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
629 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
630 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
631 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
632 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
633 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
634 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
635 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
636 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
637 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
638 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
639 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
640 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
641 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
642 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
643 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
645 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
647 logging.debug("%s Lancement", self._name)
648 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
650 # Mise a jour de self._parameters avec Parameters
651 self.__setParameters(Parameters)
653 # Corrections et complements
654 def __test_vvalue(argument, variable, argname):
656 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
657 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
658 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
659 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
661 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
663 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
665 __test_vvalue( Xb, "Xb", "Background or initial state" )
666 __test_vvalue( Y, "Y", "Observation" )
668 def __test_cvalue(argument, variable, argname):
670 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
671 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
672 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
673 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
675 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
677 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
679 __test_cvalue( R, "R", "Observation" )
680 __test_cvalue( B, "B", "Background" )
681 __test_cvalue( Q, "Q", "Evolution" )
683 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
684 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
686 self._parameters["Bounds"] = None
688 if logging.getLogger().level < logging.WARNING:
689 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
690 if PlatformInfo.has_scipy:
691 import scipy.optimize
692 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
694 self._parameters["optmessages"] = 15
696 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
697 if PlatformInfo.has_scipy:
698 import scipy.optimize
699 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
701 self._parameters["optmessages"] = 15
705 def _post_run(self,_oH=None):
707 if ("StoreSupplementaryCalculations" in self._parameters) and \
708 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
709 for _A in self.StoredVariables["APosterioriCovariance"]:
710 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
711 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
712 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
713 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
714 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
715 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
716 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
717 self.StoredVariables["APosterioriCorrelations"].store( _C )
719 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
720 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
721 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
722 logging.debug("%s Terminé", self._name)
725 def _toStore(self, key):
726 "True if in StoreSupplementaryCalculations, else False"
727 return key in self._parameters["StoreSupplementaryCalculations"]
729 def get(self, key=None):
731 Renvoie l'une des variables stockées identifiée par la clé, ou le
732 dictionnaire de l'ensemble des variables disponibles en l'absence de
733 clé. Ce sont directement les variables sous forme objet qui sont
734 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
735 des classes de persistance.
738 return self.StoredVariables[key]
740 return self.StoredVariables
742 def __contains__(self, key=None):
743 "D.__contains__(k) -> True if D has a key k, else False"
744 return key in self.StoredVariables
747 "D.keys() -> list of D's keys"
748 if hasattr(self, "StoredVariables"):
749 return self.StoredVariables.keys()
754 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
755 if hasattr(self, "StoredVariables"):
756 return self.StoredVariables.pop(k, d)
761 raise TypeError("pop expected at least 1 arguments, got 0")
762 "If key is not found, d is returned if given, otherwise KeyError is raised"
768 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
770 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
771 sa forme mathématique la plus naturelle possible.
773 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
775 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
777 Permet de définir dans l'algorithme des paramètres requis et leurs
778 caractéristiques par défaut.
781 raise ValueError("A name is mandatory to define a required parameter.")
783 self.__required_parameters[name] = {
785 "typecast" : typecast,
791 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
793 def getRequiredParameters(self, noDetails=True):
795 Renvoie la liste des noms de paramètres requis ou directement le
796 dictionnaire des paramètres requis.
799 return sorted(self.__required_parameters.keys())
801 return self.__required_parameters
803 def setParameterValue(self, name=None, value=None):
805 Renvoie la valeur d'un paramètre requis de manière contrôlée
807 default = self.__required_parameters[name]["default"]
808 typecast = self.__required_parameters[name]["typecast"]
809 minval = self.__required_parameters[name]["minval"]
810 maxval = self.__required_parameters[name]["maxval"]
811 listval = self.__required_parameters[name]["listval"]
813 if value is None and default is None:
815 elif value is None and default is not None:
816 if typecast is None: __val = default
817 else: __val = typecast( default )
819 if typecast is None: __val = value
820 else: __val = typecast( value )
822 if minval is not None and (numpy.array(__val, float) < minval).any():
823 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
824 if maxval is not None and (numpy.array(__val, float) > maxval).any():
825 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
826 if listval is not None:
827 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
830 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
831 elif __val not in listval:
832 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
835 def requireInputArguments(self, mandatory=(), optional=()):
837 Permet d'imposer des arguments requises en entrée
839 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
840 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
842 def __setParameters(self, fromDico={}):
844 Permet de stocker les paramètres reçus dans le dictionnaire interne.
846 self._parameters.update( fromDico )
847 for k in self.__required_parameters.keys():
848 if k in fromDico.keys():
849 self._parameters[k] = self.setParameterValue(k,fromDico[k])
851 self._parameters[k] = self.setParameterValue(k)
852 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
854 # ==============================================================================
855 class AlgorithmAndParameters(object):
857 Classe générale d'interface d'action pour l'algorithme et ses paramètres
860 name = "GenericAlgorithm",
867 self.__name = str(name)
871 self.__algorithm = {}
872 self.__algorithmFile = None
873 self.__algorithmName = None
875 self.updateParameters( asDict, asScript )
877 if asAlgorithm is None and asScript is not None:
878 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
882 if __Algo is not None:
883 self.__A = str(__Algo)
884 self.__P.update( {"Algorithm":self.__A} )
886 self.__setAlgorithm( self.__A )
888 def updateParameters(self,
892 "Mise a jour des parametres"
893 if asDict is None and asScript is not None:
894 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
898 if __Dict is not None:
899 self.__P.update( dict(__Dict) )
901 def executePythonScheme(self, asDictAO = None):
902 "Permet de lancer le calcul d'assimilation"
903 Operator.CM.clearCache()
905 if not isinstance(asDictAO, dict):
906 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
907 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
908 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
909 else: self.__Xb = None
910 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
911 else: self.__Y = asDictAO["Observation"]
912 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
913 else: self.__U = asDictAO["ControlInput"]
914 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
915 else: self.__HO = asDictAO["ObservationOperator"]
916 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
917 else: self.__EM = asDictAO["EvolutionModel"]
918 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
919 else: self.__CM = asDictAO["ControlModel"]
920 self.__B = asDictAO["BackgroundError"]
921 self.__R = asDictAO["ObservationError"]
922 self.__Q = asDictAO["EvolutionError"]
924 self.__shape_validate()
926 self.__algorithm.run(
936 Parameters = self.__P,
940 def executeYACSScheme(self, FileName=None):
941 "Permet de lancer le calcul d'assimilation"
942 if FileName is None or not os.path.exists(FileName):
943 raise ValueError("a YACS file name has to be given for YACS execution.\n")
945 __file = os.path.abspath(FileName)
946 logging.debug("The YACS file name is \"%s\"."%__file)
947 if not PlatformInfo.has_salome or \
948 not PlatformInfo.has_yacs or \
949 not PlatformInfo.has_adao:
950 raise ImportError("\n\n"+\
951 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
952 "Please load the right environnement before trying to use it.\n")
957 SALOMERuntime.RuntimeSALOME_setRuntime()
959 r = pilot.getRuntime()
960 xmlLoader = loader.YACSLoader()
961 xmlLoader.registerProcCataLoader()
963 catalogAd = r.loadCatalog("proc", __file)
964 r.addCatalog(catalogAd)
969 p = xmlLoader.load(__file)
970 except IOError as ex:
971 print("The YACS XML schema file can not be loaded: %s"%(ex,))
973 logger = p.getLogger("parser")
974 if not logger.isEmpty():
975 print("The imported YACS XML schema has errors on parsing:")
976 print(logger.getStr())
979 print("The YACS XML schema is not valid and will not be executed:")
980 print(p.getErrorReport())
982 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
983 p.checkConsistency(info)
984 if info.areWarningsOrErrors():
985 print("The YACS XML schema is not coherent and will not be executed:")
986 print(info.getGlobalRepr())
988 e = pilot.ExecutorSwig()
990 if p.getEffectiveState() != pilot.DONE:
991 print(p.getErrorReport())
995 def get(self, key = None):
996 "Vérifie l'existence d'une clé de variable ou de paramètres"
997 if key in self.__algorithm:
998 return self.__algorithm.get( key )
999 elif key in self.__P:
1000 return self.__P[key]
1004 def pop(self, k, d):
1005 "Necessaire pour le pickling"
1006 return self.__algorithm.pop(k, d)
1008 def getAlgorithmRequiredParameters(self, noDetails=True):
1009 "Renvoie la liste des paramètres requis selon l'algorithme"
1010 return self.__algorithm.getRequiredParameters(noDetails)
1012 def setObserver(self, __V, __O, __I, __S):
1013 if self.__algorithm is None \
1014 or isinstance(self.__algorithm, dict) \
1015 or not hasattr(self.__algorithm,"StoredVariables"):
1016 raise ValueError("No observer can be build before choosing an algorithm.")
1017 if __V not in self.__algorithm:
1018 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1020 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1023 HookParameters = __I,
1026 def removeObserver(self, __V, __O, __A = False):
1027 if self.__algorithm is None \
1028 or isinstance(self.__algorithm, dict) \
1029 or not hasattr(self.__algorithm,"StoredVariables"):
1030 raise ValueError("No observer can be removed before choosing an algorithm.")
1031 if __V not in self.__algorithm:
1032 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1034 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1039 def hasObserver(self, __V):
1040 if self.__algorithm is None \
1041 or isinstance(self.__algorithm, dict) \
1042 or not hasattr(self.__algorithm,"StoredVariables"):
1044 if __V not in self.__algorithm:
1046 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1049 return list(self.__algorithm.keys()) + list(self.__P.keys())
1051 def __contains__(self, key=None):
1052 "D.__contains__(k) -> True if D has a key k, else False"
1053 return key in self.__algorithm or key in self.__P
1056 "x.__repr__() <==> repr(x)"
1057 return repr(self.__A)+", "+repr(self.__P)
1060 "x.__str__() <==> str(x)"
1061 return str(self.__A)+", "+str(self.__P)
1063 def __setAlgorithm(self, choice = None ):
1065 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1066 d'assimilation. L'argument est un champ caractère se rapportant au nom
1067 d'un algorithme réalisant l'opération sur les arguments fixes.
1070 raise ValueError("Error: algorithm choice has to be given")
1071 if self.__algorithmName is not None:
1072 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1073 daDirectory = "daAlgorithms"
1075 # Recherche explicitement le fichier complet
1076 # ------------------------------------------
1078 for directory in sys.path:
1079 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1080 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1081 if module_path is None:
1082 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1084 # Importe le fichier complet comme un module
1085 # ------------------------------------------
1087 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1088 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1089 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1090 raise ImportError("this module does not define a valid elementary algorithm.")
1091 self.__algorithmName = str(choice)
1092 sys.path = sys_path_tmp ; del sys_path_tmp
1093 except ImportError as e:
1094 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1096 # Instancie un objet du type élémentaire du fichier
1097 # -------------------------------------------------
1098 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1101 def __shape_validate(self):
1103 Validation de la correspondance correcte des tailles des variables et
1104 des matrices s'il y en a.
1106 if self.__Xb is None: __Xb_shape = (0,)
1107 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1108 elif hasattr(self.__Xb,"shape"):
1109 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1110 else: __Xb_shape = self.__Xb.shape()
1111 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1113 if self.__Y is None: __Y_shape = (0,)
1114 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1115 elif hasattr(self.__Y,"shape"):
1116 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1117 else: __Y_shape = self.__Y.shape()
1118 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1120 if self.__U is None: __U_shape = (0,)
1121 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1122 elif hasattr(self.__U,"shape"):
1123 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1124 else: __U_shape = self.__U.shape()
1125 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1127 if self.__B is None: __B_shape = (0,0)
1128 elif hasattr(self.__B,"shape"):
1129 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1130 else: __B_shape = self.__B.shape()
1131 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1133 if self.__R is None: __R_shape = (0,0)
1134 elif hasattr(self.__R,"shape"):
1135 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1136 else: __R_shape = self.__R.shape()
1137 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1139 if self.__Q is None: __Q_shape = (0,0)
1140 elif hasattr(self.__Q,"shape"):
1141 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1142 else: __Q_shape = self.__Q.shape()
1143 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1145 if len(self.__HO) == 0: __HO_shape = (0,0)
1146 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1147 elif hasattr(self.__HO["Direct"],"shape"):
1148 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1149 else: __HO_shape = self.__HO["Direct"].shape()
1150 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1152 if len(self.__EM) == 0: __EM_shape = (0,0)
1153 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1154 elif hasattr(self.__EM["Direct"],"shape"):
1155 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1156 else: __EM_shape = self.__EM["Direct"].shape()
1157 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1159 if len(self.__CM) == 0: __CM_shape = (0,0)
1160 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1161 elif hasattr(self.__CM["Direct"],"shape"):
1162 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1163 else: __CM_shape = self.__CM["Direct"].shape()
1164 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1166 # Vérification des conditions
1167 # ---------------------------
1168 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1169 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1170 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1171 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1173 if not( min(__B_shape) == max(__B_shape) ):
1174 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1175 if not( min(__R_shape) == max(__R_shape) ):
1176 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1177 if not( min(__Q_shape) == max(__Q_shape) ):
1178 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1179 if not( min(__EM_shape) == max(__EM_shape) ):
1180 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1182 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1183 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1184 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1185 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1186 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1187 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1188 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1189 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1191 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1192 if self.__algorithmName in ["EnsembleBlue",]:
1193 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1194 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1195 for member in asPersistentVector:
1196 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1197 __Xb_shape = min(__B_shape)
1199 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1201 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1202 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1204 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1205 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1207 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1208 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1210 if ("Bounds" in self.__P) \
1211 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1212 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1213 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1214 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1218 # ==============================================================================
1219 class RegulationAndParameters(object):
1221 Classe générale d'interface d'action pour la régulation et ses paramètres
1224 name = "GenericRegulation",
1231 self.__name = str(name)
1234 if asAlgorithm is None and asScript is not None:
1235 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
1237 __Algo = asAlgorithm
1239 if asDict is None and asScript is not None:
1240 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1244 if __Dict is not None:
1245 self.__P.update( dict(__Dict) )
1247 if __Algo is not None:
1248 self.__P.update( {"Algorithm":self.__A} )
1250 def get(self, key = None):
1251 "Vérifie l'existence d'une clé de variable ou de paramètres"
1253 return self.__P[key]
1257 # ==============================================================================
1258 class DataObserver(object):
1260 Classe générale d'interface de type observer
1263 name = "GenericObserver",
1275 self.__name = str(name)
1280 if onVariable is None:
1281 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1282 elif type(onVariable) in (tuple, list):
1283 self.__V = tuple(map( str, onVariable ))
1284 if withInfo is None:
1287 self.__I = (str(withInfo),)*len(self.__V)
1288 elif isinstance(onVariable, str):
1289 self.__V = (onVariable,)
1290 if withInfo is None:
1291 self.__I = (onVariable,)
1293 self.__I = (str(withInfo),)
1295 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1297 if asString is not None:
1298 __FunctionText = asString
1299 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1300 __FunctionText = Templates.ObserverTemplates[asTemplate]
1301 elif asScript is not None:
1302 __FunctionText = ImportFromScript(asScript).getstring()
1305 __Function = ObserverF(__FunctionText)
1307 if asObsObject is not None:
1308 self.__O = asObsObject
1310 self.__O = __Function.getfunc()
1312 for k in range(len(self.__V)):
1315 if ename not in withAlgo:
1316 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1318 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1321 "x.__repr__() <==> repr(x)"
1322 return repr(self.__V)+"\n"+repr(self.__O)
1325 "x.__str__() <==> str(x)"
1326 return str(self.__V)+"\n"+str(self.__O)
1328 # ==============================================================================
1329 class State(object):
1331 Classe générale d'interface de type état
1334 name = "GenericVector",
1336 asPersistentVector = None,
1342 toBeChecked = False,
1345 Permet de définir un vecteur :
1346 - asVector : entrée des données, comme un vecteur compatible avec le
1347 constructeur de numpy.matrix, ou "True" si entrée par script.
1348 - asPersistentVector : entrée des données, comme une série de vecteurs
1349 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1350 type Persistence, ou "True" si entrée par script.
1351 - asScript : si un script valide est donné contenant une variable
1352 nommée "name", la variable est de type "asVector" (par défaut) ou
1353 "asPersistentVector" selon que l'une de ces variables est placée à
1355 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1356 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1357 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1358 nommée "name"), on récupère les colonnes et on les range ligne après
1359 ligne (colMajor=False) ou colonne après colonne (colMajor=True). La
1360 variable résultante est de type "asVector" (par défaut) ou
1361 "asPersistentVector" selon que l'une de ces variables est placée à
1364 self.__name = str(name)
1365 self.__check = bool(toBeChecked)
1369 self.__is_vector = False
1370 self.__is_series = False
1372 if asScript is not None:
1373 __Vector, __Series = None, None
1374 if asPersistentVector:
1375 __Series = ImportFromScript(asScript).getvalue( self.__name )
1377 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1378 elif asDataFile is not None:
1379 __Vector, __Series = None, None
1380 if asPersistentVector:
1381 if colNames is not None:
1382 __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
1384 __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1385 if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1386 __Series = numpy.transpose(__Series)
1387 elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1388 __Series = numpy.transpose(__Series)
1390 if colNames is not None:
1391 __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
1393 __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1395 __Vector = numpy.ravel(__Vector, order = "F")
1397 __Vector = numpy.ravel(__Vector, order = "C")
1399 __Vector, __Series = asVector, asPersistentVector
1401 if __Vector is not None:
1402 self.__is_vector = True
1403 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1404 self.shape = self.__V.shape
1405 self.size = self.__V.size
1406 elif __Series is not None:
1407 self.__is_series = True
1408 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1409 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1410 if isinstance(__Series, str): __Series = eval(__Series)
1411 for member in __Series:
1412 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1415 if isinstance(self.__V.shape, (tuple, list)):
1416 self.shape = self.__V.shape
1418 self.shape = self.__V.shape()
1419 if len(self.shape) == 1:
1420 self.shape = (self.shape[0],1)
1421 self.size = self.shape[0] * self.shape[1]
1423 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1425 if scheduledBy is not None:
1426 self.__T = scheduledBy
1428 def getO(self, withScheduler=False):
1430 return self.__V, self.__T
1431 elif self.__T is None:
1437 "Vérification du type interne"
1438 return self.__is_vector
1441 "Vérification du type interne"
1442 return self.__is_series
1445 "x.__repr__() <==> repr(x)"
1446 return repr(self.__V)
1449 "x.__str__() <==> str(x)"
1450 return str(self.__V)
1452 # ==============================================================================
1453 class Covariance(object):
1455 Classe générale d'interface de type covariance
1458 name = "GenericCovariance",
1459 asCovariance = None,
1460 asEyeByScalar = None,
1461 asEyeByVector = None,
1464 toBeChecked = False,
1467 Permet de définir une covariance :
1468 - asCovariance : entrée des données, comme une matrice compatible avec
1469 le constructeur de numpy.matrix
1470 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1471 multiplicatif d'une matrice de corrélation identité, aucune matrice
1472 n'étant donc explicitement à donner
1473 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1474 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1475 n'étant donc explicitement à donner
1476 - asCovObject : entrée des données comme un objet python, qui a les
1477 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1478 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1479 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1480 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1481 pleine doit être vérifié
1483 self.__name = str(name)
1484 self.__check = bool(toBeChecked)
1487 self.__is_scalar = False
1488 self.__is_vector = False
1489 self.__is_matrix = False
1490 self.__is_object = False
1492 if asScript is not None:
1493 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1495 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1497 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1499 __Object = ImportFromScript(asScript).getvalue( self.__name )
1501 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1503 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1505 if __Scalar is not None:
1506 if numpy.matrix(__Scalar).size != 1:
1507 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1508 self.__is_scalar = True
1509 self.__C = numpy.abs( float(__Scalar) )
1512 elif __Vector is not None:
1513 self.__is_vector = True
1514 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1515 self.shape = (self.__C.size,self.__C.size)
1516 self.size = self.__C.size**2
1517 elif __Matrix is not None:
1518 self.__is_matrix = True
1519 self.__C = numpy.matrix( __Matrix, float )
1520 self.shape = self.__C.shape
1521 self.size = self.__C.size
1522 elif __Object is not None:
1523 self.__is_object = True
1525 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1526 if not hasattr(self.__C,at):
1527 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1528 if hasattr(self.__C,"shape"):
1529 self.shape = self.__C.shape
1532 if hasattr(self.__C,"size"):
1533 self.size = self.__C.size
1538 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1542 def __validate(self):
1544 if self.__C is None:
1545 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1546 if self.ismatrix() and min(self.shape) != max(self.shape):
1547 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1548 if self.isobject() and min(self.shape) != max(self.shape):
1549 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1550 if self.isscalar() and self.__C <= 0:
1551 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1552 if self.isvector() and (self.__C <= 0).any():
1553 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1554 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1556 L = numpy.linalg.cholesky( self.__C )
1558 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1559 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1561 L = self.__C.cholesky()
1563 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1566 "Vérification du type interne"
1567 return self.__is_scalar
1570 "Vérification du type interne"
1571 return self.__is_vector
1574 "Vérification du type interne"
1575 return self.__is_matrix
1578 "Vérification du type interne"
1579 return self.__is_object
1584 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1585 elif self.isvector():
1586 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1587 elif self.isscalar():
1588 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1589 elif self.isobject():
1590 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1592 return None # Indispensable
1597 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1598 elif self.isvector():
1599 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1600 elif self.isscalar():
1601 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1602 elif self.isobject():
1603 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1606 "Décomposition de Cholesky"
1608 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1609 elif self.isvector():
1610 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1611 elif self.isscalar():
1612 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1613 elif self.isobject() and hasattr(self.__C,"cholesky"):
1614 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1616 def choleskyI(self):
1617 "Inversion de la décomposition de Cholesky"
1619 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1620 elif self.isvector():
1621 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1622 elif self.isscalar():
1623 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1624 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1625 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1627 def diag(self, msize=None):
1628 "Diagonale de la matrice"
1630 return numpy.diag(self.__C)
1631 elif self.isvector():
1633 elif self.isscalar():
1635 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1637 return self.__C * numpy.ones(int(msize))
1638 elif self.isobject():
1639 return self.__C.diag()
1641 def asfullmatrix(self, msize=None):
1645 elif self.isvector():
1646 return numpy.matrix( numpy.diag(self.__C), float )
1647 elif self.isscalar():
1649 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1651 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1652 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1653 return self.__C.asfullmatrix()
def trace(self, msize=None):
    """Return the trace of the covariance matrix.

    msize : size of the matrix, required only when the covariance is
            stored as a single scalar over the diagonal.
    Raises ValueError if the scalar case is used without ``msize``.
    """
    if self.ismatrix():
        return numpy.trace(self.__C)
    elif self.isvector():
        # Trace of a diagonal matrix is the sum of its diagonal entries
        return float(numpy.sum(self.__C))
    elif self.isscalar():
        if msize is None:
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
        else:
            # Constant diagonal of msize entries
            return self.__C * int(msize)
    elif self.isobject():
        return self.__C.trace()
1673 "x.__repr__() <==> repr(x)"
1674 return repr(self.__C)
1677 "x.__str__() <==> str(x)"
1678 return str(self.__C)
def __add__(self, other):
    "x.__add__(y) <==> x+y"
    # Dense storage (or delegating object): plain matrix addition.
    if self.ismatrix() or self.isobject():
        return self.__C + numpy.asmatrix(other)
    # Diagonal (vector) or scalar storage: add the stored values onto the
    # diagonal of ``other`` in place, then hand back a matrix view.
    if self.isvector() or self.isscalar():
        result = numpy.asarray(other)
        # Stride trick: in the flattened array, every (ncols+1)-th element
        # is a diagonal entry.
        result.reshape(result.size)[::result.shape[1]+1] += self.__C
        return numpy.asmatrix(result)
def __radd__(self, other):
    "x.__radd__(y) <==> y+x"
    # Right-addition is intentionally unsupported for covariance objects.
    message = "%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
def __sub__(self, other):
    "x.__sub__(y) <==> x-y"
    # Dense storage (or delegating object): plain matrix subtraction.
    if self.ismatrix() or self.isobject():
        return self.__C - numpy.asmatrix(other)
    # Diagonal (vector) or scalar storage: only the diagonal of ``other``
    # differs from plain negation, so rewrite it in place.
    if self.isvector() or self.isscalar():
        result = numpy.asarray(other)
        flat = result.reshape(result.size)
        step = result.shape[1]+1
        # Replace each diagonal entry d by (C - d); off-diagonals untouched.
        flat[::step] = self.__C - flat[::step]
        return numpy.asmatrix(result)
def __rsub__(self, other):
    "x.__rsub__(y) <==> y-x"
    # Right-subtraction is intentionally unsupported for covariance objects.
    message = "%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
1707 "x.__neg__() <==> -x"
def __mul__(self, other):
    """x.__mul__(y) <==> x*y

    Left multiplication of the covariance by ``other``. The accepted
    ``other`` types and the result shape depend on the storage kind
    (matrix / vector-diagonal / scalar / delegating object); a vector
    operand is promoted to a column matrix. Raises ValueError on shape
    mismatch and NotImplementedError on unsupported operand types.
    """
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return self.__C * other
    elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]: # Vector
            return self.__C * numpy.asmatrix(numpy.ravel(other)).T
        elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
            return self.__C * numpy.asmatrix(other)
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
    elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]: # Vector
            # Diagonal * vector is element-wise product
            return numpy.asmatrix(self.__C * numpy.ravel(other)).T
        elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
            # Diagonal * matrix: scale each row via broadcasting
            return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
    elif self.isscalar() and isinstance(other,numpy.matrix):
        return self.__C * other
    elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
        if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
            # 1D or single-row/column operand: treat as a column vector
            return self.__C * numpy.asmatrix(numpy.ravel(other)).T
        else:
            return self.__C * numpy.asmatrix(other)
    elif self.isobject():
        return self.__C.__mul__(other)
    else:
        raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
def __rmul__(self, other):
    """x.__rmul__(y) <==> y*x

    Right multiplication of the covariance by ``other``. Counterpart of
    ``__mul__`` with the operand on the left (row-vector convention).
    Raises ValueError on shape mismatch and NotImplementedError on
    unsupported operand types.
    """
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return other * self.__C
    elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]: # Vector
            return numpy.asmatrix(numpy.ravel(other)) * self.__C
        elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
            return numpy.asmatrix(other) * self.__C
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
    elif self.isvector() and isinstance(other,numpy.matrix):
        if numpy.ravel(other).size == self.shape[0]: # Vector
            # Vector * diagonal is element-wise product (row result)
            return numpy.asmatrix(numpy.ravel(other) * self.__C)
        elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
            # Matrix * diagonal: scale each column via broadcasting
            return numpy.asmatrix(numpy.array(other) * self.__C)
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
    elif self.isscalar() and isinstance(other,numpy.matrix):
        return other * self.__C
    elif self.isobject():
        return self.__C.__rmul__(other)
    else:
        raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1766 "x.__len__() <==> len(x)"
1767 return self.shape[0]
1769 # ==============================================================================
class ObserverF(object):
    """
    Creation of an observer function from its source text.

    The text is stored at construction and executed (via ``exec``) each
    time the returned function is called, with ``var`` and ``info`` in
    its local scope. WARNING: ``exec`` runs arbitrary code -- the source
    text must come from a trusted origin.
    """
    def __init__(self, corps=""):
        # Source text of the observer body, executed on each call
        self.__corps = corps
    def func(self,var,info):
        "Observer function: execute the stored source with var/info in scope"
        exec(self.__corps)
    def getfunc(self):
        "Return the bound function pointer held by the object"
        return self.func
1783 # ==============================================================================
class CaseLogger(object):
    """
    Record the sequence of commands that created a case, and replay or
    dump it through pluggable viewer/loader formatters.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name     = str(__name)
        self.__objname  = str(__objname)
        self.__logSerie = []          # ordered list of registered commands
        self.__switchoff = False      # when True, registration is suspended
        # Output formatters, by format name
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            }
        # Input formatters, by format name
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command (name, argument keys, local values)"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            # A command registered with switchoff=True suspends further logging
            if __switchoff:
                self.__switchoff = True
        # Any command registered without switchoff re-enables logging
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized dump of the recorded commands in the requested format"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized load of commands in the requested format"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1833 # ==============================================================================
def MultiFonction( __xserie, _extraArguments = None, _sFunction = lambda x: x ):
    """
    For an ordered list of input vectors, return the corresponding list of
    values of ``_sFunction`` applied to each of them.

    _extraArguments : optional extra arguments forwarded to ``_sFunction``,
                      either positionally (list/tuple/map) or by keyword (dict).
    Raises TypeError on a non-iterable input series or an unsupported
    extra-arguments container.
    """
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unknown input type: %s"%(type(__xserie),))
    #
    __multiHX = []
    if _extraArguments is None:
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue ) )
    elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
    elif _extraArguments is not None and isinstance(_extraArguments, dict):
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
    else:
        raise TypeError("MultiFonction extra arguments unknown input type: %s"%(type(_extraArguments),))
    #
    return __multiHX
1857 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,   # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,   # Already available simulation of Hm(x)
                   _arg = None,   # Extra arguments for Hm, as a tuple
                   _BI  = None,   # Inverse of the background error covariance B
                   _RI  = None,   # Inverse of the observation error covariance R
                   _Xb  = None,   # Background state
                   _Y   = None,   # Observations
                   _SIV = False,  # To be removed for 8.0
                   _SSC = None,   # self._parameters["StoreSupplementaryCalculations"] (default: [])
                   _nPS = 0,      # nbPreviousSteps
                   _QM  = "DA",   # QualityMeasure
                   _SSV = None,   # In and/or out: self.StoredVariables (default: {})
                   _fRt = False,  # Whether to return the extended output
                   _sSc = True,   # Whether to store the SSC
                  ):
    """
    General cost function useful for static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest,
    PSO, SA, Tabu, DFO, QuantileRegression.

    Returns J (float) by default, _HX for the "QR" quality measure, or
    the whole storage dictionary _SSV when _fRt is True.
    """
    # Mutable-default fix: fresh containers per call instead of shared
    # module-level [] / {} objects (behavior unchanged for explicit callers).
    if _SSC is None: _SSC = []
    if _SSV is None: _SSV = {}
    #
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        # Make sure every storage slot exists and supports append()
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # Use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Evaluate (or reuse) the simulation Hm(X)
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    # A failed simulation propagates as a NaN cost
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb = 0.
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb = 0.
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb = 0.
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb = 0.
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb = 0.
            Jo = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For QuantileRegression
            return _HX
        else:
            return J
1972 # ==============================================================================
# Self-test entry point: only runs when the module is executed directly.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC \n')