1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2019 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence
36 from daCore import PlatformInfo
37 from daCore import Interfaces
38 from daCore import Templates
39 from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
class CacheManager(object):
    """
    General class managing a cache of computed operator values.
    """
    # NOTE(review): this chunk is a fragmentary view of the file — some lines
    # are missing (e.g. the "def __init__(self, ...)" signature line around
    # the two keyword defaults below). Orphaned lines are kept as-is.
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy = -1,
        """
        The tolerance characteristics can be modified at creation time.
        """
        # Relative tolerance used to decide that two points are duplicates
        self.__tolerBP = float(toleranceInRedundancy)
        # Maximum number of cached entries (-1 means: sized later from x.size)
        self.__lenghtOR = int(lenghtOfRedundancy)
        # Remember the initial length so the cache size can be restored later
        self.__initlnOR = self.__lenghtOR
        self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
        # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)

    def wasCalculatedIn(self, xValue ): #, info="" ):
        "Checks whether a computation already exists for the given value"
        # Scan the cache from the most recent entry backwards, bounded by
        # the last __lenghtOR entries.
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            # Sizes must match before a distance comparison makes sense
            if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
                # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
            # A hit is declared when the distance to the cached point is below
            # the tolerance scaled by the cached point's norm (entry index 2).
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __HxV = self.__listOPCV[i][1]
                # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)

    def storeValueInX(self, xValue, HxValue ):
        "Stores a computation associated with the given value"
        if self.__lenghtOR < 0:
            # Default cache length is proportional to the state size
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        # Evict the oldest entries first when the cache is full
        while len(self.__listOPCV) > self.__lenghtOR:
            # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            numpy.linalg.norm(xValue),
    # NOTE(review): the two statements below belong to partially visible
    # disable/enable methods that save or restore the cache length.
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator.
    """
    # NOTE(review): this chunk is a fragmentary view — signature lines,
    # "else:" lines and some statements are missing; orphaned branch lines
    # are kept as-is below.
                 avoidingRedundancy = True,
                 inputAsMultiFunction = False,
                 extraArguments = None,
        """
        An object of this type is built by providing, through one of the two
        keywords, either a Python function or multi-function, or a matrix.
        Arguments:
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant computations
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        """
        # Per-instance call counters: matrix products, method calls, cache hits
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # User function already accepts a series of inputs
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap a mono-function so it can be applied to a series of inputs
            self.__Method = partial( MultiFonction, _sFunction=fromMethod)
            self.__Type = "Method"
        elif fromMatrix is not None:
            # NOTE(review): numpy.float is a deprecated alias removed in
            # NumPy >= 1.24 — float or numpy.float64 is the modern spelling.
            self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
            self.__Type = "Matrix"

    def disableAvoidingRedundancy(self):
        # Turn the shared computation cache off
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # NOTE(review): the enclosing if/else (presumably testing
        # self.__AvoidRC) is not visible in this view; only the branch that
        # disables the cache appears below — TODO confirm against upstream.
            Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
        """
        Returns the result of applying the operator to a series of arguments
        xValue. This method simply applies the operator, each argument being
        expected to be of the proper type.
        Series arguments are:
        - xValue : argument suitable for applying the operator
        - HValue : precomputed value of the operator at this point
        - argsAsSerie : indicates whether the arguments are mono or multi-valued
        """
        if HValue is not None:
            PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
            if _HValue is not None:
                assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
                for i in range(len(_HValue)):
                    # Reuse the precomputed values directly, as column matrices
                    HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
                    Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
            for i, xv in enumerate(_xValue):
                # Look the point up in the cache first
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
                __alreadyCalculated = False
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                if self.__Matrix is not None:
                    # Linear case: a direct matrix product suffices
                    self.__addOneMatrixCall()
                    _hv = self.__Matrix * xv
                    self.__addOneMethodCall()
                HxValue.append( _hv )
            if len(_xserie)>0 and self.__Matrix is None:
                # Evaluate all not-yet-computed points in one multi-function call
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # MF computation
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
                if not hasattr(_hserie, "pop"):
                    raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
                    Operator.CM.storeValueInX(_xv,_hv)
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False ):
        """
        Returns the result of applying the operator to (xValue, uValue)
        pairs. This method simply applies the operator, its argument being
        expected to be of the proper type. If uValue is None, the operator
        is assumed to apply to xValue only.
        Arguments:
        - paires : the paired arguments are:
          - xValue : X argument suitable for applying the operator
          - uValue : U argument suitable for applying the operator
        - argsAsSerie : indicates whether the argument is mono or multi-valued
        """
        if argsAsSerie: _xuValue = paires
        else:           _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        if self.__Matrix is not None:
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                HxValue.append( self.__Matrix * _xValue )
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    # NOTE(review): appending to _xuValue while iterating over
                    # it looks suspicious; presumably the target is a separate
                    # work list in the full file — TODO confirm upstream.
                    _xuValue.append( paire )
                    _xuValue.append( _xValue )
            self.__addOneMethodCall( len(_xuValue) )
            if self.__extraArgs is None:
                HxValue = self.__Method( _xuValue ) # MF computation
                HxValue = self.__Method( _xuValue, self.__extraArgs ) # MF computation
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False ):
        """
        Returns the result of applying the operator to a series of arguments
        xValue, the operator being valid at xNominal. This method simply
        applies the operator, its argument being expected to be of the proper
        type. If the operator is linear (a matrix), it is valid at any
        nominal point and xNominal can be arbitrary. There is a single pair
        by default, and argsAsSerie indicates a multi-pair argument.
        Arguments:
        - paires : the paired arguments are:
          - xNominal : series of arguments giving the point where the
            operator is built before being applied
          - xValue : series of arguments suitable for applying the operator
        - argsAsSerie : indicates whether the argument is mono or multi-valued
        """
        if argsAsSerie: _nxValue = paires
        else:           _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        if self.__Matrix is not None:
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                HxValue.append( self.__Matrix * _xValue )
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                HxValue = self.__Method( _nxValue ) # MF computation
                HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Returns the operator in matrix form.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        # NOTE(review): "is not" on a string literal compares identity, not
        # equality; it relies on interning and raises a SyntaxWarning on
        # recent Pythons — "!=" is the safe comparison here.
        elif ValueForMethodForm is not "UnknownVoidValue": # Do not use "None"
            self.__addOneMethodCall( len(ValueForMethodForm) )
            for _vfmf in ValueForMethodForm:
                mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
            self.__addOneMethodCall()
            mValue = self.__Method(((ValueForMethodForm, None),))
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        if argsAsSerie: return mValue
        else:           return mValue[-1]

    # NOTE(review): the "def shape(self):" line is not visible in this view;
    # the docstring and body below belong to that property-like accessor.
        """
        Returns the shape, in NumPy form, when the operator is available as
        a matrix.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Returns the numbers of evaluations of the operator.
        """
        # Tuple layout: (local total, local matrix, local method, local cache,
        #                global total, global matrix, global method, global cache)
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Counts one matrix call"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self, nb = 1):
        "Counts one or several method calls"
        self.__NbCallsAsMethod += nb # Local count
        Operator.NbCallsAsMethod += nb # Global count

    def __addOneCacheCall(self):
        "Counts one cache hit"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
377 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator
    (Direct, Tangent Linear, Adjoint).
    """
    # NOTE(review): this chunk is a fragmentary view — the "def __init__"
    # signature and several branch lines are missing; orphaned lines are
    # kept as-is below.
                 name             = "GenericFullOperator",
                 asOneFunction    = None, # 1 Function
                 asThreeFunctions = None, # 3 Functions in a dictionary
                 asScript         = None, # 1 or 3 Function(s) by script
                 asDict           = None, # Parameters
                 extraArguments   = None,
                 inputAsMF        = False,# Function(s) as Multi-Functions
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments
        # Normalize the user-facing parameter names into internal "with*" keys
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
            if "DifferentialIncrement" in asDict:
                __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
            if "CenteredFiniteDifference" in asDict:
                __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
            if "EnableMultiProcessing" in asDict:
                __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
            if "NumberOfProcesses" in asDict:
                __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
        # Matrix or function(s) imported from a user script
        if asScript is not None:
            __Matrix, __Function = None, None
                __Matrix = ImportFromScript(asScript).getvalue( self.__name )
                __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                    "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
        if asOneFunction is not None:
            if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                if asOneFunction["Direct"] is not None:
                    __Function = asOneFunction
                raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                __Function = { "Direct":asOneFunction }
            # A single Direct function implies approximated derivatives
            __Function.update({"useApproximatedDerivatives":True})
            __Function.update(__Parameters)
        elif asThreeFunctions is not None:
            # Complete Tangent/Adjoint pair explicitly provided
            if isinstance(asThreeFunctions, dict) and \
                    ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                    ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                    (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                __Function = asThreeFunctions
            elif isinstance(asThreeFunctions, dict) and \
                    ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                __Function = asThreeFunctions
                __Function.update({"useApproximatedDerivatives":True})
                raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
            if "Direct" not in asThreeFunctions:
                # Without a Direct operator, fall back to the Tangent one
                __Function["Direct"] = asThreeFunctions["Tangent"]
            __Function.update(__Parameters)
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        if scheduledBy is not None:
            self.__T = scheduledBy
        # Build the three operators by finite-difference approximation of the
        # derivatives when requested
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
            if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
            if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            from daNumerics.ApproximatedDerivatives import FDApproximation
            FDA = FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["withCenteredDF"],
                increment = __Function["withIncrement"],
                dX = __Function["withdX"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
                mpEnabled = __Function["withmpEnabled"],
                mpWorkers = __Function["withmpWorkers"],
                mfEnabled = __Function["withmfEnabled"],
            self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # NOTE(review): numpy.float is a deprecated alias removed in
            # NumPy >= 1.24 — float or numpy.float64 is the modern spelling.
            __matrice = numpy.matrix( __Matrix, numpy.float )
            self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            # The adjoint of a real linear operator is its transpose
            self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # Case of a true matrix
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Case of a vector represented as a 2-dimensional array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
            self.__FO["AppliedInX"] = None

    # NOTE(review): the "def __repr__(self):" and "def __str__(self):" lines
    # are not visible in this view; the fragments below belong to them.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)

        "x.__str__() <==> str(x)"
545 # ==============================================================================
546 class Algorithm(object):
548 Classe générale d'interface de type algorithme
550 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
551 d'assimilation, en fournissant un container (dictionnaire) de variables
552 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
554 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
    def __init__(self, name):
        """
        This initialisation builds storage variables that are generically
        available in the elementary algorithms. These storage variables are
        then kept in a dictionary internal to the object, accessed through
        the "get" method.

        The provided variables are:
        - APosterioriCorrelations : correlation matrix of the A matrix
        - APosterioriCovariance : a posteriori covariance matrix : A
        - APosterioriStandardDeviations : standard deviation vector of the A matrix
        - APosterioriVariances : variance vector of the A matrix
        - Analysis : analysis vector : Xa
        - BMA : Background minus Analysis : Xa - Xb
        - CostFunctionJ : global cost function, sum of the two parts Jb and Jo
        - CostFunctionJAtCurrentOptimum : global cost function at the current optimal state during iterations
        - CostFunctionJb : background part of the cost function : Jb
        - CostFunctionJbAtCurrentOptimum : background part at the current optimal state during iterations
        - CostFunctionJo : observation part of the cost function : Jo
        - CostFunctionJoAtCurrentOptimum : observation part at the current optimal state during iterations
        - CurrentOptimum : current optimal state during iterations
        - CurrentState : current state during iterations
        - GradientOfCostFunctionJ : gradient of the global cost function
        - GradientOfCostFunctionJb : gradient of the background part of the cost function
        - GradientOfCostFunctionJo : gradient of the observation part of the cost function
        - IndexOfOptimum : index of the current optimal state during iterations
        - Innovation : the innovation : d = Y - H(X)
        - InnovationAtCurrentState : the innovation at the current state : dn = Y - H(Xn)
        - JacobianMatrixAtCurrentState : Jacobian matrix at the current state
        - JacobianMatrixAtOptimum : Jacobian matrix at the optimum
        - KalmanGainAtOptimum : Kalman gain at the optimum
        - MahalanobisConsistency : covariance consistency indicator
        - OMA : Observation minus Analysis : Y - Xa
        - OMB : Observation minus Background : Y - Xb
        - PredictedState : current predicted state during iterations
        - Residu : residue, for the checking algorithms
        - SigmaBck2 : indicator of optimal correction of background errors
        - SigmaObs2 : indicator of optimal correction of observation errors
        - SimulatedObservationAtBackground : observed state H(Xb) at background
        - SimulatedObservationAtCurrentOptimum : observed state H(X) at the current optimal state
        - SimulatedObservationAtCurrentState : observed state H(X) at the current state
        - SimulatedObservationAtOptimum : observed state H(Xa) at the optimum
        - SimulationQuantiles : observed states H(X) for the requested quantiles
        Additional variables to store can be added in the initialisation of
        the elementary algorithm that inherits from this class.
        """
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()    # memory usage probe used in _pre_run/_post_run
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__required_parameters = {}
        self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
        # One Persistence container per storable variable, keyed by name
        self.StoredVariables = {}
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
        self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
        self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
    def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
        "Pre-computation: parameter update, input validation, optimizer verbosity"
        # NOTE(review): this chunk is a fragmentary view — some "else:" lines
        # are missing; orphaned branch lines are kept as-is below.
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        # Update self._parameters with Parameters
        self.__setParameters(Parameters)
        # Corrections and complements
        def __test_vvalue(argument, variable, argname):
            "Validate an input vector against the declared requirements"
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
                logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
                logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        def __test_cvalue(argument, variable, argname):
            "Validate an error covariance matrix against the declared requirements"
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
                logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
                logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( Q, "Q", "Evolution" )
        # Bounds are taken into account only when given as a non-empty list/tuple
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
            self._parameters["Bounds"] = None
        # Verbosity of the underlying optimizers follows the logging level
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
                self._parameters["optmessages"] = 15
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
                self._parameters["optmessages"] = 15
    def _post_run(self,_oH=None):
        "Post-computation: derived a posteriori quantities and final traces"
        if ("StoreSupplementaryCalculations" in self._parameters) and \
                "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Normalize the covariance into a correlation matrix:
                    # C = D^-1/2 A D^-1/2 with D = diag(A)
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        # Final traces: operator call counts (0 = total, 3 = cache hits), memory
        logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
        logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Terminé", self._name)
727 def _toStore(self, key):
728 "True if in StoreSupplementaryCalculations, else False"
729 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Returns one of the stored variables identified by the key, or the
        dictionary of all available variables when no key is given. The
        variables are returned directly as objects, so the individual access
        methods are those of the persistence classes.
        """
        # NOTE(review): the "if key is not None: / else:" lines surrounding
        # the two returns below are not visible in this fragmentary view.
            return self.StoredVariables[key]
            return self.StoredVariables
744 def __contains__(self, key=None):
745 "D.__contains__(k) -> True if D has a key k, else False"
746 return key in self.StoredVariables
    # NOTE(review): the "def keys(self):" and "def pop(self, k, d):" lines are
    # not visible in this fragmentary view; the fragments below belong to them.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()

        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.pop(k, d)
            raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Must implement the elementary assimilation calculation operation in
        its most natural mathematical form; subclasses override this method.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
        """
        Defines, in the algorithm, the required parameters and their default
        characteristics.
        """
        # NOTE(review): fragmentary view — the guard condition around the
        # ValueError and most of the characteristics dictionary entries are
        # not visible here.
            raise ValueError("A name is mandatory to define a required parameter.")
        self.__required_parameters[name] = {
            "typecast" : typecast,
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
795 def getRequiredParameters(self, noDetails=True):
797 Renvoie la liste des noms de paramètres requis ou directement le
798 dictionnaire des paramètres requis.
801 return sorted(self.__required_parameters.keys())
803 return self.__required_parameters
# Resolve and validate the value of a declared required parameter: apply the
# typecast, then check minval/maxval bounds and listval membership.
# NOTE(review): sampled listing — the raise for "no value and no default"
# (after line 815), the "else:" before line 821, the for-loop over list items
# (before line 832) and the final "return __val" are missing here.
805 def setParameterValue(self, name=None, value=None):
807 Renvoie la valeur d'un paramètre requis de manière contrôlée
809 default = self.__required_parameters[name]["default"]
810 typecast = self.__required_parameters[name]["typecast"]
811 minval = self.__required_parameters[name]["minval"]
812 maxval = self.__required_parameters[name]["maxval"]
813 listval = self.__required_parameters[name]["listval"]
815 if value is None and default is None:
# Fall back to the declared default when no explicit value was given.
817 elif value is None and default is not None:
818 if typecast is None: __val = default
819 else: __val = typecast( default )
821 if typecast is None: __val = value
822 else: __val = typecast( value )
# Bounds are checked element-wise so list/array parameters are supported.
824 if minval is not None and (numpy.array(__val, float) < minval).any():
825 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
826 if maxval is not None and (numpy.array(__val, float) > maxval).any():
827 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
828 if listval is not None:
# For sequence-valued parameters, each element must belong to listval.
829 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
832 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
833 elif __val not in listval:
834 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
837 def requireInputArguments(self, mandatory=(), optional=()):
839 Permet d'imposer des arguments requises en entrée
841 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
842 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
# Store the received parameters in the internal dictionary, passing each
# declared required parameter through setParameterValue for validation.
# NOTE(review): the mutable default "fromDico={}" is benign here since the
# dict is only read, never mutated — but "fromDico=None" would be safer.
# NOTE(review): sampled listing — the "else:" before line 853 is missing.
844 def __setParameters(self, fromDico={}):
846 Permet de stocker les paramètres reçus dans le dictionnaire interne.
848 self._parameters.update( fromDico )
849 for k in self.__required_parameters.keys():
850 if k in fromDico.keys():
# Explicitly provided values are validated and typecast.
851 self._parameters[k] = self.setParameterValue(k,fromDico[k])
# Otherwise the declared default is resolved.
853 self._parameters[k] = self.setParameterValue(k)
854 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
856 # ==============================================================================
# General interface class holding an algorithm choice and its parameters.
# NOTE(review): sampled listing — the full constructor signature (asDict,
# asScript, asAlgorithm keyword parameters and their defaults) and several
# intermediate branches are missing between the numbered lines below.
857 class AlgorithmAndParameters(object):
859 Classe générale d'interface d'action pour l'algorithme et ses paramètres
862 name = "GenericAlgorithm",
869 self.__name = str(name)
# Algorithm container and bookkeeping for the lazily-imported module.
873 self.__algorithm = {}
874 self.__algorithmFile = None
875 self.__algorithmName = None
877 self.updateParameters( asDict, asScript )
# The algorithm name may come from a script instead of a direct argument.
879 if asAlgorithm is None and asScript is not None:
880 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
884 if __Algo is not None:
885 self.__A = str(__Algo)
886 self.__P.update( {"Algorithm":self.__A} )
888 self.__setAlgorithm( self.__A )
# Update the internal parameter dictionary, either from an explicit dict or
# from a script exposing a "Parameters" variable.
# NOTE(review): sampled listing — the signature continuation lines (asDict,
# asScript parameters) and the "else" assigning __Dict = asDict are missing.
890 def updateParameters(self,
894 "Mise a jour des parametres"
895 if asDict is None and asScript is not None:
896 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
900 if __Dict is not None:
901 self.__P.update( dict(__Dict) )
# Launch the assimilation computation in pure Python: unpack the case
# dictionary into the internal Xb/Y/U/HO/EM/CM/B/R/Q slots (unwrapping
# objects that expose getO()), validate shapes, then run the algorithm.
# NOTE(review): sampled listing — the keyword arguments of the final
# self.__algorithm.run(...) call (lines 929-937, 939-941) are missing.
903 def executePythonScheme(self, asDictAO = None):
904 "Permet de lancer le calcul d'assimilation"
# Clear the operator-evaluation cache before a fresh run.
905 Operator.CM.clearCache()
907 if not isinstance(asDictAO, dict):
908 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Each case entry may be either a wrapper object with getO() or a raw value.
909 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
910 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
911 else: self.__Xb = None
912 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
913 else: self.__Y = asDictAO["Observation"]
914 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
915 else: self.__U = asDictAO["ControlInput"]
916 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
917 else: self.__HO = asDictAO["ObservationOperator"]
918 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
919 else: self.__EM = asDictAO["EvolutionModel"]
920 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
921 else: self.__CM = asDictAO["ControlModel"]
922 self.__B = asDictAO["BackgroundError"]
923 self.__R = asDictAO["ObservationError"]
924 self.__Q = asDictAO["EvolutionError"]
# Cross-check all operator/vector/matrix dimensions before running.
926 self.__shape_validate()
928 self.__algorithm.run(
938 Parameters = self.__P,
# Launch the assimilation computation through a YACS XML scheme inside a
# SALOME session: load the schema, check parsing/validity/consistency, then
# execute it and report errors.
# NOTE(review): sampled listing — the imports of SALOMERuntime/pilot/loader,
# several try/except scaffolding lines, and the executor RunW call around
# line 991 are missing between the numbered statements.
942 def executeYACSScheme(self, FileName=None):
943 "Permet de lancer le calcul d'assimilation"
944 if FileName is None or not os.path.exists(FileName):
945 raise ValueError("a YACS file name has to be given for YACS execution.\n")
947 __file = os.path.abspath(FileName)
948 logging.debug("The YACS file name is \"%s\"."%__file)
# YACS execution is only possible inside a fully-loaded SALOME environment.
949 if not PlatformInfo.has_salome or \
950 not PlatformInfo.has_yacs or \
951 not PlatformInfo.has_adao:
952 raise ImportError("\n\n"+\
953 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
954 "Please load the right environnement before trying to use it.\n")
959 SALOMERuntime.RuntimeSALOME_setRuntime()
961 r = pilot.getRuntime()
962 xmlLoader = loader.YACSLoader()
963 xmlLoader.registerProcCataLoader()
# Register the schema's own catalog so its components resolve.
965 catalogAd = r.loadCatalog("proc", __file)
966 r.addCatalog(catalogAd)
971 p = xmlLoader.load(__file)
972 except IOError as ex:
973 print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Parsing problems are reported but collected through the proc logger.
975 logger = p.getLogger("parser")
976 if not logger.isEmpty():
977 print("The imported YACS XML schema has errors on parsing:")
978 print(logger.getStr())
981 print("The YACS XML schema is not valid and will not be executed:")
982 print(p.getErrorReport())
# Link consistency is checked without stopping at the first problem.
984 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
985 p.checkConsistency(info)
986 if info.areWarningsOrErrors():
987 print("The YACS XML schema is not coherent and will not be executed:")
988 print(info.getGlobalRepr())
990 e = pilot.ExecutorSwig()
992 if p.getEffectiveState() != pilot.DONE:
993 print(p.getErrorReport())
# Look up a key first among the algorithm's stored variables, then among the
# parameters.
# NOTE(review): sampled listing — the fallback branch for an absent key
# (lines 1003-1005) is missing here.
997 def get(self, key = None):
998 "Vérifie l'existence d'une clé de variable ou de paramètres"
999 if key in self.__algorithm:
1000 return self.__algorithm.get( key )
1001 elif key in self.__P:
1002 return self.__P[key]
1006 def pop(self, k, d):
1007 "Necessaire pour le pickling"
1008 return self.__algorithm.pop(k, d)
1010 def getAlgorithmRequiredParameters(self, noDetails=True):
1011 "Renvoie la liste des paramètres requis selon l'algorithme"
1012 return self.__algorithm.getRequiredParameters(noDetails)
# Attach, detach and query data observers on the algorithm's stored
# variables. All three methods require a real algorithm object (not the
# initial empty dict) exposing StoredVariables.
# NOTE(review): sampled listing — several call-argument lines (e.g. the
# HookFunction/Scheduler keywords around lines 1023-1027 and 1037-1040), the
# False-returning branches of hasObserver, and the "def keys" header before
# line 1051 are missing.
1014 def setObserver(self, __V, __O, __I, __S):
1015 if self.__algorithm is None \
1016 or isinstance(self.__algorithm, dict) \
1017 or not hasattr(self.__algorithm,"StoredVariables"):
1018 raise ValueError("No observer can be build before choosing an algorithm.")
1019 if __V not in self.__algorithm:
1020 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1022 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1025 HookParameters = __I,
1028 def removeObserver(self, __V, __O, __A = False):
1029 if self.__algorithm is None \
1030 or isinstance(self.__algorithm, dict) \
1031 or not hasattr(self.__algorithm,"StoredVariables"):
1032 raise ValueError("No observer can be removed before choosing an algorithm.")
1033 if __V not in self.__algorithm:
1034 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1036 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1041 def hasObserver(self, __V):
1042 if self.__algorithm is None \
1043 or isinstance(self.__algorithm, dict) \
1044 or not hasattr(self.__algorithm,"StoredVariables"):
1046 if __V not in self.__algorithm:
1048 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# keys(): union of the algorithm's variable names and the parameter names.
1051 return list(self.__algorithm.keys()) + list(self.__P.keys())
1053 def __contains__(self, key=None):
1054 "D.__contains__(k) -> True if D has a key k, else False"
1055 return key in self.__algorithm or key in self.__P
# Fragments of __repr__ and __str__: both render the algorithm name followed
# by the parameter dictionary.
# NOTE(review): the "def __repr__(self):" / "def __str__(self):" header lines
# are missing from this sampled listing.
1058 "x.__repr__() <==> repr(x)"
1059 return repr(self.__A)+", "+repr(self.__P)
1062 "x.__str__() <==> str(x)"
1063 return str(self.__A)+", "+str(self.__P)
# Select and instantiate the algorithm: locate "<choice>.py" under a
# "daAlgorithms" directory on sys.path, import it with a temporarily
# extended sys.path, and instantiate its ElementaryAlgorithm class.
# NOTE(review): sampled listing — the "if choice is None:" guard before line
# 1072, the module_path initialisation before the search loop, and the try:
# line before 1089 are missing here.
1065 def __setAlgorithm(self, choice = None ):
1067 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1068 d'assimilation. L'argument est un champ caractère se rapportant au nom
1069 d'un algorithme réalisant l'opération sur les arguments fixes.
1072 raise ValueError("Error: algorithm choice has to be given")
# An algorithm can be chosen only once per object.
1073 if self.__algorithmName is not None:
1074 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1075 daDirectory = "daAlgorithms"
1077 # Recherche explicitement le fichier complet
1078 # ------------------------------------------
1080 for directory in sys.path:
1081 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1082 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1083 if module_path is None:
1084 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1086 # Importe le fichier complet comme un module
1087 # ------------------------------------------
# sys.path is temporarily prepended with the algorithm directory, then
# restored whether or not the import succeeds below.
1089 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1090 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1091 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1092 raise ImportError("this module does not define a valid elementary algorithm.")
1093 self.__algorithmName = str(choice)
1094 sys.path = sys_path_tmp ; del sys_path_tmp
1095 except ImportError as e:
1096 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1098 # Instancie un objet du type élémentaire du fichier
1099 # -------------------------------------------------
1100 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# Validate that the shapes of all vectors (Xb, Y, U), covariance matrices
# (B, R, Q) and operators (HO, EM, CM) are mutually consistent. Each object
# may expose its shape as a tuple attribute or as a shape() method; absent
# objects get a zero shape so the later checks are skipped.
# NOTE(review): sampled listing — the "else:" before the raise at line 1201
# and a few blank/continuation lines are missing between numbered statements.
1103 def __shape_validate(self):
1105 Validation de la correspondance correcte des tailles des variables et
1106 des matrices s'il y en a.
1108 if self.__Xb is None: __Xb_shape = (0,)
1109 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1110 elif hasattr(self.__Xb,"shape"):
1111 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1112 else: __Xb_shape = self.__Xb.shape()
1113 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1115 if self.__Y is None: __Y_shape = (0,)
1116 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1117 elif hasattr(self.__Y,"shape"):
1118 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1119 else: __Y_shape = self.__Y.shape()
1120 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1122 if self.__U is None: __U_shape = (0,)
1123 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1124 elif hasattr(self.__U,"shape"):
1125 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1126 else: __U_shape = self.__U.shape()
1127 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1129 if self.__B is None: __B_shape = (0,0)
1130 elif hasattr(self.__B,"shape"):
1131 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1132 else: __B_shape = self.__B.shape()
1133 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1135 if self.__R is None: __R_shape = (0,0)
1136 elif hasattr(self.__R,"shape"):
1137 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1138 else: __R_shape = self.__R.shape()
1139 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1141 if self.__Q is None: __Q_shape = (0,0)
1142 elif hasattr(self.__Q,"shape"):
1143 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1144 else: __Q_shape = self.__Q.shape()
1145 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
# Operators are dictionaries of Direct/Tangent/Adjoint entries; the shape of
# the Direct entry is used as the operator shape.
1147 if len(self.__HO) == 0: __HO_shape = (0,0)
1148 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1149 elif hasattr(self.__HO["Direct"],"shape"):
1150 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1151 else: __HO_shape = self.__HO["Direct"].shape()
1152 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1154 if len(self.__EM) == 0: __EM_shape = (0,0)
1155 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1156 elif hasattr(self.__EM["Direct"],"shape"):
1157 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1158 else: __EM_shape = self.__EM["Direct"].shape()
1159 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1161 if len(self.__CM) == 0: __CM_shape = (0,0)
1162 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1163 elif hasattr(self.__CM["Direct"],"shape"):
1164 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1165 else: __CM_shape = self.__CM["Direct"].shape()
1166 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1168 # Vérification des conditions
1169 # ---------------------------
# Vectors must be one-dimensional (or degenerate 2D with one unit axis).
1170 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1171 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1172 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1173 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
# Covariance matrices and the evolution operator must be square.
1175 if not( min(__B_shape) == max(__B_shape) ):
1176 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1177 if not( min(__R_shape) == max(__R_shape) ):
1178 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1179 if not( min(__Q_shape) == max(__Q_shape) ):
1180 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1181 if not( min(__EM_shape) == max(__EM_shape) ):
1182 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
# Cross-compatibility checks between operators, vectors and covariances.
1184 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1185 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1186 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1187 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1188 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1189 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1190 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1191 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# Special case: EnsembleBlue accepts a background given as an ensemble, which
# is reshaped into a persistent series of member vectors.
1193 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1194 if self.__algorithmName in ["EnsembleBlue",]:
1195 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1196 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1197 for member in asPersistentVector:
1198 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1199 __Xb_shape = min(__B_shape)
1201 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1203 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1204 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1206 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1207 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1209 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1210 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
# When bounds are supplied, one pair is required per state component.
1212 if ("Bounds" in self.__P) \
1213 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1214 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1215 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1216 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1220 # ==============================================================================
# General interface class for a regulation algorithm and its parameters,
# mirroring AlgorithmAndParameters but without instantiating the algorithm.
# NOTE(review): sampled listing — the constructor signature continuation
# (asDict, asScript, asAlgorithm), the initialisation of self.__P, the
# "else" branches, and get()'s fallback return are missing.
1221 class RegulationAndParameters(object):
1223 Classe générale d'interface d'action pour la régulation et ses paramètres
1226 name = "GenericRegulation",
1233 self.__name = str(name)
# The algorithm name may come from a script or be given directly.
1236 if asAlgorithm is None and asScript is not None:
1237 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
1239 __Algo = asAlgorithm
1241 if asDict is None and asScript is not None:
1242 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1246 if __Dict is not None:
1247 self.__P.update( dict(__Dict) )
1249 if __Algo is not None:
1250 self.__P.update( {"Algorithm":self.__A} )
1252 def get(self, key = None):
1253 "Vérifie l'existence d'une clé de variable ou de paramètres"
1255 return self.__P[key]
1259 # ==============================================================================
# General observer interface: normalises the observed variable name(s) and
# info string(s), builds the observer callable from a string / template /
# script / object, then registers it on the algorithm's variables.
# NOTE(review): sampled listing — the constructor signature continuation
# (onVariable, withInfo, asTemplate, asString, asScript, asObsObject,
# withAlgo, scheduledBy parameters), some "else" branches, and the
# per-variable unpacking before line 1317 are missing.
1260 class DataObserver(object):
1262 Classe générale d'interface de type observer
1265 name = "GenericObserver",
1277 self.__name = str(name)
1282 if onVariable is None:
1283 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
# A list of variables gets one info string each (replicated when scalar).
1284 elif type(onVariable) in (tuple, list):
1285 self.__V = tuple(map( str, onVariable ))
1286 if withInfo is None:
1289 self.__I = (str(withInfo),)*len(self.__V)
1290 elif isinstance(onVariable, str):
1291 self.__V = (onVariable,)
1292 if withInfo is None:
1293 self.__I = (onVariable,)
1295 self.__I = (str(withInfo),)
1297 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer function body may come from a raw string, a named template,
# or a script file; precedence is in that order.
1299 if asString is not None:
1300 __FunctionText = asString
1301 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1302 __FunctionText = Templates.ObserverTemplates[asTemplate]
1303 elif asScript is not None:
1304 __FunctionText = ImportFromScript(asScript).getstring()
1307 __Function = ObserverF(__FunctionText)
1309 if asObsObject is not None:
1310 self.__O = asObsObject
1312 self.__O = __Function.getfunc()
# Register the observer on every requested variable of the algorithm.
1314 for k in range(len(self.__V)):
1317 if ename not in withAlgo:
1318 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1320 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1323 "x.__repr__() <==> repr(x)"
1324 return repr(self.__V)+"\n"+repr(self.__O)
1327 "x.__str__() <==> str(x)"
1328 return str(self.__V)+"\n"+str(self.__O)
1330 # ==============================================================================
# General state-vector interface: accepts data as a direct vector, a series
# of vectors (persistent), a script variable, or columns from data files,
# and normalises everything into either a numpy matrix column vector or a
# Persistence.OneVector series.
# NOTE(review): sampled listing — the constructor signature continuation
# (asVector, asScript, asDataFile, colNames, colMajor, scheduledBy
# parameters), several "else:" lines, and the getO()/predicate method
# headers near the end are missing between the numbered statements.
1331 class State(object):
1333 Classe générale d'interface de type état
1336 name = "GenericVector",
1338 asPersistentVector = None,
1344 toBeChecked = False,
1347 Permet de définir un vecteur :
1348 - asVector : entrée des données, comme un vecteur compatible avec le
1349 constructeur de numpy.matrix, ou "True" si entrée par script.
1350 - asPersistentVector : entrée des données, comme une série de vecteurs
1351 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1352 type Persistence, ou "True" si entrée par script.
1353 - asScript : si un script valide est donné contenant une variable
1354 nommée "name", la variable est de type "asVector" (par défaut) ou
1355 "asPersistentVector" selon que l'une de ces variables est placée à
1357 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1358 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1359 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1360 nommée "name"), on récupère les colonnes et on les range ligne après
1361 ligne (colMajor=False) ou colonne après colonne (colMajor=True). La
1362 variable résultante est de type "asVector" (par défaut) ou
1363 "asPersistentVector" selon que l'une de ces variables est placée à
1366 self.__name = str(name)
1367 self.__check = bool(toBeChecked)
1371 self.__is_vector = False
1372 self.__is_series = False
# Source selection: script input takes precedence, then data files, then
# the direct vector/series arguments.
1374 if asScript is not None:
1375 __Vector, __Series = None, None
1376 if asPersistentVector:
1377 __Series = ImportFromScript(asScript).getvalue( self.__name )
1379 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1380 elif asDataFile is not None:
1381 __Vector, __Series = None, None
1382 if asPersistentVector:
1383 if colNames is not None:
1384 __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
1386 __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already column-major, hence the inverted transposition
# logic with respect to the colMajor flag.
1387 if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1388 __Series = numpy.transpose(__Series)
1389 elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1390 __Series = numpy.transpose(__Series)
1392 if colNames is not None:
1393 __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
1395 __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1397 __Vector = numpy.ravel(__Vector, order = "F")
1399 __Vector = numpy.ravel(__Vector, order = "C")
1401 __Vector, __Series = asVector, asPersistentVector
# Normalise into a column numpy.matrix, or a persistent series of them.
1403 if __Vector is not None:
1404 self.__is_vector = True
1405 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1406 self.shape = self.__V.shape
1407 self.size = self.__V.size
1408 elif __Series is not None:
1409 self.__is_series = True
1410 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1411 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string series — acceptable only for trusted
# study scripts, never for external input.
1412 if isinstance(__Series, str): __Series = eval(__Series)
1413 for member in __Series:
1414 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1417 if isinstance(self.__V.shape, (tuple, list)):
1418 self.shape = self.__V.shape
1420 self.shape = self.__V.shape()
1421 if len(self.shape) == 1:
1422 self.shape = (self.shape[0],1)
1423 self.size = self.shape[0] * self.shape[1]
1425 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1427 if scheduledBy is not None:
1428 self.__T = scheduledBy
# getO(): return the stored object, optionally with its scheduler.
1430 def getO(self, withScheduler=False):
1432 return self.__V, self.__T
1433 elif self.__T is None:
1439 "Vérification du type interne"
1440 return self.__is_vector
1443 "Vérification du type interne"
1444 return self.__is_series
1447 "x.__repr__() <==> repr(x)"
1448 return repr(self.__V)
1451 "x.__str__() <==> str(x)"
1452 return str(self.__V)
1454 # ==============================================================================
# General covariance interface: the matrix may be stored as a full matrix,
# a diagonal vector, a scalar multiplying the identity, or an arbitrary
# object exposing the required matrix-like methods.
# NOTE(review): sampled listing — the constructor signature continuation
# (asCovObject, asScript parameters), the branch guards selecting which
# script value to read (lines 1496-1502), the scalar shape/size assignments
# (1512-1513), the object checks around 1526, and the tail calling
# self.__validate() are missing between the numbered statements.
1455 class Covariance(object):
1457 Classe générale d'interface de type covariance
1460 name = "GenericCovariance",
1461 asCovariance = None,
1462 asEyeByScalar = None,
1463 asEyeByVector = None,
1466 toBeChecked = False,
1469 Permet de définir une covariance :
1470 - asCovariance : entrée des données, comme une matrice compatible avec
1471 le constructeur de numpy.matrix
1472 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1473 multiplicatif d'une matrice de corrélation identité, aucune matrice
1474 n'étant donc explicitement à donner
1475 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1476 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1477 n'étant donc explicitement à donner
1478 - asCovObject : entrée des données comme un objet python, qui a les
1479 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1480 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1481 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1482 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1483 pleine doit être vérifié
1485 self.__name = str(name)
1486 self.__check = bool(toBeChecked)
# Exactly one of the four storage kinds ends up True.
1489 self.__is_scalar = False
1490 self.__is_vector = False
1491 self.__is_matrix = False
1492 self.__is_object = False
1494 if asScript is not None:
1495 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1497 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1499 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1501 __Object = ImportFromScript(asScript).getvalue( self.__name )
1503 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1505 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar storage: a single positive variance multiplying the identity.
1507 if __Scalar is not None:
1508 if numpy.matrix(__Scalar).size != 1:
1509 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1510 self.__is_scalar = True
1511 self.__C = numpy.abs( float(__Scalar) )
# Vector storage: the diagonal of variances; shape is the implied square.
1514 elif __Vector is not None:
1515 self.__is_vector = True
1516 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1517 self.shape = (self.__C.size,self.__C.size)
1518 self.size = self.__C.size**2
1519 elif __Matrix is not None:
1520 self.__is_matrix = True
1521 self.__C = numpy.matrix( __Matrix, float )
1522 self.shape = self.__C.shape
1523 self.size = self.__C.size
# Object storage: duck-typed matrix-like object with mandatory methods.
1524 elif __Object is not None:
1525 self.__is_object = True
1527 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1528 if not hasattr(self.__C,at):
1529 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1530 if hasattr(self.__C,"shape"):
1531 self.shape = self.__C.shape
1534 if hasattr(self.__C,"size"):
1535 self.size = self.__C.size
1540 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Internal consistency checks: squareness for matrix/object storage,
# positivity for scalar/vector storage, and (when requested or when logging
# verbosely) an SPD check via a Cholesky factorisation attempt.
# NOTE(review): sampled listing — the try:/except lines wrapping the two
# Cholesky attempts (1557/1559 and 1562/1564) are missing here.
1544 def __validate(self):
1546 if self.__C is None:
1547 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1548 if self.ismatrix() and min(self.shape) != max(self.shape):
1549 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1550 if self.isobject() and min(self.shape) != max(self.shape):
1551 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1552 if self.isscalar() and self.__C <= 0:
1553 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1554 if self.isvector() and (self.__C <= 0).any():
1555 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# SPD check by attempting a Cholesky factorisation (failure means not SPD).
1556 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1558 L = numpy.linalg.cholesky( self.__C )
1560 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1561 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1563 L = self.__C.cholesky()
1565 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Storage-kind predicates: exactly one of the four internal flags is set at
# construction time.
# NOTE(review): the "def isscalar/isvector/ismatrix/isobject(self):" header
# lines are missing from this sampled listing.
1568 "Vérification du type interne"
1569 return self.__is_scalar
1572 "Vérification du type interne"
1573 return self.__is_vector
1576 "Vérification du type interne"
1577 return self.__is_matrix
1580 "Vérification du type interne"
1581 return self.__is_object
# getI / getT / cholesky: each returns a NEW Covariance wrapping the
# transformed content, preserving the storage kind (matrix, vector diagonal,
# scalar, or delegated object).
# NOTE(review): the "def getI/getT/cholesky(self):" headers and the leading
# "if self.ismatrix():" lines are missing from this sampled listing.
# Inverse: matrix inverse, element-wise reciprocal for diagonal/scalar.
1586 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1587 elif self.isvector():
1588 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1589 elif self.isscalar():
1590 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1591 elif self.isobject():
1592 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1594 return None # Indispensable
# Transpose: diagonal and scalar storages are symmetric, hence unchanged.
1599 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1600 elif self.isvector():
1601 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1602 elif self.isscalar():
1603 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1604 elif self.isobject():
1605 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
# Cholesky factor: square root of the diagonal/scalar variances.
1608 "Décomposition de Cholesky"
1610 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1611 elif self.isvector():
1612 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1613 elif self.isscalar():
1614 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1615 elif self.isobject() and hasattr(self.__C,"cholesky"):
1616 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
# Inverse of the Cholesky factor, returned as a new Covariance with the same
# storage kind.
# NOTE(review): the leading "if self.ismatrix():" line before 1621 is missing
# from this sampled listing.
1618 def choleskyI(self):
1619 "Inversion de la décomposition de Cholesky"
1621 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1622 elif self.isvector():
1623 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1624 elif self.isscalar():
1625 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1626 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1627 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
# diag / asfullmatrix / trace: materialise the diagonal, the full matrix or
# the trace. For scalar storage an explicit size (msize) is mandatory since
# the implied identity has no intrinsic dimension.
# NOTE(review): sampled listing — several branch headers ("if
# self.ismatrix():", the vector returns, "else:" lines, and the object
# fallbacks) are missing between the numbered statements.
1629 def diag(self, msize=None):
1630 "Diagonale de la matrice"
1632 return numpy.diag(self.__C)
1633 elif self.isvector():
1635 elif self.isscalar():
1637 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1639 return self.__C * numpy.ones(int(msize))
1640 elif self.isobject():
1641 return self.__C.diag()
# Full dense matrix: diagonal expansion for vector, scaled identity for
# scalar storage.
1643 def asfullmatrix(self, msize=None):
1647 elif self.isvector():
1648 return numpy.matrix( numpy.diag(self.__C), float )
1649 elif self.isscalar():
1651 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1653 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1654 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1655 return self.__C.asfullmatrix()
# Trace: sum of diagonal variances; scalar storage needs the size.
1657 def trace(self, msize=None):
1658 "Trace de la matrice"
1660 return numpy.trace(self.__C)
1661 elif self.isvector():
1662 return float(numpy.sum(self.__C))
1663 elif self.isscalar():
1665 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1667 return self.__C * int(msize)
1668 elif self.isobject():
1669 return self.__C.trace()
1675 "x.__repr__() <==> repr(x)"
1676 return repr(self.__C)
1679 "x.__str__() <==> str(x)"
1680 return str(self.__C)
1682 def __add__(self, other):
1683 "x.__add__(y) <==> x+y"
1684 if self.ismatrix() or self.isobject():
1685 return self.__C + numpy.asmatrix(other)
1686 elif self.isvector() or self.isscalar():
1687 _A = numpy.asarray(other)
1688 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1689 return numpy.asmatrix(_A)
1691 def __radd__(self, other):
1692 "x.__radd__(y) <==> y+x"
1693 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1695 def __sub__(self, other):
1696 "x.__sub__(y) <==> x-y"
1697 if self.ismatrix() or self.isobject():
1698 return self.__C - numpy.asmatrix(other)
1699 elif self.isvector() or self.isscalar():
1700 _A = numpy.asarray(other)
1701 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1702 return numpy.asmatrix(_A)
1704 def __rsub__(self, other):
1705 "x.__rsub__(y) <==> y-x"
1706 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1709 "x.__neg__() <==> -x"
1712 def __mul__(self, other):
1713 "x.__mul__(y) <==> x*y"
1714 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1715 return self.__C * other
1716 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1717 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1718 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1719 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1720 return self.__C * numpy.asmatrix(other)
1722 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1723 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1724 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1725 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1726 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1727 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1729 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1730 elif self.isscalar() and isinstance(other,numpy.matrix):
1731 return self.__C * other
1732 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1733 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1734 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1736 return self.__C * numpy.asmatrix(other)
1737 elif self.isobject():
1738 return self.__C.__mul__(other)
1740 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1742 def __rmul__(self, other):
1743 "x.__rmul__(y) <==> y*x"
1744 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1745 return other * self.__C
1746 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1747 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1748 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1749 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1750 return numpy.asmatrix(other) * self.__C
1752 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1753 elif self.isvector() and isinstance(other,numpy.matrix):
1754 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1755 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1756 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1757 return numpy.asmatrix(numpy.array(other) * self.__C)
1759 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1760 elif self.isscalar() and isinstance(other,numpy.matrix):
1761 return other * self.__C
1762 elif self.isobject():
1763 return self.__C.__rmul__(other)
1765 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1768 "x.__len__() <==> len(x)"
1769 return self.shape[0]
1771 # ==============================================================================
1772 class ObserverF(object):
1774 Creation d'une fonction d'observateur a partir de son texte
1776 def __init__(self, corps=""):
1777 self.__corps = corps
1778 def func(self,var,info):
1779 "Fonction d'observation"
1782 "Restitution du pointeur de fonction dans l'objet"
1785 # ==============================================================================
1786 class CaseLogger(object):
1788 Conservation des commandes de creation d'un cas
1790 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
1791 self.__name = str(__name)
1792 self.__objname = str(__objname)
1793 self.__logSerie = []
1794 self.__switchoff = False
1796 "TUI" :Interfaces._TUIViewer,
1797 "SCD" :Interfaces._SCDViewer,
1798 "YACS":Interfaces._YACSViewer,
1801 "TUI" :Interfaces._TUIViewer,
1802 "COM" :Interfaces._COMViewer,
1804 if __addViewers is not None:
1805 self.__viewers.update(dict(__addViewers))
1806 if __addLoaders is not None:
1807 self.__loaders.update(dict(__addLoaders))
1809 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
1810 "Enregistrement d'une commande individuelle"
1811 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
1812 if "self" in __keys: __keys.remove("self")
1813 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
1815 self.__switchoff = True
1817 self.__switchoff = False
1819 def dump(self, __filename=None, __format="TUI", __upa=""):
1820 "Restitution normalisée des commandes"
1821 if __format in self.__viewers:
1822 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
1824 raise ValueError("Dumping as \"%s\" is not available"%__format)
1825 return __formater.dump(__filename, __upa)
1827 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
1828 "Chargement normalisé des commandes"
1829 if __format in self.__loaders:
1830 __formater = self.__loaders[__format]()
1832 raise ValueError("Loading as \"%s\" is not available"%__format)
1833 return __formater.load(__filename, __content, __object)
1835 # ==============================================================================
1836 def MultiFonction( __xserie, _extraArguments = None, _sFunction = lambda x: x ):
1838 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
1839 correspondante de valeurs de la fonction en argument
1841 if not PlatformInfo.isIterable( __xserie ):
1842 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
1845 if _extraArguments is None:
1846 for __xvalue in __xserie:
1847 __multiHX.append( _sFunction( __xvalue ) )
1848 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
1849 for __xvalue in __xserie:
1850 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
1851 elif _extraArguments is not None and isinstance(_extraArguments, dict):
1852 for __xvalue in __xserie:
1853 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
1855 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
1859 # ==============================================================================
1860 def CostFunction3D(_x,
1861 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
1862 _HmX = None, # Simulation déjà faite de Hm(x)
1863 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
1868 _SIV = False, # A résorber pour la 8.0
1869 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
1870 _nPS = 0, # nbPreviousSteps
1871 _QM = "DA", # QualityMeasure
1872 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
1873 _fRt = False, # Restitue ou pas la sortie étendue
1874 _sSc = True, # Stocke ou pas les SSC
1877 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
1878 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
1879 DFO, QuantileRegression
1885 for k in ["CostFunctionJ",
1891 "SimulatedObservationAtCurrentOptimum",
1892 "SimulatedObservationAtCurrentState",
1896 if hasattr(_SSV[k],"store"):
1897 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
1899 _X = numpy.asmatrix(numpy.ravel( _x )).T
1900 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
1901 _SSV["CurrentState"].append( _X )
1903 if _HmX is not None:
1907 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
1911 _HX = _Hm( _X, *_arg )
1912 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1914 if "SimulatedObservationAtCurrentState" in _SSC or \
1915 "SimulatedObservationAtCurrentOptimum" in _SSC:
1916 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
1918 if numpy.any(numpy.isnan(_HX)):
1919 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
1921 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
1922 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
1923 if _BI is None or _RI is None:
1924 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
1925 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
1926 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
1927 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
1928 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
1930 raise ValueError("Observation error covariance matrix has to be properly defined!")
1932 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
1933 elif _QM in ["LeastSquares", "LS", "L2"]:
1935 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
1936 elif _QM in ["AbsoluteValue", "L1"]:
1938 Jo = numpy.sum( numpy.abs(_Y - _HX) )
1939 elif _QM in ["MaximumError", "ME"]:
1941 Jo = numpy.max( numpy.abs(_Y - _HX) )
1942 elif _QM in ["QR", "Null"]:
1946 raise ValueError("Unknown asked quality measure!")
1948 J = float( Jb ) + float( Jo )
1951 _SSV["CostFunctionJb"].append( Jb )
1952 _SSV["CostFunctionJo"].append( Jo )
1953 _SSV["CostFunctionJ" ].append( J )
1955 if "IndexOfOptimum" in _SSC or \
1956 "CurrentOptimum" in _SSC or \
1957 "SimulatedObservationAtCurrentOptimum" in _SSC:
1958 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
1959 if "IndexOfOptimum" in _SSC:
1960 _SSV["IndexOfOptimum"].append( IndexMin )
1961 if "CurrentOptimum" in _SSC:
1962 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
1963 if "SimulatedObservationAtCurrentOptimum" in _SSC:
1964 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
1969 if _QM in ["QR"]: # Pour le QuantileRegression
1974 # ==============================================================================
if __name__ == "__main__":
    # Minimal self-identification banner printed when the module is
    # executed directly rather than imported.
    print('\n AUTODIAGNOSTIC \n')