1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2019 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence, PlatformInfo, Interfaces
36 from daCore import Templates
38 # ==============================================================================
39 class CacheManager(object):
41 Classe générale de gestion d'un cache de calculs
44 toleranceInRedundancy = 1.e-18,
45 lenghtOfRedundancy = -1,
48 Les caractéristiques de tolérance peuvent être modifées à la création.
50 self.__tolerBP = float(toleranceInRedundancy)
51 self.__lenghtOR = int(lenghtOfRedundancy)
52 self.__initlnOR = self.__lenghtOR
57 self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
58 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
60 def wasCalculatedIn(self, xValue ): #, info="" ):
61 "Vérifie l'existence d'un calcul correspondant à la valeur"
64 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
65 if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
66 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
68 if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
70 __HxV = self.__listOPCV[i][1]
71 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
75 def storeValueInX(self, xValue, HxValue ):
76 "Stocke un calcul correspondant à la valeur"
77 if self.__lenghtOR < 0:
78 self.__lenghtOR = 2 * xValue.size + 2
79 self.__initlnOR = self.__lenghtOR
80 while len(self.__listOPCV) > self.__lenghtOR:
81 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
82 self.__listOPCV.pop(0)
83 self.__listOPCV.append( (
84 copy.copy(numpy.ravel(xValue)),
86 numpy.linalg.norm(xValue),
91 self.__initlnOR = self.__lenghtOR
96 self.__lenghtOR = self.__initlnOR
98 # ==============================================================================
99 class Operator(object):
101 Classe générale d'interface de type opérateur simple
111 avoidingRedundancy = True,
112 inputAsMultiFunction = False,
113 enableMultiProcess = False,
114 extraArguments = None,
117 On construit un objet de ce type en fournissant, à l'aide de l'un des
118 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
121 - fromMethod : argument de type fonction Python
122 - fromMatrix : argument adapté au constructeur numpy.matrix
123 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
124 - inputAsMultiFunction : booléen indiquant une fonction explicitement
125 définie (ou pas) en multi-fonction
126 - extraArguments : arguments supplémentaires passés à la fonction de
127 base et ses dérivées (tuple ou dictionnaire)
129 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
130 self.__AvoidRC = bool( avoidingRedundancy )
131 self.__inputAsMF = bool( inputAsMultiFunction )
132 self.__mpEnabled = bool( enableMultiProcess )
133 self.__extraArgs = extraArguments
134 if fromMethod is not None and self.__inputAsMF:
135 self.__Method = fromMethod # logtimer(fromMethod)
137 self.__Type = "Method"
138 elif fromMethod is not None and not self.__inputAsMF:
139 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
141 self.__Type = "Method"
142 elif fromMatrix is not None:
144 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
145 self.__Type = "Matrix"
151 def disableAvoidingRedundancy(self):
153 Operator.CM.disable()
155 def enableAvoidingRedundancy(self):
160 Operator.CM.disable()
166 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
168 Permet de restituer le résultat de l'application de l'opérateur à une
169 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
170 argument devant a priori être du bon type.
172 - les arguments par série sont :
173 - xValue : argument adapté pour appliquer l'opérateur
174 - HValue : valeur précalculée de l'opérateur en ce point
175 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
182 if HValue is not None:
186 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
188 if _HValue is not None:
189 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
191 for i in range(len(_HValue)):
192 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
194 Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
199 for i, xv in enumerate(_xValue):
201 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
203 __alreadyCalculated = False
205 if __alreadyCalculated:
206 self.__addOneCacheCall()
209 if self.__Matrix is not None:
210 self.__addOneMatrixCall()
211 _hv = self.__Matrix * xv
213 self.__addOneMethodCall()
217 HxValue.append( _hv )
219 if len(_xserie)>0 and self.__Matrix is None:
220 if self.__extraArgs is None:
221 _hserie = self.__Method( _xserie ) # Calcul MF
223 _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
224 if not hasattr(_hserie, "pop"):
225 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
231 Operator.CM.storeValueInX(_xv,_hv)
233 if argsAsSerie: return HxValue
234 else: return HxValue[-1]
236 def appliedControledFormTo(self, paires, argsAsSerie = False):
238 Permet de restituer le résultat de l'application de l'opérateur à des
239 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
240 argument devant a priori être du bon type. Si la uValue est None,
241 on suppose que l'opérateur ne s'applique qu'à xValue.
243 - paires : les arguments par paire sont :
244 - xValue : argument X adapté pour appliquer l'opérateur
245 - uValue : argument U adapté pour appliquer l'opérateur
246 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
248 if argsAsSerie: _xuValue = paires
249 else: _xuValue = (paires,)
250 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
252 if self.__Matrix is not None:
254 for paire in _xuValue:
255 _xValue, _uValue = paire
256 self.__addOneMatrixCall()
257 HxValue.append( self.__Matrix * _xValue )
260 for paire in _xuValue:
262 _xValue, _uValue = paire
263 if _uValue is not None:
264 _xuValue.append( paire )
266 _xuValue.append( _xValue )
267 self.__addOneMethodCall( len(_xuValue) )
268 if self.__extraArgs is None:
269 HxValue = self.__Method( _xuValue ) # Calcul MF
271 HxValue = self.__Method( _xuValue, self.__extraArgs ) # Calcul MF
273 if argsAsSerie: return HxValue
274 else: return HxValue[-1]
276 def appliedInXTo(self, paires, argsAsSerie = False):
278 Permet de restituer le résultat de l'application de l'opérateur à une
279 série d'arguments xValue, sachant que l'opérateur est valable en
280 xNominal. Cette méthode se contente d'appliquer, son argument devant a
281 priori être du bon type. Si l'opérateur est linéaire car c'est une
282 matrice, alors il est valable en tout point nominal et xNominal peut
283 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
284 permet d'indiquer que l'argument est multi-paires.
286 - paires : les arguments par paire sont :
287 - xNominal : série d'arguments permettant de donner le point où
288 l'opérateur est construit pour être ensuite appliqué
289 - xValue : série d'arguments adaptés pour appliquer l'opérateur
290 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
292 if argsAsSerie: _nxValue = paires
293 else: _nxValue = (paires,)
294 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
296 if self.__Matrix is not None:
298 for paire in _nxValue:
299 _xNominal, _xValue = paire
300 self.__addOneMatrixCall()
301 HxValue.append( self.__Matrix * _xValue )
303 self.__addOneMethodCall( len(_nxValue) )
304 if self.__extraArgs is None:
305 HxValue = self.__Method( _nxValue ) # Calcul MF
307 HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
309 if argsAsSerie: return HxValue
310 else: return HxValue[-1]
312 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
314 Permet de renvoyer l'opérateur sous la forme d'une matrice
316 if self.__Matrix is not None:
317 self.__addOneMatrixCall()
318 mValue = [self.__Matrix,]
319 elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None"
322 self.__addOneMethodCall( len(ValueForMethodForm) )
323 for _vfmf in ValueForMethodForm:
324 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
326 self.__addOneMethodCall()
327 mValue = self.__Method(((ValueForMethodForm, None),))
329 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
331 if argsAsSerie: return mValue
332 else: return mValue[-1]
336 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
337 la forme d'une matrice
339 if self.__Matrix is not None:
340 return self.__Matrix.shape
342 raise ValueError("Matrix form of the operator is not available, nor the shape")
344 def nbcalls(self, which=None):
346 Renvoie les nombres d'évaluations de l'opérateur
349 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
350 self.__NbCallsAsMatrix,
351 self.__NbCallsAsMethod,
352 self.__NbCallsOfCached,
353 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
354 Operator.NbCallsAsMatrix,
355 Operator.NbCallsAsMethod,
356 Operator.NbCallsOfCached,
358 if which is None: return __nbcalls
359 else: return __nbcalls[which]
361 def __addOneMatrixCall(self):
362 "Comptabilise un appel"
363 self.__NbCallsAsMatrix += 1 # Decompte local
364 Operator.NbCallsAsMatrix += 1 # Decompte global
366 def __addOneMethodCall(self, nb = 1):
367 "Comptabilise un appel"
368 self.__NbCallsAsMethod += nb # Decompte local
369 Operator.NbCallsAsMethod += nb # Decompte global
371 def __addOneCacheCall(self):
372 "Comptabilise un appel"
373 self.__NbCallsOfCached += 1 # Decompte local
374 Operator.NbCallsOfCached += 1 # Decompte global
376 # ==============================================================================
377 class FullOperator(object):
379 Classe générale d'interface de type opérateur complet
380 (Direct, Linéaire Tangent, Adjoint)
383 name = "GenericFullOperator",
385 asOneFunction = None, # 1 Fonction
386 asThreeFunctions = None, # 3 Fonctions in a dictionary
387 asScript = None, # 1 or 3 Fonction(s) by script
388 asDict = None, # Parameters
390 extraArguments = None,
392 inputAsMF = False,# Fonction(s) as Multi-Functions
397 self.__name = str(name)
398 self.__check = bool(toBeChecked)
399 self.__extraArgs = extraArguments
404 if (asDict is not None) and isinstance(asDict, dict):
405 __Parameters.update( asDict )
406 # Priorité à EnableMultiProcessingInDerivatives=True
407 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
408 __Parameters["EnableMultiProcessingInDerivatives"] = True
409 __Parameters["EnableMultiProcessingInEvaluation"] = False
410 if "EnableMultiProcessingInDerivatives" not in __Parameters:
411 __Parameters["EnableMultiProcessingInDerivatives"] = False
412 if __Parameters["EnableMultiProcessingInDerivatives"]:
413 __Parameters["EnableMultiProcessingInEvaluation"] = False
414 if "EnableMultiProcessingInEvaluation" not in __Parameters:
415 __Parameters["EnableMultiProcessingInEvaluation"] = False
416 if "withIncrement" in __Parameters: # Temporaire
417 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
419 if asScript is not None:
420 __Matrix, __Function = None, None
422 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
424 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
425 __Function.update({"useApproximatedDerivatives":True})
426 __Function.update(__Parameters)
427 elif asThreeFunctions:
429 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
430 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
431 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
433 __Function.update(__Parameters)
436 if asOneFunction is not None:
437 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
438 if asOneFunction["Direct"] is not None:
439 __Function = asOneFunction
441 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
443 __Function = { "Direct":asOneFunction }
444 __Function.update({"useApproximatedDerivatives":True})
445 __Function.update(__Parameters)
446 elif asThreeFunctions is not None:
447 if isinstance(asThreeFunctions, dict) and \
448 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
449 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
450 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
451 __Function = asThreeFunctions
452 elif isinstance(asThreeFunctions, dict) and \
453 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
454 __Function = asThreeFunctions
455 __Function.update({"useApproximatedDerivatives":True})
457 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
458 if "Direct" not in asThreeFunctions:
459 __Function["Direct"] = asThreeFunctions["Tangent"]
460 __Function.update(__Parameters)
464 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
465 # for k in ("Direct", "Tangent", "Adjoint"):
466 # if k in __Function and hasattr(__Function[k],"__class__"):
467 # if type(__Function[k]) is type(self.__init__):
468 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
470 if appliedInX is not None and isinstance(appliedInX, dict):
471 __appliedInX = appliedInX
472 elif appliedInX is not None:
473 __appliedInX = {"HXb":appliedInX}
477 if scheduledBy is not None:
478 self.__T = scheduledBy
480 if isinstance(__Function, dict) and \
481 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
482 ("Direct" in __Function) and (__Function["Direct"] is not None):
483 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
484 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
485 if "withdX" not in __Function: __Function["withdX"] = None
486 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
487 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
488 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
489 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
490 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
491 from daCore import NumericObjects
492 FDA = NumericObjects.FDApproximation(
493 Function = __Function["Direct"],
494 centeredDF = __Function["CenteredFiniteDifference"],
495 increment = __Function["DifferentialIncrement"],
496 dX = __Function["withdX"],
497 avoidingRedundancy = __Function["withAvoidingRedundancy"],
498 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
499 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
500 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
501 mpWorkers = __Function["NumberOfProcesses"],
502 mfEnabled = __Function["withmfEnabled"],
504 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
505 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
506 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
507 elif isinstance(__Function, dict) and \
508 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
509 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
510 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
511 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
512 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
513 elif asMatrix is not None:
514 __matrice = numpy.matrix( __Matrix, numpy.float )
515 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
516 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
517 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
520 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
522 if __appliedInX is not None:
523 self.__FO["AppliedInX"] = {}
524 for key in list(__appliedInX.keys()):
525 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
526 # Pour le cas où l'on a une vraie matrice
527 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
528 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
529 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
530 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
532 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
534 self.__FO["AppliedInX"] = None
540 "x.__repr__() <==> repr(x)"
541 return repr(self.__FO)
544 "x.__str__() <==> str(x)"
545 return str(self.__FO)
547 # ==============================================================================
548 class Algorithm(object):
550 Classe générale d'interface de type algorithme
552 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
553 d'assimilation, en fournissant un container (dictionnaire) de variables
554 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
556 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
558 def __init__(self, name):
560 L'initialisation présente permet de fabriquer des variables de stockage
561 disponibles de manière générique dans les algorithmes élémentaires. Ces
562 variables de stockage sont ensuite conservées dans un dictionnaire
563 interne à l'objet, mais auquel on accède par la méthode "get".
565 Les variables prévues sont :
566 - APosterioriCorrelations : matrice de corrélations de la matrice A
567 - APosterioriCovariance : matrice de covariances a posteriori : A
568 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
569 - APosterioriVariances : vecteur des variances de la matrice A
570 - Analysis : vecteur d'analyse : Xa
571 - BMA : Background moins Analysis : Xa - Xb
572 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
573 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
574 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
575 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
576 - CostFunctionJo : partie observations de la fonction-coût : Jo
577 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
578 - CurrentOptimum : état optimal courant lors d'itérations
579 - CurrentState : état courant lors d'itérations
580 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
581 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
582 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
583 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
584 - Innovation : l'innovation : d = Y - H(X)
585 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
586 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
587 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
588 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
589 - KalmanGainAtOptimum : gain de Kalman à l'optimum
590 - MahalanobisConsistency : indicateur de consistance des covariances
591 - OMA : Observation moins Analyse : Y - Xa
592 - OMB : Observation moins Background : Y - Xb
593 - PredictedState : état prédit courant lors d'itérations
594 - Residu : dans le cas des algorithmes de vérification
595 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
596 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
597 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
598 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
599 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
600 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
601 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
602 On peut rajouter des variables à stocker dans l'initialisation de
603 l'algorithme élémentaire qui va hériter de cette classe
605 logging.debug("%s Initialisation", str(name))
606 self._m = PlatformInfo.SystemUsage()
608 self._name = str( name )
609 self._parameters = {"StoreSupplementaryCalculations":[]}
610 self.__required_parameters = {}
611 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
612 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
613 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
614 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
616 self.StoredVariables = {}
617 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
618 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
619 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
620 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
621 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
622 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
623 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
624 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
625 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
626 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
627 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
628 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
629 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
630 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
631 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
632 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
633 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
634 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
635 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
636 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
637 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
638 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
639 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
640 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
641 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
642 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
643 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
644 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
645 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
646 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
647 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
648 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
649 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
650 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
651 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
652 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
653 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
654 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
656 for k in self.StoredVariables:
657 self.__canonical_stored_name[k.lower()] = k
659 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
661 logging.debug("%s Lancement", self._name)
662 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
664 # Mise a jour de self._parameters avec Parameters
665 self.__setParameters(Parameters)
667 for k, v in self.__variable_names_not_public.items():
668 if k not in self._parameters: self.__setParameters( {k:v} )
670 # Corrections et complements
671 def __test_vvalue(argument, variable, argname):
673 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
674 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
675 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
676 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
678 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
680 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
682 __test_vvalue( Xb, "Xb", "Background or initial state" )
683 __test_vvalue( Y, "Y", "Observation" )
685 def __test_cvalue(argument, variable, argname):
687 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
688 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
689 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
690 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
692 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
694 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
696 __test_cvalue( R, "R", "Observation" )
697 __test_cvalue( B, "B", "Background" )
698 __test_cvalue( Q, "Q", "Evolution" )
700 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
701 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
703 self._parameters["Bounds"] = None
705 if logging.getLogger().level < logging.WARNING:
706 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
707 if PlatformInfo.has_scipy:
708 import scipy.optimize
709 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
711 self._parameters["optmessages"] = 15
713 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
714 if PlatformInfo.has_scipy:
715 import scipy.optimize
716 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
718 self._parameters["optmessages"] = 15
722 def _post_run(self,_oH=None):
724 if ("StoreSupplementaryCalculations" in self._parameters) and \
725 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
726 for _A in self.StoredVariables["APosterioriCovariance"]:
727 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
728 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
729 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
730 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
731 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
732 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
733 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
734 self.StoredVariables["APosterioriCorrelations"].store( _C )
736 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
737 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
738 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
739 logging.debug("%s Terminé", self._name)
def _toStore(self, key):
    "True if in StoreSupplementaryCalculations, else False"
    # Membership test against the user-requested supplementary outputs.
    requested = self._parameters["StoreSupplementaryCalculations"]
    return key in requested
# Accessor for stored variables: with a key, return that Persistence object;
# without one, return the whole StoredVariables container.
# NOTE(review): the docstring delimiters and the `if key is not None:` test
# (original lines 747, 753-754, 756) are elided from this extract.
746 def get(self, key=None):
748 Renvoie l'une des variables stockées identifiée par la clé, ou le
749 dictionnaire de l'ensemble des variables disponibles en l'absence de
750 clé. Ce sont directement les variables sous forme objet qui sont
751 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
752 des classes de persistance.
# Key lookup is case-insensitive through the canonical-name mapping.
755 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
757 return self.StoredVariables
# Case-insensitive membership test over the stored variables.
# NOTE(review): the `return False` branch (original lines 762-763) is elided.
759 def __contains__(self, key=None):
760 "D.__contains__(k) -> True if D has a key k, else False"
761 if key is None or key.lower() not in self.__canonical_stored_name:
764 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
# NOTE(review): the `def keys(self):` line (original line 766) is elided.
# Returns the stored-variable names; the hasattr guard protects calls made
# before/after the StoredVariables attribute exists (e.g. during pickling).
767 "D.keys() -> list of D's keys"
768 if hasattr(self, "StoredVariables"):
769 return self.StoredVariables.keys()
# dict-like pop over stored variables, case-insensitive on the key.
# NOTE(review): the `def pop(...)` line and the fallback branches (original
# lines 773, 777-780) are elided from this extract.
774 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
775 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
776 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
# Mirrors dict.pop semantics when called without any argument.
781 raise TypeError("pop expected at least 1 arguments, got 0")
782 "If key is not found, d is returned if given, otherwise KeyError is raised"
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
    """
    Abstract entry point of an elementary algorithm.

    Subclasses must implement here the elementary assimilation computation,
    in its most natural mathematical form, from the background Xb, the
    observation Y, the operators H/M, the covariances R/B/Q and the
    user parameters.
    """
    raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# Registers a required algorithm parameter with its default value, cast,
# bounds and allowed values, then logs the resulting default.
# NOTE(review): several dict-entry lines of the registration (original lines
# 800, 802, 804, 806-810) are elided from this extract.
795 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
797 Permet de définir dans l'algorithme des paramètres requis et leurs
798 caractéristiques par défaut.
801 raise ValueError("A name is mandatory to define a required parameter.")
803 self.__required_parameters[name] = {
805 "typecast" : typecast,
# Lookup is case-insensitive: map the lowercase form to the canonical name.
811 self.__canonical_parameter_name[name.lower()] = name
# setParameterValue(name) with no value returns the validated default.
812 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# Returns either the sorted list of required-parameter names (noDetails=True)
# or the full description dictionary.
# NOTE(review): the `if noDetails:` / `else:` lines (original 819, 821) are
# elided from this extract.
814 def getRequiredParameters(self, noDetails=True):
816 Renvoie la liste des noms de paramètres requis ou directement le
817 dictionnaire des paramètres requis.
820 return sorted(self.__required_parameters.keys())
822 return self.__required_parameters
# Validates and returns the value of one required parameter: applies the
# declared typecast, then checks minval/maxval bounds and listval membership.
# NOTE(review): the raise for a missing mandatory value, the `else:`/`try:`/
# `except:` scaffolding and the per-item loop header (original lines 836,
# 840, 842-843, 845, 854-855) are elided from this extract.
824 def setParameterValue(self, name=None, value=None):
826 Renvoie la valeur d'un paramètre requis de manière contrôlée
# Resolve the case-insensitive name to its canonical registration.
828 __k = self.__canonical_parameter_name[name.lower()]
829 default = self.__required_parameters[__k]["default"]
830 typecast = self.__required_parameters[__k]["typecast"]
831 minval = self.__required_parameters[__k]["minval"]
832 maxval = self.__required_parameters[__k]["maxval"]
833 listval = self.__required_parameters[__k]["listval"]
# No value given: fall back on the (possibly cast) declared default.
835 if value is None and default is None:
837 elif value is None and default is not None:
838 if typecast is None: __val = default
839 else: __val = typecast( default )
841 if typecast is None: __val = value
844 __val = typecast( value )
846 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Bounds checks work element-wise so list/array-valued parameters are covered.
848 if minval is not None and (numpy.array(__val, float) < minval).any():
849 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
850 if maxval is not None and (numpy.array(__val, float) > maxval).any():
851 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
# Allowed-values check: each element for sequence values, else the value itself.
852 if listval is not None:
853 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
856 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
857 elif __val not in listval:
858 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
# Declares which run() input arguments are mandatory vs optional for this
# algorithm; stored as tuples in the __required_inputs registry.
861 def requireInputArguments(self, mandatory=(), optional=()):
863 Permet d'imposer des arguments requises en entrée
865 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
866 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
# Merges user-supplied parameters into the internal dictionary, then
# re-validates every registered required parameter (supplied or defaulted).
# NOTE(review): the `else:` of the supplied/defaulted choice (original line
# 876) is elided from this extract.
868 def __setParameters(self, fromDico={}):
870 Permet de stocker les paramètres reçus dans le dictionnaire interne.
872 self._parameters.update( fromDico )
873 for k in self.__required_parameters.keys():
874 if k in fromDico.keys():
# setParameterValue applies typecast/bounds/list validation in both branches.
875 self._parameters[k] = self.setParameterValue(k,fromDico[k])
877 self._parameters[k] = self.setParameterValue(k)
878 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
880 # ==============================================================================
# Facade binding a chosen elementary algorithm to its user parameters, and
# driving its execution either directly in Python or through a YACS scheme.
# NOTE(review): this extract is a numbered listing with many lines elided
# (signature continuations, `else:`/`try:`/`except:` branches, docstring
# delimiters); comments below flag the elisions that matter.
881 class AlgorithmAndParameters(object):
883 Classe générale d'interface d'action pour l'algorithme et ses paramètres
# --- __init__ (its `def` line, original 884-885, is elided) ---------------
886 name = "GenericAlgorithm",
893 self.__name = str(name)
897 self.__algorithm = {}
898 self.__algorithmFile = None
899 self.__algorithmName = None
901 self.updateParameters( asDict, asScript )
# The algorithm choice may come from a script variable named "Algorithm".
903 if asAlgorithm is None and asScript is not None:
904 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
908 if __Algo is not None:
909 self.__A = str(__Algo)
910 self.__P.update( {"Algorithm":self.__A} )
912 self.__setAlgorithm( self.__A )
914 self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
# --- updateParameters: merge parameters from a dict or a script -----------
916 def updateParameters(self,
920 "Mise a jour des parametres"
921 if asDict is None and asScript is not None:
922 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
926 if __Dict is not None:
927 self.__P.update( dict(__Dict) )
# --- executePythonScheme: unpack the AO case dictionary and run -----------
929 def executePythonScheme(self, asDictAO = None):
930 "Permet de lancer le calcul d'assimilation"
# Fresh run: clear the operator calculation cache first.
931 Operator.CM.clearCache()
933 if not isinstance(asDictAO, dict):
934 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Each entry may be a wrapper exposing getO(); unwrap when possible.
935 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
936 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
937 else: self.__Xb = None
938 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
939 else: self.__Y = asDictAO["Observation"]
940 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
941 else: self.__U = asDictAO["ControlInput"]
942 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
943 else: self.__HO = asDictAO["ObservationOperator"]
944 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
945 else: self.__EM = asDictAO["EvolutionModel"]
946 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
947 else: self.__CM = asDictAO["ControlModel"]
948 self.__B = asDictAO["BackgroundError"]
949 self.__R = asDictAO["ObservationError"]
950 self.__Q = asDictAO["EvolutionError"]
# Check dimensional consistency of all inputs before running.
952 self.__shape_validate()
954 self.__algorithm.run(
964 Parameters = self.__P,
# --- executeYACSScheme: load, validate and execute a YACS XML scheme ------
968 def executeYACSScheme(self, FileName=None):
969 "Permet de lancer le calcul d'assimilation"
970 if FileName is None or not os.path.exists(FileName):
971 raise ValueError("a YACS file name has to be given for YACS execution.\n")
973 __file = os.path.abspath(FileName)
974 logging.debug("The YACS file name is \"%s\"."%__file)
# YACS execution requires the full SALOME/YACS/ADAO environment.
975 if not PlatformInfo.has_salome or \
976 not PlatformInfo.has_yacs or \
977 not PlatformInfo.has_adao:
978 raise ImportError("\n\n"+\
979 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
980 "Please load the right environnement before trying to use it.\n")
985 SALOMERuntime.RuntimeSALOME_setRuntime()
987 r = pilot.getRuntime()
988 xmlLoader = loader.YACSLoader()
989 xmlLoader.registerProcCataLoader()
# NOTE(review): the try/except around catalog loading (original 990, 993-995)
# is elided from this extract.
991 catalogAd = r.loadCatalog("proc", __file)
992 r.addCatalog(catalogAd)
997 p = xmlLoader.load(__file)
998 except IOError as ex:
999 print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Report parsing, validity and coherence problems before executing.
1001 logger = p.getLogger("parser")
1002 if not logger.isEmpty():
1003 print("The imported YACS XML schema has errors on parsing:")
1004 print(logger.getStr())
1007 print("The YACS XML schema is not valid and will not be executed:")
1008 print(p.getErrorReport())
1010 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1011 p.checkConsistency(info)
1012 if info.areWarningsOrErrors():
1013 print("The YACS XML schema is not coherent and will not be executed:")
1014 print(info.getGlobalRepr())
1016 e = pilot.ExecutorSwig()
1018 if p.getEffectiveState() != pilot.DONE:
1019 print(p.getErrorReport())
# --- get: variable first, then parameter, else all public variables -------
1023 def get(self, key = None):
1024 "Vérifie l'existence d'une clé de variable ou de paramètres"
1025 if key in self.__algorithm:
1026 return self.__algorithm.get( key )
1027 elif key in self.__P:
1028 return self.__P[key]
# NOTE(review): this aliases (does not copy) __P, so the pop() below mutates
# the parameter dictionary itself — presumably intentional; verify.
1030 allvariables = self.__P
1031 for k in self.__variable_names_not_public: allvariables.pop(k, None)
# --- pop: delegated to the algorithm (needed for pickling) ----------------
1034 def pop(self, k, d):
1035 "Necessaire pour le pickling"
1036 return self.__algorithm.pop(k, d)
1038 def getAlgorithmRequiredParameters(self, noDetails=True):
1039 "Renvoie la liste des paramètres requis selon l'algorithme"
1040 return self.__algorithm.getRequiredParameters(noDetails)
# --- setObserver / removeObserver / hasObserver on stored variables -------
1042 def setObserver(self, __V, __O, __I, __S):
# Guard: an algorithm must be selected before observers can be attached.
1043 if self.__algorithm is None \
1044 or isinstance(self.__algorithm, dict) \
1045 or not hasattr(self.__algorithm,"StoredVariables"):
1046 raise ValueError("No observer can be build before choosing an algorithm.")
1047 if __V not in self.__algorithm:
1048 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1050 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1053 HookParameters = __I,
1056 def removeObserver(self, __V, __O, __A = False):
1057 if self.__algorithm is None \
1058 or isinstance(self.__algorithm, dict) \
1059 or not hasattr(self.__algorithm,"StoredVariables"):
1060 raise ValueError("No observer can be removed before choosing an algorithm.")
1061 if __V not in self.__algorithm:
1062 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1064 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1069 def hasObserver(self, __V):
1070 if self.__algorithm is None \
1071 or isinstance(self.__algorithm, dict) \
1072 or not hasattr(self.__algorithm,"StoredVariables"):
1074 if __V not in self.__algorithm:
1076 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# --- keys (its `def` line, original 1078, is elided): public names only ---
1079 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1080 for k in self.__variable_names_not_public:
1081 if k in __allvariables: __allvariables.remove(k)
1082 return __allvariables
1084 def __contains__(self, key=None):
1085 "D.__contains__(k) -> True if D has a key k, else False"
1086 return key in self.__algorithm or key in self.__P
# --- __repr__ / __str__ (their `def` lines are elided) --------------------
1089 "x.__repr__() <==> repr(x)"
1090 return repr(self.__A)+", "+repr(self.__P)
1093 "x.__str__() <==> str(x)"
1094 return str(self.__A)+", "+str(self.__P)
# --- __setAlgorithm: locate, import and instantiate the algorithm module --
1096 def __setAlgorithm(self, choice = None ):
1098 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1099 d'assimilation. L'argument est un champ caractère se rapportant au nom
1100 d'un algorithme réalisant l'opération sur les arguments fixes.
1103 raise ValueError("Error: algorithm choice has to be given")
# The algorithm choice is immutable once made.
1104 if self.__algorithmName is not None:
1105 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1106 daDirectory = "daAlgorithms"
1108 # Recherche explicitement le fichier complet
1109 # ------------------------------------------
# Scan sys.path for daAlgorithms/<choice>.py to find the module directory.
1111 for directory in sys.path:
1112 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1113 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1114 if module_path is None:
1115 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1117 # Importe le fichier complet comme un module
1118 # ------------------------------------------
# Temporarily prepend the module directory so __import__ can resolve it.
1120 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1121 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1122 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1123 raise ImportError("this module does not define a valid elementary algorithm.")
1124 self.__algorithmName = str(choice)
1125 sys.path = sys_path_tmp ; del sys_path_tmp
1126 except ImportError as e:
1127 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1129 # Instancie un objet du type élémentaire du fichier
1130 # -------------------------------------------------
1131 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# --- __shape_validate: cross-check all input dimensions -------------------
1134 def __shape_validate(self):
1136 Validation de la correspondance correcte des tailles des variables et
1137 des matrices s'il y en a.
# Normalize each input to a shape tuple; `shape` may be an attribute or a
# callable depending on the underlying object type.
1139 if self.__Xb is None: __Xb_shape = (0,)
1140 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1141 elif hasattr(self.__Xb,"shape"):
1142 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1143 else: __Xb_shape = self.__Xb.shape()
1144 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1146 if self.__Y is None: __Y_shape = (0,)
1147 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1148 elif hasattr(self.__Y,"shape"):
1149 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1150 else: __Y_shape = self.__Y.shape()
1151 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1153 if self.__U is None: __U_shape = (0,)
1154 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1155 elif hasattr(self.__U,"shape"):
1156 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1157 else: __U_shape = self.__U.shape()
1158 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1160 if self.__B is None: __B_shape = (0,0)
1161 elif hasattr(self.__B,"shape"):
1162 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1163 else: __B_shape = self.__B.shape()
1164 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1166 if self.__R is None: __R_shape = (0,0)
1167 elif hasattr(self.__R,"shape"):
1168 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1169 else: __R_shape = self.__R.shape()
1170 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1172 if self.__Q is None: __Q_shape = (0,0)
1173 elif hasattr(self.__Q,"shape"):
1174 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1175 else: __Q_shape = self.__Q.shape()
1176 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
# Operators are dictionaries of Direct/Tangent/Adjoint components.
1178 if len(self.__HO) == 0: __HO_shape = (0,0)
1179 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1180 elif hasattr(self.__HO["Direct"],"shape"):
1181 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1182 else: __HO_shape = self.__HO["Direct"].shape()
1183 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1185 if len(self.__EM) == 0: __EM_shape = (0,0)
1186 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1187 elif hasattr(self.__EM["Direct"],"shape"):
1188 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1189 else: __EM_shape = self.__EM["Direct"].shape()
1190 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1192 if len(self.__CM) == 0: __CM_shape = (0,0)
1193 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1194 elif hasattr(self.__CM["Direct"],"shape"):
1195 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1196 else: __CM_shape = self.__CM["Direct"].shape()
1197 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1199 # Vérification des conditions
1200 # ---------------------------
# Vectors must be 1D (or degenerate 2D); covariances must be square.
1201 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1202 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1203 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1204 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1206 if not( min(__B_shape) == max(__B_shape) ):
1207 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1208 if not( min(__R_shape) == max(__R_shape) ):
1209 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1210 if not( min(__Q_shape) == max(__Q_shape) ):
1211 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1212 if not( min(__EM_shape) == max(__EM_shape) ):
1213 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
# Cross-compatibility: H columns vs state, H rows vs observation, etc.
1215 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1216 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1217 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1218 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1219 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1220 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1221 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1222 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# Special case: EnsembleBlue accepts an Xb made of several members, which is
# reshaped here into a persistent vector series.
1224 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1225 if self.__algorithmName in ["EnsembleBlue",]:
1226 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1227 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1228 for member in asPersistentVector:
# NOTE(review): numpy.float was removed in NumPy 1.20+; the builtin
# `float` is the drop-in replacement — fix when touching this code.
1229 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1230 __Xb_shape = min(__B_shape)
1232 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1234 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1235 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1237 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1238 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1240 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1241 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
# Bounds, when given as a sequence, must provide one pair per state component.
1243 if ("Bounds" in self.__P) \
1244 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1245 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1246 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1247 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1251 # ==============================================================================
# Lightweight holder for a regulation algorithm name and its parameters,
# filled either from explicit arguments or from a script file.
# NOTE(review): numbered listing with lines elided (the __init__ `def` line
# and several `else:` branches are missing from this extract).
1252 class RegulationAndParameters(object):
1254 Classe générale d'interface d'action pour la régulation et ses paramètres
1257 name = "GenericRegulation",
1264 self.__name = str(name)
# The algorithm may come from a script variable named "Algorithm".
1267 if asAlgorithm is None and asScript is not None:
1268 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1270 __Algo = asAlgorithm
1272 if asDict is None and asScript is not None:
1273 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1277 if __Dict is not None:
1278 self.__P.update( dict(__Dict) )
1280 if __Algo is not None:
1281 self.__P.update( {"Algorithm":__Algo} )
# Parameter accessor; the keyless branch (original 1285, 1287-1288) is elided.
1283 def get(self, key = None):
1284 "Vérifie l'existence d'une clé de variable ou de paramètres"
1286 return self.__P[key]
1290 # ==============================================================================
# Builds an observer function (from a string, a template name, a script or a
# callable) and attaches it to one or several algorithm stored variables.
# NOTE(review): numbered listing with lines elided (the __init__ `def` line,
# some `else:` branches and the loop over variables are incomplete here).
1291 class DataObserver(object):
1293 Classe générale d'interface de type observer
1296 name = "GenericObserver",
1308 self.__name = str(name)
# Normalize the target variable name(s) into tuple __V with matching info __I.
1313 if onVariable is None:
1314 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1315 elif type(onVariable) in (tuple, list):
1316 self.__V = tuple(map( str, onVariable ))
1317 if withInfo is None:
# Same info string is replicated for every observed variable.
1320 self.__I = (str(withInfo),)*len(self.__V)
1321 elif isinstance(onVariable, str):
1322 self.__V = (onVariable,)
1323 if withInfo is None:
1324 self.__I = (onVariable,)
1326 self.__I = (str(withInfo),)
1328 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# Resolve the observer body: explicit source, named template, or script file.
1330 if asString is not None:
1331 __FunctionText = asString
1332 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1333 __FunctionText = Templates.ObserverTemplates[asTemplate]
1334 elif asScript is not None:
1335 __FunctionText = Interfaces.ImportFromScript(asScript).getstring()
1338 __Function = ObserverF(__FunctionText)
# An already-built observer object takes precedence over the compiled text.
1340 if asObsObject is not None:
1341 self.__O = asObsObject
1343 self.__O = __Function.getfunc()
# Attach the observer to each requested variable of the algorithm.
1345 for k in range(len(self.__V)):
1348 if ename not in withAlgo:
1349 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1351 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
# __repr__ / __str__ (their `def` lines are elided from this extract).
1354 "x.__repr__() <==> repr(x)"
1355 return repr(self.__V)+"\n"+repr(self.__O)
1358 "x.__str__() <==> str(x)"
1359 return str(self.__V)+"\n"+str(self.__O)
1361 # ==============================================================================
# Wrapper for a state object: a single vector or a series of vectors
# (persistent vector), built from direct values, a script or data files.
# NOTE(review): numbered listing with lines elided (the __init__ `def` line
# and several `else:` branches are missing from this extract).
1362 class State(object):
1364 Classe générale d'interface de type état
1367 name = "GenericVector",
1369 asPersistentVector = None,
1375 toBeChecked = False,
1378 Permet de définir un vecteur :
1379 - asVector : entrée des données, comme un vecteur compatible avec le
1380 constructeur de numpy.matrix, ou "True" si entrée par script.
1381 - asPersistentVector : entrée des données, comme une série de vecteurs
1382 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1383 type Persistence, ou "True" si entrée par script.
1384 - asScript : si un script valide est donné contenant une variable
1385 nommée "name", la variable est de type "asVector" (par défaut) ou
1386 "asPersistentVector" selon que l'une de ces variables est placée à
1388 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1389 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1390 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1391 nommée "name"), on récupère les colonnes et on les range ligne après
1392 ligne (colMajor=False, par défaut) ou colonne après colonne
1393 (colMajor=True). La variable résultante est de type "asVector" (par
1394 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1397 self.__name = str(name)
1398 self.__check = bool(toBeChecked)
1402 self.__is_vector = False
1403 self.__is_series = False
# Input source 1: a script exporting a variable named after self.__name.
1405 if asScript is not None:
1406 __Vector, __Series = None, None
1407 if asPersistentVector:
1408 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1410 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
# Input source 2: column-oriented data file(s).
1411 elif asDataFile is not None:
1412 __Vector, __Series = None, None
1413 if asPersistentVector:
1414 if colNames is not None:
1415 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1417 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already column-major: transpose in exactly one of the
# two orientation cases to end up with one vector per row.
1418 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1419 __Series = numpy.transpose(__Series)
1420 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1421 __Series = numpy.transpose(__Series)
1423 if colNames is not None:
1424 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1426 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1428 __Vector = numpy.ravel(__Vector, order = "F")
1430 __Vector = numpy.ravel(__Vector, order = "C")
# Input source 3: direct values.
1432 __Vector, __Series = asVector, asPersistentVector
# Build either a single column-vector, or a persistent series of them.
1434 if __Vector is not None:
1435 self.__is_vector = True
# NOTE(review): numpy.float was removed in NumPy 1.20+; the builtin
# `float` is the drop-in replacement — fix when touching this code.
1436 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1437 self.shape = self.__V.shape
1438 self.size = self.__V.size
1439 elif __Series is not None:
1440 self.__is_series = True
1441 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1442 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string input — only safe for trusted input.
1443 if isinstance(__Series, str): __Series = eval(__Series)
1444 for member in __Series:
1445 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1448 if isinstance(self.__V.shape, (tuple, list)):
1449 self.shape = self.__V.shape
1451 self.shape = self.__V.shape()
# Normalize a 1D shape to an explicit column shape (n,1).
1452 if len(self.shape) == 1:
1453 self.shape = (self.shape[0],1)
1454 self.size = self.shape[0] * self.shape[1]
1456 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1458 if scheduledBy is not None:
1459 self.__T = scheduledBy
# Accessor; the non-scheduler branches (original 1462, 1464+) are elided.
1461 def getO(self, withScheduler=False):
1463 return self.__V, self.__T
1464 elif self.__T is None:
# Type predicates and __repr__/__str__ (their `def` lines are elided).
1470 "Vérification du type interne"
1471 return self.__is_vector
1474 "Vérification du type interne"
1475 return self.__is_series
1478 "x.__repr__() <==> repr(x)"
1479 return repr(self.__V)
1482 "x.__str__() <==> str(x)"
1483 return str(self.__V)
1485 # ==============================================================================
# Covariance matrix wrapper supporting four internal representations:
# scalar*I, diagonal-by-vector, full matrix, or an arbitrary matrix-like
# object exposing the required methods.
# NOTE(review): numbered listing with lines elided (the __init__ `def` line
# and the asScript dispatch conditions are missing from this extract); the
# class itself continues past the end of this extract.
1486 class Covariance(object):
1488 """
1491 name = "GenericCovariance",
1492 asCovariance = None,
1493 asEyeByScalar = None,
1494 asEyeByVector = None,
1497 toBeChecked = False,
1500 Permet de définir une covariance :
1501 - asCovariance : entrée des données, comme une matrice compatible avec
1502 le constructeur de numpy.matrix
1503 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1504 multiplicatif d'une matrice de corrélation identité, aucune matrice
1505 n'étant donc explicitement à donner
1506 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1507 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1508 n'étant donc explicitement à donner
1509 - asCovObject : entrée des données comme un objet python, qui a les
1510 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1511 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1512 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1513 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1514 pleine doit être vérifié
1516 self.__name = str(name)
1517 self.__check = bool(toBeChecked)
# Exactly one of these representation flags ends up True.
1520 self.__is_scalar = False
1521 self.__is_vector = False
1522 self.__is_matrix = False
1523 self.__is_object = False
# Script input: the selecting conditions (original 1527/1529/1531/1533)
# are elided from this extract.
1525 if asScript is not None:
1526 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1528 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1530 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1532 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1534 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1536 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar representation: C = abs(scalar) * I, stored as a plain float.
1538 if __Scalar is not None:
1539 if numpy.matrix(__Scalar).size != 1:
1540 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1541 self.__is_scalar = True
1542 self.__C = numpy.abs( float(__Scalar) )
# Vector representation: C = diag(abs(vector)), stored as a 1D array.
1545 elif __Vector is not None:
1546 self.__is_vector = True
1547 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1548 self.shape = (self.__C.size,self.__C.size)
1549 self.size = self.__C.size**2
# Full matrix representation.
1550 elif __Matrix is not None:
1551 self.__is_matrix = True
1552 self.__C = numpy.matrix( __Matrix, float )
1553 self.shape = self.__C.shape
1554 self.size = self.__C.size
# Object representation: duck-typed, mandatory interface checked here.
1555 elif __Object is not None:
1556 self.__is_object = True
1558 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1559 if not hasattr(self.__C,at):
1560 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1561 if hasattr(self.__C,"shape"):
1562 self.shape = self.__C.shape
1565 if hasattr(self.__C,"size"):
1566 self.size = self.__C.size
# An undefined covariance is tolerated here; __validate raises later if used.
1571 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Sanity checks of the covariance: squareness, strict positivity, and — when
# explicitly requested or when debugging — SPD-ness via a Cholesky attempt.
# NOTE(review): the try/except lines around the Cholesky calls (original
# 1588, 1590, 1593, 1595) are elided from this extract.
1575 def __validate(self):
1577 if self.__C is None:
1578 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1579 if self.ismatrix() and min(self.shape) != max(self.shape):
1580 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1581 if self.isobject() and min(self.shape) != max(self.shape):
1582 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1583 if self.isscalar() and self.__C <= 0:
1584 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1585 if self.isvector() and (self.__C <= 0).any():
1586 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# Cholesky succeeds iff the matrix is symmetric positive-definite.
1587 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1589 L = numpy.linalg.cholesky( self.__C )
1591 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1592 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1594 L = self.__C.cholesky()
1596 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Internal-representation predicates: exactly one of the four flags is set
# by __init__. NOTE(review): the four `def is...(self):` lines (original
# 1598, 1602, 1606, 1610) are elided from this extract.
1599 "Vérification du type interne"
1600 return self.__is_scalar
1603 "Vérification du type interne"
1604 return self.__is_vector
1607 "Vérification du type interne"
1608 return self.__is_matrix
1611 "Vérification du type interne"
1612 return self.__is_object
# Inverse covariance, preserving the internal representation: matrix inverse,
# elementwise reciprocal of the diagonal/scalar, or delegated getI().
# NOTE(review): the `def getI(self):` and `if self.ismatrix():` lines
# (original 1614-1616) are elided from this extract.
1617 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1618 elif self.isvector():
1619 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1620 elif self.isscalar():
1621 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1622 elif self.isobject():
1623 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1625 return None # Indispensable
1630 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1631 elif self.isvector():
1632 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1633 elif self.isscalar():
1634 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1635 elif self.isobject():
1636 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1639 "Décomposition de Cholesky"
1641 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1642 elif self.isvector():
1643 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1644 elif self.isscalar():
1645 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1646 elif self.isobject() and hasattr(self.__C,"cholesky"):
1647 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1649 def choleskyI(self):
1650 "Inversion de la décomposition de Cholesky"
1652 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1653 elif self.isvector():
1654 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1655 elif self.isscalar():
1656 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1657 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1658 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1660 def diag(self, msize=None):
1661 "Diagonale de la matrice"
1663 return numpy.diag(self.__C)
1664 elif self.isvector():
1666 elif self.isscalar():
1668 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1670 return self.__C * numpy.ones(int(msize))
1671 elif self.isobject():
1672 return self.__C.diag()
1674 def asfullmatrix(self, msize=None):
1678 elif self.isvector():
1679 return numpy.matrix( numpy.diag(self.__C), float )
1680 elif self.isscalar():
1682 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1684 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1685 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1686 return self.__C.asfullmatrix()
1688 def trace(self, msize=None):
1689 "Trace de la matrice"
1691 return numpy.trace(self.__C)
1692 elif self.isvector():
1693 return float(numpy.sum(self.__C))
1694 elif self.isscalar():
1696 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1698 return self.__C * int(msize)
1699 elif self.isobject():
1700 return self.__C.trace()
1706 "x.__repr__() <==> repr(x)"
1707 return repr(self.__C)
1710 "x.__str__() <==> str(x)"
1711 return str(self.__C)
1713 def __add__(self, other):
1714 "x.__add__(y) <==> x+y"
1715 if self.ismatrix() or self.isobject():
1716 return self.__C + numpy.asmatrix(other)
1717 elif self.isvector() or self.isscalar():
1718 _A = numpy.asarray(other)
1719 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1720 return numpy.asmatrix(_A)
1722 def __radd__(self, other):
1723 "x.__radd__(y) <==> y+x"
1724 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1726 def __sub__(self, other):
1727 "x.__sub__(y) <==> x-y"
1728 if self.ismatrix() or self.isobject():
1729 return self.__C - numpy.asmatrix(other)
1730 elif self.isvector() or self.isscalar():
1731 _A = numpy.asarray(other)
1732 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1733 return numpy.asmatrix(_A)
1735 def __rsub__(self, other):
1736 "x.__rsub__(y) <==> y-x"
1737 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1740 "x.__neg__() <==> -x"
1743 def __mul__(self, other):
1744 "x.__mul__(y) <==> x*y"
1745 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1746 return self.__C * other
1747 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1748 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1749 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1750 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1751 return self.__C * numpy.asmatrix(other)
1753 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1754 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1755 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1756 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1757 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1758 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1760 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1761 elif self.isscalar() and isinstance(other,numpy.matrix):
1762 return self.__C * other
1763 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1764 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1765 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1767 return self.__C * numpy.asmatrix(other)
1768 elif self.isobject():
1769 return self.__C.__mul__(other)
1771 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1773 def __rmul__(self, other):
1774 "x.__rmul__(y) <==> y*x"
1775 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1776 return other * self.__C
1777 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1778 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1779 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1780 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1781 return numpy.asmatrix(other) * self.__C
1783 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1784 elif self.isvector() and isinstance(other,numpy.matrix):
1785 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1786 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1787 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1788 return numpy.asmatrix(numpy.array(other) * self.__C)
1790 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1791 elif self.isscalar() and isinstance(other,numpy.matrix):
1792 return other * self.__C
1793 elif self.isobject():
1794 return self.__C.__rmul__(other)
1796 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1799 "x.__len__() <==> len(x)"
1800 return self.shape[0]
1802 # ==============================================================================
class ObserverF(object):
    """
    Creation of an observer function from its source text.
    """
    def __init__(self, corps=""):
        # Observer body kept as raw source text, executed on each call
        self.__corps = corps
    def func(self,var,info):
        "Observation function: executes the stored source text"
        # SECURITY NOTE(review): exec() on arbitrary text -- the observer
        # source must come from a trusted user, never from external input.
        exec(self.__corps)
    def getfunc(self):
        "Return the bound observer function of this object"
        return self.func
1816 # ==============================================================================
class CaseLogger(object):
    """
    Record the sequence of commands used to build a case, and replay or
    dump it through pluggable viewers/loaders.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []       # Ordered list of registered commands
        self.__switchoff = False    # When True, registration is suspended
        # NOTE(review): the viewer/loader dict literals were restored from
        # the surviving entries -- verify against upstream.
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            }
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized dump of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1866 # ==============================================================================
def MultiFonction(
        __xserie,
        _extraArguments = None,
        _sFunction      = lambda x: x,
        _mpEnabled      = False,
        _mpWorkers      = None,
        ):
    """
    For an ordered list of input vectors, return the corresponding list of
    values of the given function, evaluated either sequentially or through
    a multiprocessing pool.

    NOTE(review): signature and the mp-enabling logic were restored from
    the surviving lines -- verify against upstream ADAO.
    """
    # Initial checks and definitions
    # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    if _mpEnabled:
        if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
            __mpWorkers = None   # Let multiprocessing choose the pool size
        else:
            __mpWorkers = int(_mpWorkers)
        try:
            import multiprocessing
            __mpEnabled = True
        except ImportError:
            __mpEnabled = False
    else:
        __mpEnabled = False
        __mpWorkers = None
    #
    # Effective calculations
    if __mpEnabled:
        _jobs = []
        if _extraArguments is None:
            _jobs = __xserie
        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                _jobs.append( [__xvalue, ] + list(_extraArguments) )
        else:
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
        # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
        import multiprocessing
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
            pool.close()
            pool.join()
        # logging.debug("MULTF Internal multiprocessing calculation end")
    else:
        # logging.debug("MULTF Internal monoprocessing calculation begin")
        __multiHX = []
        if _extraArguments is None:
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue ) )
        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
        elif _extraArguments is not None and isinstance(_extraArguments, dict):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
        else:
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
        # logging.debug("MULTF Internal monoprocessing calculation end")
    #
    # logging.debug("MULTF Internal multifonction calculations end")
    return __multiHX
1932 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,   # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,   # Simulation of Hm(x) already done
                   _arg = None,   # Extra arguments for Hm, as a tuple
                   _BI  = None,   # Inverse of the background error covariance
                   _RI  = None,   # Inverse of the observation error covariance
                   _Xb  = None,   # Background state
                   _Y   = None,   # Observations
                   _SIV = False,  # A résorber pour la 8.0
                   _SSC = None,   # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,      # nbPreviousSteps
                   _QM  = "DA",   # QualityMeasure
                   _SSV = None,   # In and/or out: self.StoredVariables
                   _fRt = False,  # Whether to return the extended output
                   _sSc = True,   # Whether to store the SSC
                  ):
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest,
    PSO, SA, Tabu, DFO, QuantileRegression.

    Returns the scalar cost J, or _SSV when _fRt is True, or the simulated
    observation _HX for the "QR" quality measure.

    NOTE(review): _SSC/_SSV originally had mutable default arguments
    ([] and {}) shared between calls; they are replaced here by None
    sentinels, which is backward compatible for every explicit caller.
    """
    if _SSC is None: _SSC = []
    if _SSV is None: _SSV = {}
    #
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Observation simulation: reuse _HmX when already computed
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb  = 0.
            Jo  = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb  = 0.
            Jo  = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb  = 0.
            Jo  = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J   = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        # Extended return: the whole storage dictionary
        return _SSV
    else:
        if _QM in ["QR"]: # For the QuantileRegression
            return _HX
        else:
            return J
2047 # ==============================================================================
# Module self-test entry point: only prints a banner when run directly.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')