1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2019 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence, PlatformInfo, Interfaces
36 from daCore import Templates
38 # ==============================================================================
39 class CacheManager(object):
41 Classe générale de gestion d'un cache de calculs
44 toleranceInRedundancy = 1.e-18,
45 lenghtOfRedundancy = -1,
48 Les caractéristiques de tolérance peuvent être modifées à la création.
50 self.__tolerBP = float(toleranceInRedundancy)
51 self.__lenghtOR = int(lenghtOfRedundancy)
52 self.__initlnOR = self.__lenghtOR
57 self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
58 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
60 def wasCalculatedIn(self, xValue ): #, info="" ):
61 "Vérifie l'existence d'un calcul correspondant à la valeur"
64 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
65 if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
66 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
68 if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
70 __HxV = self.__listOPCV[i][1]
71 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
75 def storeValueInX(self, xValue, HxValue ):
76 "Stocke un calcul correspondant à la valeur"
77 if self.__lenghtOR < 0:
78 self.__lenghtOR = 2 * xValue.size + 2
79 self.__initlnOR = self.__lenghtOR
80 while len(self.__listOPCV) > self.__lenghtOR:
81 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
82 self.__listOPCV.pop(0)
83 self.__listOPCV.append( (
84 copy.copy(numpy.ravel(xValue)),
86 numpy.linalg.norm(xValue),
91 self.__initlnOR = self.__lenghtOR
96 self.__lenghtOR = self.__initlnOR
98 # ==============================================================================
99 class Operator(object):
101 Classe générale d'interface de type opérateur simple
111 avoidingRedundancy = True,
112 inputAsMultiFunction = False,
113 enableMultiProcess = False,
114 extraArguments = None,
117 On construit un objet de ce type en fournissant, à l'aide de l'un des
118 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
121 - fromMethod : argument de type fonction Python
122 - fromMatrix : argument adapté au constructeur numpy.matrix
123 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
124 - inputAsMultiFunction : booléen indiquant une fonction explicitement
125 définie (ou pas) en multi-fonction
126 - extraArguments : arguments supplémentaires passés à la fonction de
127 base et ses dérivées (tuple ou dictionnaire)
129 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
130 self.__AvoidRC = bool( avoidingRedundancy )
131 self.__inputAsMF = bool( inputAsMultiFunction )
132 self.__mpEnabled = bool( enableMultiProcess )
133 self.__extraArgs = extraArguments
134 if fromMethod is not None and self.__inputAsMF:
135 self.__Method = fromMethod # logtimer(fromMethod)
137 self.__Type = "Method"
138 elif fromMethod is not None and not self.__inputAsMF:
139 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
141 self.__Type = "Method"
142 elif fromMatrix is not None:
144 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
145 self.__Type = "Matrix"
def disableAvoidingRedundancy(self):
    "Turn off the calculation cache shared by all Operator instances."
    # Operator.CM is the class-level cache manager; disabling it stops
    # redundancy detection for every operator at once.
    Operator.CM.disable()
155 def enableAvoidingRedundancy(self):
160 Operator.CM.disable()
166 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
168 Permet de restituer le résultat de l'application de l'opérateur à une
169 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
170 argument devant a priori être du bon type.
172 - les arguments par série sont :
173 - xValue : argument adapté pour appliquer l'opérateur
174 - HValue : valeur précalculée de l'opérateur en ce point
175 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
182 if HValue is not None:
186 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
188 if _HValue is not None:
189 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
191 for i in range(len(_HValue)):
192 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
194 Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
199 for i, xv in enumerate(_xValue):
201 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
203 __alreadyCalculated = False
205 if __alreadyCalculated:
206 self.__addOneCacheCall()
209 if self.__Matrix is not None:
210 self.__addOneMatrixCall()
211 _hv = self.__Matrix * xv
213 self.__addOneMethodCall()
217 HxValue.append( _hv )
219 if len(_xserie)>0 and self.__Matrix is None:
220 if self.__extraArgs is None:
221 _hserie = self.__Method( _xserie ) # Calcul MF
223 _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
224 if not hasattr(_hserie, "pop"):
225 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
231 Operator.CM.storeValueInX(_xv,_hv)
233 if argsAsSerie: return HxValue
234 else: return HxValue[-1]
236 def appliedControledFormTo(self, paires, argsAsSerie = False):
238 Permet de restituer le résultat de l'application de l'opérateur à des
239 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
240 argument devant a priori être du bon type. Si la uValue est None,
241 on suppose que l'opérateur ne s'applique qu'à xValue.
243 - paires : les arguments par paire sont :
244 - xValue : argument X adapté pour appliquer l'opérateur
245 - uValue : argument U adapté pour appliquer l'opérateur
246 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
248 if argsAsSerie: _xuValue = paires
249 else: _xuValue = (paires,)
250 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
252 if self.__Matrix is not None:
254 for paire in _xuValue:
255 _xValue, _uValue = paire
256 self.__addOneMatrixCall()
257 HxValue.append( self.__Matrix * _xValue )
260 for paire in _xuValue:
262 _xValue, _uValue = paire
263 if _uValue is not None:
264 _xuValue.append( paire )
266 _xuValue.append( _xValue )
267 self.__addOneMethodCall( len(_xuValue) )
268 if self.__extraArgs is None:
269 HxValue = self.__Method( _xuValue ) # Calcul MF
271 HxValue = self.__Method( _xuValue, self.__extraArgs ) # Calcul MF
273 if argsAsSerie: return HxValue
274 else: return HxValue[-1]
276 def appliedInXTo(self, paires, argsAsSerie = False):
278 Permet de restituer le résultat de l'application de l'opérateur à une
279 série d'arguments xValue, sachant que l'opérateur est valable en
280 xNominal. Cette méthode se contente d'appliquer, son argument devant a
281 priori être du bon type. Si l'opérateur est linéaire car c'est une
282 matrice, alors il est valable en tout point nominal et xNominal peut
283 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
284 permet d'indiquer que l'argument est multi-paires.
286 - paires : les arguments par paire sont :
287 - xNominal : série d'arguments permettant de donner le point où
288 l'opérateur est construit pour être ensuite appliqué
289 - xValue : série d'arguments adaptés pour appliquer l'opérateur
290 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
292 if argsAsSerie: _nxValue = paires
293 else: _nxValue = (paires,)
294 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
296 if self.__Matrix is not None:
298 for paire in _nxValue:
299 _xNominal, _xValue = paire
300 self.__addOneMatrixCall()
301 HxValue.append( self.__Matrix * _xValue )
303 self.__addOneMethodCall( len(_nxValue) )
304 if self.__extraArgs is None:
305 HxValue = self.__Method( _nxValue ) # Calcul MF
307 HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
309 if argsAsSerie: return HxValue
310 else: return HxValue[-1]
312 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
314 Permet de renvoyer l'opérateur sous la forme d'une matrice
316 if self.__Matrix is not None:
317 self.__addOneMatrixCall()
318 mValue = [self.__Matrix,]
319 elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None"
322 self.__addOneMethodCall( len(ValueForMethodForm) )
323 for _vfmf in ValueForMethodForm:
324 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
326 self.__addOneMethodCall()
327 mValue = self.__Method(((ValueForMethodForm, None),))
329 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
331 if argsAsSerie: return mValue
332 else: return mValue[-1]
336 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
337 la forme d'une matrice
339 if self.__Matrix is not None:
340 return self.__Matrix.shape
342 raise ValueError("Matrix form of the operator is not available, nor the shape")
344 def nbcalls(self, which=None):
346 Renvoie les nombres d'évaluations de l'opérateur
349 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
350 self.__NbCallsAsMatrix,
351 self.__NbCallsAsMethod,
352 self.__NbCallsOfCached,
353 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
354 Operator.NbCallsAsMatrix,
355 Operator.NbCallsAsMethod,
356 Operator.NbCallsOfCached,
358 if which is None: return __nbcalls
359 else: return __nbcalls[which]
def __addOneMatrixCall(self):
    "Record one evaluation of the operator in its matrix form."
    # Keep the per-instance tally and the class-wide tally in step.
    self.__NbCallsAsMatrix = self.__NbCallsAsMatrix + 1   # local count
    Operator.NbCallsAsMatrix = Operator.NbCallsAsMatrix + 1  # global count
def __addOneMethodCall(self, nb = 1):
    "Record nb evaluations of the operator in its functional form."
    # Keep the per-instance tally and the class-wide tally in step.
    self.__NbCallsAsMethod = self.__NbCallsAsMethod + nb   # local count
    Operator.NbCallsAsMethod = Operator.NbCallsAsMethod + nb  # global count
def __addOneCacheCall(self):
    "Record one cache hit, i.e. a result served without re-evaluation."
    # Keep the per-instance tally and the class-wide tally in step.
    self.__NbCallsOfCached = self.__NbCallsOfCached + 1   # local count
    Operator.NbCallsOfCached = Operator.NbCallsOfCached + 1  # global count
376 # ==============================================================================
377 class FullOperator(object):
379 Classe générale d'interface de type opérateur complet
380 (Direct, Linéaire Tangent, Adjoint)
383 name = "GenericFullOperator",
385 asOneFunction = None, # 1 Fonction
386 asThreeFunctions = None, # 3 Fonctions in a dictionary
387 asScript = None, # 1 or 3 Fonction(s) by script
388 asDict = None, # Parameters
390 extraArguments = None,
392 inputAsMF = False,# Fonction(s) as Multi-Functions
397 self.__name = str(name)
398 self.__check = bool(toBeChecked)
399 self.__extraArgs = extraArguments
404 if (asDict is not None) and isinstance(asDict, dict):
405 __Parameters.update( asDict )
406 # Priorité à EnableMultiProcessingInDerivatives=True
407 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
408 __Parameters["EnableMultiProcessingInDerivatives"] = True
409 __Parameters["EnableMultiProcessingInEvaluation"] = False
410 if "EnableMultiProcessingInDerivatives" not in __Parameters:
411 __Parameters["EnableMultiProcessingInDerivatives"] = False
412 if __Parameters["EnableMultiProcessingInDerivatives"]:
413 __Parameters["EnableMultiProcessingInEvaluation"] = False
414 if "EnableMultiProcessingInEvaluation" not in __Parameters:
415 __Parameters["EnableMultiProcessingInEvaluation"] = False
416 if "withIncrement" in __Parameters: # Temporaire
417 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
419 if asScript is not None:
420 __Matrix, __Function = None, None
422 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
424 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
425 __Function.update({"useApproximatedDerivatives":True})
426 __Function.update(__Parameters)
427 elif asThreeFunctions:
429 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
430 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
431 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
433 __Function.update(__Parameters)
436 if asOneFunction is not None:
437 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
438 if asOneFunction["Direct"] is not None:
439 __Function = asOneFunction
441 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
443 __Function = { "Direct":asOneFunction }
444 __Function.update({"useApproximatedDerivatives":True})
445 __Function.update(__Parameters)
446 elif asThreeFunctions is not None:
447 if isinstance(asThreeFunctions, dict) and \
448 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
449 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
450 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
451 __Function = asThreeFunctions
452 elif isinstance(asThreeFunctions, dict) and \
453 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
454 __Function = asThreeFunctions
455 __Function.update({"useApproximatedDerivatives":True})
457 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
458 if "Direct" not in asThreeFunctions:
459 __Function["Direct"] = asThreeFunctions["Tangent"]
460 __Function.update(__Parameters)
464 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
465 # for k in ("Direct", "Tangent", "Adjoint"):
466 # if k in __Function and hasattr(__Function[k],"__class__"):
467 # if type(__Function[k]) is type(self.__init__):
468 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
470 if appliedInX is not None and isinstance(appliedInX, dict):
471 __appliedInX = appliedInX
472 elif appliedInX is not None:
473 __appliedInX = {"HXb":appliedInX}
477 if scheduledBy is not None:
478 self.__T = scheduledBy
480 if isinstance(__Function, dict) and \
481 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
482 ("Direct" in __Function) and (__Function["Direct"] is not None):
483 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
484 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
485 if "withdX" not in __Function: __Function["withdX"] = None
486 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
487 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
488 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
489 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
490 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
491 from daCore import NumericObjects
492 FDA = NumericObjects.FDApproximation(
493 Function = __Function["Direct"],
494 centeredDF = __Function["CenteredFiniteDifference"],
495 increment = __Function["DifferentialIncrement"],
496 dX = __Function["withdX"],
497 avoidingRedundancy = __Function["withAvoidingRedundancy"],
498 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
499 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
500 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
501 mpWorkers = __Function["NumberOfProcesses"],
502 mfEnabled = __Function["withmfEnabled"],
504 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
505 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
506 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
507 elif isinstance(__Function, dict) and \
508 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
509 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
510 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
511 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
512 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
513 elif asMatrix is not None:
514 __matrice = numpy.matrix( __Matrix, numpy.float )
515 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
516 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
517 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
520 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
522 if __appliedInX is not None:
523 self.__FO["AppliedInX"] = {}
524 for key in list(__appliedInX.keys()):
525 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
526 # Pour le cas où l'on a une vraie matrice
527 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
528 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
529 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
530 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
532 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
534 self.__FO["AppliedInX"] = None
540 "x.__repr__() <==> repr(x)"
541 return repr(self.__FO)
544 "x.__str__() <==> str(x)"
545 return str(self.__FO)
547 # ==============================================================================
548 class Algorithm(object):
550 Classe générale d'interface de type algorithme
552 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
553 d'assimilation, en fournissant un container (dictionnaire) de variables
554 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
556 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
558 def __init__(self, name):
560 L'initialisation présente permet de fabriquer des variables de stockage
561 disponibles de manière générique dans les algorithmes élémentaires. Ces
562 variables de stockage sont ensuite conservées dans un dictionnaire
563 interne à l'objet, mais auquel on accède par la méthode "get".
565 Les variables prévues sont :
566 - APosterioriCorrelations : matrice de corrélations de la matrice A
567 - APosterioriCovariance : matrice de covariances a posteriori : A
568 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
569 - APosterioriVariances : vecteur des variances de la matrice A
570 - Analysis : vecteur d'analyse : Xa
571 - BMA : Background moins Analysis : Xa - Xb
572 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
573 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
574 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
575 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
576 - CostFunctionJo : partie observations de la fonction-coût : Jo
577 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
578 - CurrentOptimum : état optimal courant lors d'itérations
579 - CurrentState : état courant lors d'itérations
580 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
581 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
582 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
583 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
584 - Innovation : l'innovation : d = Y - H(X)
585 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
586 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
587 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
588 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
589 - KalmanGainAtOptimum : gain de Kalman à l'optimum
590 - MahalanobisConsistency : indicateur de consistance des covariances
591 - OMA : Observation moins Analyse : Y - Xa
592 - OMB : Observation moins Background : Y - Xb
593 - PredictedState : état prédit courant lors d'itérations
594 - Residu : dans le cas des algorithmes de vérification
595 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
596 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
597 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
598 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
599 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
600 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
601 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
602 On peut rajouter des variables à stocker dans l'initialisation de
603 l'algorithme élémentaire qui va hériter de cette classe
605 logging.debug("%s Initialisation", str(name))
606 self._m = PlatformInfo.SystemUsage()
608 self._name = str( name )
609 self._parameters = {"StoreSupplementaryCalculations":[]}
610 self.__required_parameters = {}
611 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
612 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
613 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
614 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
616 self.StoredVariables = {}
617 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
618 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
619 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
620 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
621 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
622 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
623 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
624 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
625 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
626 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
627 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
628 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
629 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
630 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
631 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
632 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
633 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
634 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
635 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
636 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
637 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
638 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
639 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
640 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
641 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
642 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
643 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
644 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
645 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
646 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
647 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
648 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
649 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
650 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
651 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
652 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
653 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
654 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
656 for k in self.StoredVariables:
657 self.__canonical_stored_name[k.lower()] = k
659 for k, v in self.__variable_names_not_public.items():
660 self.__canonical_parameter_name[k.lower()] = k
661 self.__canonical_parameter_name["algorithm"] = "Algorithm"
662 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
664 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
666 logging.debug("%s Lancement", self._name)
667 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
669 # Mise a jour des paramètres internes avec le contenu de Parameters, en
670 # reprenant les valeurs par défauts pour toutes celles non définies
671 self.__setParameters(Parameters, reset=True)
672 for k, v in self.__variable_names_not_public.items():
673 if k not in self._parameters: self.__setParameters( {k:v} )
675 # Corrections et compléments
676 def __test_vvalue(argument, variable, argname):
678 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
679 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
680 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
681 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
683 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
685 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
687 __test_vvalue( Xb, "Xb", "Background or initial state" )
688 __test_vvalue( Y, "Y", "Observation" )
690 def __test_cvalue(argument, variable, argname):
692 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
693 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
694 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
695 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
697 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
699 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
701 __test_cvalue( R, "R", "Observation" )
702 __test_cvalue( B, "B", "Background" )
703 __test_cvalue( Q, "Q", "Evolution" )
705 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
706 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
708 self._parameters["Bounds"] = None
710 if logging.getLogger().level < logging.WARNING:
711 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
712 if PlatformInfo.has_scipy:
713 import scipy.optimize
714 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
716 self._parameters["optmessages"] = 15
718 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
719 if PlatformInfo.has_scipy:
720 import scipy.optimize
721 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
723 self._parameters["optmessages"] = 15
    def _post_run(self,_oH=None):
        "Post-processing of the computation results: derives optional a posteriori quantities and logs operator usage."
        # When an a posteriori covariance was stored, derive the requested
        # companion quantities (variances, standard deviations, correlations).
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Normalize the covariance by the inverse standard deviations
                    # to obtain the correlation matrix C = D^-1/2 A D^-1/2.
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        # NOTE(review): the guard line around the _oH logging (presumably
        # 'if _oH is not None:') is missing from this excerpt.
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Terminé", self._name)
747 def _toStore(self, key):
748 "True if in StoreSupplementaryCalculations, else False"
749 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Return one of the stored variables identified by its key, or the
        dictionary of all available variables when no key is given. The
        variables are returned directly as objects, so the individual access
        methods are those of the persistence classes.
        """
        # NOTE(review): the if/else lines selecting between these two returns
        # (presumably testing 'key is not None') are missing from this excerpt.
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        # Lookup is case-insensitive via the canonical stored-name mapping.
        # NOTE(review): the early-return line of this guard (presumably
        # 'return False') is missing from this excerpt.
        if key is None or key.lower() not in self.__canonical_stored_name:
        return self.__canonical_stored_name[key.lower()] in self.StoredVariables
772 "D.keys() -> list of D's keys"
773 if hasattr(self, "StoredVariables"):
774 return self.StoredVariables.keys()
779 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
780 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
781 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
786 raise TypeError("pop expected at least 1 arguments, got 0")
787 "If key is not found, d is returned if given, otherwise KeyError is raised"
793 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
795 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
796 sa forme mathématique la plus naturelle possible.
798 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
        """
        Declare, inside the algorithm, a required parameter together with its
        default characteristics (default value, typecast, bounds, allowed values).
        """
        # NOTE(review): the guard line testing 'name' and part of the declaration
        # dictionary literal are missing from this excerpt.
            raise ValueError("A name is mandatory to define a required parameter.")
        self.__required_parameters[name] = {
            "typecast" : typecast,
        # Case-insensitive alias so parameters can be looked up in any case.
        self.__canonical_parameter_name[name.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of required parameter names, or directly the
        full dictionary of required parameters.
        """
        # NOTE(review): the if/else lines around these two returns (presumably
        # testing 'noDetails') are missing from this excerpt.
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Return, in a controlled way, the value of a required parameter:
        resolve the declared default, apply the typecast, then check min/max
        bounds and the allowed-values list.
        """
        # Canonical (case-insensitive) name and the declared characteristics.
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        # Resolve the effective value: explicit value wins, else the default.
        # NOTE(review): some branch lines (else/try/except) are missing from
        # this excerpt.
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
                    __val = typecast( value )
                    raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Bounds and allowed-values validation of the resolved value.
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        if listval is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif __val not in listval:
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
867 def requireInputArguments(self, mandatory=(), optional=()):
869 Permet d'imposer des arguments requises en entrée
871 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
872 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    def __setParameters(self, fromDico={}, reset=False):
        """
        Store the received parameters in the internal dictionary, resolving
        the given keys case-insensitively against the declared required
        parameters and validating each value through setParameterValue.
        """
        self._parameters.update( fromDico )
        # Map each canonical parameter name back to the key actually received.
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        # NOTE(review): the else branch line between the two calls below is
        # missing from this excerpt.
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
894 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General interface class for an algorithm and its parameters.
    """
    # NOTE(review): the __init__ signature and several initialisation lines
    # are missing from this excerpt.
        name = "GenericAlgorithm",
        self.__name = str(name)
        # Internal algorithm bookkeeping, filled by __setAlgorithm.
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        self.updateParameters( asDict, asScript )
        # The algorithm choice can come from a script or from the argument.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
    def updateParameters(self,
        # NOTE(review): the remaining signature lines (presumably asDict and
        # asScript keyword parameters) are missing from this excerpt.
        "Update the internal parameters dictionary, from a dict or a script."
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Run the assimilation computation, taking all case objects from the given dictionary."
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each case object through getO() when available, else use it as-is.
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # Validate size consistency before launching the algorithm.
        self.__shape_validate()
        # NOTE(review): most run() keyword-argument lines are missing from
        # this excerpt.
        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Run the assimilation computation by executing the given YACS XML scheme."
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        __file = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
        # The SALOME/YACS/ADAO environment must be loaded before execution.
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        # NOTE(review): the import lines for pilot/SALOMERuntime/loader and
        # several try/return lines are missing from this excerpt.
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        # Report parser errors before attempting execution.
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        # Check the graph consistency (links) without stopping on warnings.
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Check for, and return, a stored variable or a parameter identified by its key."
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
        # NOTE(review): the else line introducing this fallback and its final
        # return are missing from this excerpt.
            allvariables = self.__P
            # Hide variables that are internal bookkeeping, not public results.
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
1048 def pop(self, k, d):
1049 "Necessaire pour le pickling"
1050 return self.__algorithm.pop(k, d)
1052 def getAlgorithmRequiredParameters(self, noDetails=True):
1053 "Renvoie la liste des paramètres requis selon l'algorithme"
1054 return self.__algorithm.getRequiredParameters(noDetails)
    def setObserver(self, __V, __O, __I, __S):
        # Attach the observer callable __O (with info __I) to the stored
        # variable named __V; requires an algorithm to be selected first.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        # NOTE(review): the other keyword-argument lines of this call are
        # missing from this excerpt.
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach the observer __O from the stored variable named __V;
        # requires an algorithm to be selected first.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        # NOTE(review): the remaining argument lines of this call are missing
        # from this excerpt.
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
    def hasObserver(self, __V):
        # Return True when the stored variable named __V currently has a
        # data observer attached.
        # NOTE(review): the 'return False' lines of the two guards below are
        # missing from this excerpt.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # keys(): list all public variable and parameter names, excluding
        # internal bookkeeping entries.
        # NOTE(review): the 'def keys(self):' header line is missing from this excerpt.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1098 def __contains__(self, key=None):
1099 "D.__contains__(k) -> True if D has a key k, else False"
1100 return key in self.__algorithm or key in self.__P
1103 "x.__repr__() <==> repr(x)"
1104 return repr(self.__A)+", "+repr(self.__P)
1107 "x.__str__() <==> str(x)"
1108 return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm used to carry out the assimilation study. The
        argument is a character string referring to the name of an algorithm
        performing the operation on the fixed arguments.
        """
        # NOTE(review): the guard line testing 'choice' is missing from this excerpt.
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete algorithm file on sys.path
        # -------------------------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------
        # NOTE(review): the 'try:' line of this import block is missing from this excerpt.
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the module's elementary algorithm type
        # ---------------------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        """
        Validate the mutual consistency of the sizes of the variables and of
        the matrices, when present.
        """
        # Determine the effective shape of each input, accepting objects that
        # expose 'size', a 'shape' tuple, or a callable 'shape()'.
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        #
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        #
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        #
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        #
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        #
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        #
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        #
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        #
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Condition checks
        # ----------------
        # NOTE(review): some check lines in this section are missing from this excerpt.
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        #
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        #
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        #
        # Special case: EnsembleBlue accepts a background given as an ensemble,
        # reshaped here into a persistent series of vectors.
        # NOTE(review): the 'else:' line before the final raise is missing from
        # this excerpt.
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
                __Xb_shape = min(__B_shape)
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        #
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        #
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        #
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        #
        # When bounds are given, there must be exactly one pair per state component.
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1265 # ==============================================================================
class RegulationAndParameters(object):
    """
    General interface class for the regulation and its parameters.
    """
    # NOTE(review): the __init__ signature and several lines are missing from
    # this excerpt.
        name = "GenericRegulation",
        self.__name = str(name)
        # The algorithm choice can come from a script or from the argument.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        # Idem for the parameters dictionary.
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            self.__P.update( {"Algorithm":__Algo} )
    def get(self, key = None):
        "Check for, and return, a variable or parameter identified by its key."
        # NOTE(review): the guard line before this return is missing from this excerpt.
            return self.__P[key]
1304 # ==============================================================================
class DataObserver(object):
    """
    General interface class for observer-type objects.
    """
    # NOTE(review): the __init__ signature and several branch lines are
    # missing from this excerpt.
        name = "GenericObserver",
        self.__name = str(name)
        # Normalize the observed variable name(s) and their info strings.
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
                self.__I = (str(withInfo),)
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        # Resolve the observer function text from the string/template/script source.
        if asString is not None:
            __FunctionText = asString
        elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            __FunctionText = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            __FunctionText = Interfaces.ImportFromScript(asScript).getstring()
        __Function = ObserverF(__FunctionText)
        # An explicit observer object takes precedence over the built function.
        if asObsObject is not None:
            self.__O = asObsObject
            self.__O = __Function.getfunc()
        #
        # Attach the observer to each requested variable of the algorithm.
        for k in range(len(self.__V)):
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
        # NOTE(review): the __repr__/__str__ def header lines are missing from
        # this excerpt.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1375 # ==============================================================================
class State(object):
    """
    General interface class for state-type objects.
    """
    # NOTE(review): the __init__ signature and several branch lines are
    # missing from this excerpt.
        name = "GenericVector",
        asPersistentVector = None,
        toBeChecked = False,
        """
        Define a vector:
        - asVector: data input, as a vector compatible with the numpy.matrix
          constructor, or "True" if input through a script.
        - asPersistentVector: data input, as a series of vectors compatible
          with the numpy.matrix constructor, or as a Persistence object, or
          "True" if input through a script.
        - asScript: if a valid script is given containing a variable named
          "name", the variable is of type "asVector" (default) or
          "asPersistentVector" depending on which of these variables is set.
        - asDataFile: if one or more valid files are given containing values
          in columns, themselves named "colNames" (if no column name is
          indicated, a column named "name" is searched for), the columns are
          retrieved and stored row after row (colMajor=False, default) or
          column after column (colMajor=True). The resulting variable is of
          type "asVector" (default) or "asPersistentVector" depending on
          which of these variables is set.
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        #
        self.__is_vector = False
        self.__is_series = False
        #
        # Resolve the data from script, data file, or direct arguments.
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz data is already column-major, so the transpose condition
                # is inverted for that format.
                if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                if colNames is not None:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                    __Vector = numpy.ravel(__Vector, order = "F")
                    __Vector = numpy.ravel(__Vector, order = "C")
            __Vector, __Series = asVector, asPersistentVector
        #
        # Store the data either as a single column vector or as a persistent series.
        if __Vector is not None:
            self.__is_vector = True
            self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # A string input is evaluated to recover the series content.
                if isinstance(__Series, str): __Series = eval(__Series)
                for member in __Series:
                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
    def getO(self, withScheduler=False):
        # Return the stored object, optionally together with its scheduler.
        # NOTE(review): the surrounding if/return lines are missing from this excerpt.
            return self.__V, self.__T
        elif self.__T is None:
        # NOTE(review): the def header lines of the following accessors are
        # missing from this excerpt.
        "Internal type check"
        return self.__is_vector
        "Internal type check"
        return self.__is_series
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)
        "x.__str__() <==> str(x)"
        return str(self.__V)
1499 # ==============================================================================
class Covariance(object):
    """
    General interface class for covariance-type objects.
    """
    # NOTE(review): the __init__ signature and several branch lines are
    # missing from this excerpt.
        name = "GenericCovariance",
        asCovariance = None,
        asEyeByScalar = None,
        asEyeByVector = None,
        toBeChecked = False,
        """
        Define a covariance:
        - asCovariance: data input, as a matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar: data input as a single variance scalar, multiplying
          an identity correlation matrix, so no matrix has to be given
          explicitly
        - asEyeByVector: data input as a single variance vector, to be put on
          the diagonal of a correlation matrix, so no matrix has to be given
          explicitly
        - asCovObject: data input as a python object, which has the mandatory
          methods "getT", "getI", "diag", "trace", "__add__", "__sub__",
          "__neg__", "__mul__", "__rmul__" and the optional ones "shape",
          "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__",
          "__str__"
        - toBeChecked: boolean indicating whether the SPD character of the
          full matrix has to be checked
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        #
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        #
        # Resolve the data source: script variable or direct arguments.
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
                __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        # Store the covariance in its most compact representation: scalar,
        # diagonal vector, full matrix, or user-provided object.
        if __Scalar is not None:
            if numpy.matrix(__Scalar).size != 1:
                raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
        elif __Vector is not None:
            self.__is_vector = True
            self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            # The object must provide the full mandatory matrix-like protocol.
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
        # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
    def __validate(self):
        "Validate the internal covariance representation (squareness and positive-definiteness)."
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # SPD check via Cholesky factorization, attempted on request or in
        # debug mode. NOTE(review): the try/except lines of these two blocks
        # are missing from this excerpt.
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
                L = numpy.linalg.cholesky( self.__C )
                raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
                L = self.__C.cholesky()
                raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        # NOTE(review): the four method headers (`def isscalar(self):`,
        # `def isvector(self):`, `def ismatrix(self):`, `def isobject(self):`)
        # appear elided from this excerpt — confirm against the full source.
        # Each predicate reports the internal storage kind fixed at
        # construction time.
        "Internal type check"
        return self.__is_scalar
        "Internal type check"
        return self.__is_vector
        "Internal type check"
        return self.__is_matrix
        "Internal type check"
        return self.__is_object
        # NOTE(review): fragment of `getI` — the method header and the leading
        # `if self.ismatrix():` test appear elided from this excerpt.
        # Returns a new Covariance holding the inverse, using the
        # representation-specific form: matrix `.I`, elementwise reciprocal
        # for diagonal storages, delegated `getI()` for objects.
            return Covariance(self.__name+"I", asCovariance  = self.__C.I )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject():
            return Covariance(self.__name+"I", asCovObject   = self.__C.getI() )
        # NOTE(review): the `else:` owning the line below appears elided.
            return None # Indispensable
        # NOTE(review): fragment of `getT` (transpose) — the method header and
        # the leading `if self.ismatrix():` test appear elided from this
        # excerpt. Diagonal (vector/scalar) storages are symmetric, so they
        # are returned unchanged.
            return Covariance(self.__name+"T", asCovariance  = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject():
            return Covariance(self.__name+"T", asCovObject   = self.__C.getT() )
        # NOTE(review): fragment of `cholesky` — the method header and the
        # leading `if self.ismatrix():` test appear elided from this excerpt.
        "Cholesky decomposition"
            return Covariance(self.__name+"C", asCovariance  = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            # For a diagonal covariance the Cholesky factor is the
            # elementwise square root.
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            # Delegated only when the backing object supports it.
            return Covariance(self.__name+"C", asCovObject   = self.__C.cholesky() )
    def choleskyI(self):
        "Inversion of the Cholesky decomposition"
        # NOTE(review): the leading `if self.ismatrix():` test appears elided
        # from this excerpt.
            return Covariance(self.__name+"H", asCovariance  = numpy.linalg.cholesky(self.__C).I )
        elif self.isvector():
            # Inverse of the elementwise-sqrt factor of a diagonal covariance.
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            # Delegated only when the backing object supports it.
            return Covariance(self.__name+"H", asCovObject   = self.__C.choleskyI() )
    def diag(self, msize=None):
        "Diagonal of the matrix"
        # msize: required only for scalar storage, where the diagonal length
        # cannot be deduced from the stored value itself.
        # NOTE(review): the leading `if self.ismatrix():` test, the
        # vector-case return, the `if msize is None:` test and its `else:`
        # appear elided from this excerpt.
            return numpy.diag(self.__C)
        elif self.isvector():
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # Scalar case: expand the single value over a diagonal of length msize.
            return self.__C * numpy.ones(int(msize))
        elif self.isobject():
            return self.__C.diag()
    def asfullmatrix(self, msize=None):
        "Return the covariance as a full (dense) numpy matrix"
        # msize: required only for scalar storage (diagonal length unknown).
        # NOTE(review): the docstring/matrix-case lines and the
        # `if msize is None:` test appear elided from this excerpt.
        elif self.isvector():
            # Diagonal vector expanded into a dense diagonal matrix.
            return numpy.matrix( numpy.diag(self.__C), float )
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            # Delegated only when the backing object supports it.
            return self.__C.asfullmatrix()
    def trace(self, msize=None):
        "Trace of the matrix"
        # msize: required only for scalar storage, where trace = C * size.
        # NOTE(review): the leading `if self.ismatrix():` test, the
        # `if msize is None:` test and its `else:` appear elided from this
        # excerpt.
            return numpy.trace(self.__C)
        elif self.isvector():
            # Trace of a diagonal covariance is the sum of its diagonal.
            return float(numpy.sum(self.__C))
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
        # NOTE(review): the `def __repr__(self):` and `def __str__(self):`
        # headers appear elided from this excerpt; both simply delegate to
        # the stored internal representation.
        "x.__repr__() <==> repr(x)"
        return repr(self.__C)
        "x.__str__() <==> str(x)"
        return str(self.__C)
1727 def __add__(self, other):
1728 "x.__add__(y) <==> x+y"
1729 if self.ismatrix() or self.isobject():
1730 return self.__C + numpy.asmatrix(other)
1731 elif self.isvector() or self.isscalar():
1732 _A = numpy.asarray(other)
1733 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1734 return numpy.asmatrix(_A)
1736 def __radd__(self, other):
1737 "x.__radd__(y) <==> y+x"
1738 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1740 def __sub__(self, other):
1741 "x.__sub__(y) <==> x-y"
1742 if self.ismatrix() or self.isobject():
1743 return self.__C - numpy.asmatrix(other)
1744 elif self.isvector() or self.isscalar():
1745 _A = numpy.asarray(other)
1746 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1747 return numpy.asmatrix(_A)
1749 def __rsub__(self, other):
1750 "x.__rsub__(y) <==> y-x"
1751 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
        # NOTE(review): `__neg__` fragment — only its docstring line is
        # present in this excerpt; the header and body appear elided.
        "x.__neg__() <==> -x"
    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        # Left product, dispatched on the internal storage type and on the
        # operand's type/shape; 1D operands are treated as column vectors.
        # NOTE(review): the `else:` lines owning the dangling raise/return
        # statements below appear elided from this excerpt.
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                # Diagonal * matrix realized as elementwise row scaling.
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            # Row or column 1D-like operands are normalized to columns.
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
                return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            # Delegated to the backing object's own __mul__.
            return self.__C.__mul__(other)
            raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        # Right product (operand on the left), mirroring __mul__ with row
        # vectors instead of columns.
        # NOTE(review): the `else:` lines owning the dangling raise
        # statements below appear elided from this excerpt.
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix(other) * self.__C
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                # Matrix * diagonal realized as elementwise column scaling.
                return numpy.asmatrix(numpy.array(other) * self.__C)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            # Delegated to the backing object's own __rmul__.
            return self.__C.__rmul__(other)
            raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
        # NOTE(review): `__len__` fragment — the `def __len__(self):` header
        # appears elided from this excerpt. Length is the row count of the
        # stored shape.
        "x.__len__() <==> len(x)"
        return self.shape[0]
1816 # ==============================================================================
class ObserverF(object):
    # NOTE(review): the class docstring delimiters, the body of `func` and
    # the header of `getfunc` appear elided from this excerpt.
    Creation of an observer function from its source text
    def __init__(self, corps=""):
        # Keep the observer source text (to be executed later by `func`).
        self.__corps = corps
    def func(self,var,info):
        "Observation function"
        "Return the function pointer held by the object"
1830 # ==============================================================================
class CaseLogger(object):
    # NOTE(review): the class docstring delimiters, the dict literal lines
    # opening/closing `self.__viewers` / `self.__loaders`, the method owning
    # the two `self.__switchoff` assignments, and the `else:` lines before
    # the raise statements in dump/load appear elided from this excerpt.
    Keeps a record of the commands used to create a case
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name = str(__name)
        self.__objname = str(__objname)
        self.__logSerie = []
        self.__switchoff = False
        # Entries of the (elided) `self.__viewers` dict: output formatters.
        "TUI" :Interfaces._TUIViewer,
        "SCD" :Interfaces._SCDViewer,
        "YACS":Interfaces._YACSViewer,
        # Entries of the (elided) `self.__loaders` dict: input parsers.
        "TUI" :Interfaces._TUIViewer,
        "COM" :Interfaces._COMViewer,
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))
    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        # Commands are appended only while logging is active.
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            self.__switchoff = True
            self.__switchoff = False
    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized dump of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)
    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1880 # ==============================================================================
    # NOTE(review): `MultiFonction` fragment — the `def MultiFonction(__xserie,`
    # header, some parameters (e.g. the multiprocessing enable/workers flags),
    # the docstring delimiters, `_jobs = []`, the try/except around the
    # multiprocessing import and several `else:` lines appear elided from
    # this excerpt.
    _extraArguments = None,
    _sFunction = lambda x: x,
    For an ordered list of input vectors, returns as output the corresponding
    list of values of the function given in argument
    # Initial checks and definitions
    # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
        __mpWorkers = int(_mpWorkers)
        import multiprocessing
    # Build the job list, attaching the extra positional arguments if any.
    if _extraArguments is None:
    elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
        for __xvalue in __xserie:
            _jobs.append( [__xvalue, ] + list(_extraArguments) )
        raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
    # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
    import multiprocessing
    # Dispatch the evaluations over a process pool.
    with multiprocessing.Pool(__mpWorkers) as pool:
        __multiHX = pool.map( _sFunction, _jobs )
    # logging.debug("MULTF Internal multiprocessing calculation end")
    # logging.debug("MULTF Internal monoprocessing calculation begin")
    # Sequential fallback: evaluate point by point, forwarding extra
    # arguments positionally (list/tuple/map) or by keyword (dict).
    if _extraArguments is None:
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue ) )
    elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
    elif _extraArguments is not None and isinstance(_extraArguments, dict):
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
        raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
    # logging.debug("MULTF Internal monoprocessing calculation end")
    # logging.debug("MULTF Internal multifonction calculations end")
1946 # ==============================================================================
def CostFunction3D(_x,
                   # NOTE(review): several parameter lines (observation _Y,
                   # background, covariance inverses _BI/_RI, ...), the
                   # closing `):`, the docstring delimiters, `else:` branches
                   # and the final return statements appear elided from this
                   # excerpt. Also note the mutable defaults _SSC=[] and
                   # _SSV={} are shared across calls — confirm intended.
                   _Hm = None, # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None, # Simulation of Hm(x) already available
                   _arg = None, # Extra arguments for Hm, as a tuple
                   _SIV = False, # To be phased out for 8.0
                   _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0, # nbPreviousSteps
                   _QM = "DA", # QualityMeasure
                   _SSV = {}, # Input and/or output : self.StoredVariables
                   _fRt = False, # Whether to return the extended output
                   _sSc = True, # Whether to store the SSC
    General cost function useful for static/3D algorithms : 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO, SA, Tabu,
    DFO, QuantileRegression
    # Make sure the storage entries behave like lists (Persistence objects
    # expose `store`, aliased here to `append`).
    for k in ["CostFunctionJ",
              "SimulatedObservationAtCurrentOptimum",
              "SimulatedObservationAtCurrentState",
        if hasattr(_SSV[k],"store"):
            _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    # Use the precomputed simulation when provided, otherwise apply _Hm.
    if _HmX is not None:
        raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        _HX = _Hm( _X, *_arg )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    if "SimulatedObservationAtCurrentState" in _SSC or \
        "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    # NaN in the simulation invalidates the whole cost evaluation.
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        # Quality measure dispatch: each branch computes the background (Jb)
        # and observation (Jo) terms of the cost.
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            raise ValueError("Unknown asked quality measure!")
        J = float( Jb ) + float( Jo )
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    if "IndexOfOptimum" in _SSC or \
        "CurrentOptimum" in _SSC or \
        "SimulatedObservationAtCurrentOptimum" in _SSC:
        # Optimum over the stored history, skipping the first _nPS entries.
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
        if "IndexOfOptimum" in _SSC:
            _SSV["IndexOfOptimum"].append( IndexMin )
        if "CurrentOptimum" in _SSC:
            _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
        if "SimulatedObservationAtCurrentOptimum" in _SSC:
            _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    if _QM in ["QR"]: # For QuantileRegression
2061 # ==============================================================================
if __name__ == "__main__":
    # Minimal self-check banner when the module is executed directly.
    banner = '\n AUTODIAGNOSTIC\n'
    print(banner)