1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2020 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence, PlatformInfo, Interfaces
36 from daCore import Templates
38 # ==============================================================================
39 class CacheManager(object):
# Cache of already-evaluated operator applications: stores tuples of
# (input point, result, input norm, operator name) so that identical
# requests can be served without recomputation.
# NOTE(review): this is a sampled, line-numbered extract — the left-hand
# original line numbers are non-contiguous, so statements are missing
# between the visible lines (e.g. the "def __init__" header and several
# return statements); do not treat the block as runnable as-is.
41 Classe générale de gestion d'un cache de calculs
# Visible constructor keywords: the duplicate-detection tolerance (relative
# to the stored point norm) and the maximum cache length (-1 means the
# cache is auto-sized at the first store).
# NOTE: "lenght" is a historical misspelling kept for interface compatibility.
44 toleranceInRedundancy = 1.e-18,
45 lenghtOfRedundancy = -1,
48 Les caractéristiques de tolérance peuvent être modifiées à la création.
50 self.__tolerBP = float(toleranceInRedundancy)
51 self.__lenghtOR = int(lenghtOfRedundancy)
52 self.__initlnOR = self.__lenghtOR
59 self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator
61 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
# Look up a cached result matching (xValue, oName), scanning the cache
# backwards (most recent entries first).  The lines missing from this
# extract presumably return a (found_flag, cached_result) pair —
# TODO confirm against the full file.
63 def wasCalculatedIn(self, xValue, oName="" ): #, info="" ):
64 "Checks whether a previous computation matching the value exists"
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# An entry is not comparable when the input has no size, the operator
# names differ, or the vector sizes differ.
69 if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
70 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
# Hit when the distance to the stored point is below the tolerance scaled
# by the stored point norm (relative redundancy criterion).
72 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
74 __HxV = self.__listOPCV[i][1]
75 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
# Store a computed pair (x, Hx) for operator oName.  Grows the allowed
# cache length on the first store and for each previously unseen operator
# name, and evicts the oldest entries when the cache exceeds that length.
79 def storeValueInX(self, xValue, HxValue, oName="" ):
80 "Stores, for an operator o, a computation Hx matching the value x"
81 if self.__lenghtOR < 0:
# First store: size the cache from the state dimension.
82 self.__lenghtOR = 2 * xValue.size + 2
83 self.__initlnOR = self.__lenghtOR
84 self.__seenNames.append(str(oName))
85 if str(oName) not in self.__seenNames: # Extend the list if the operator name is new
86 self.__lenghtOR += 2 * xValue.size + 2
87 self.__initlnOR += self.__lenghtOR
88 self.__seenNames.append(str(oName))
# Evict oldest entries (FIFO) once over capacity.
89 while len(self.__listOPCV) > self.__lenghtOR:
90 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
91 self.__listOPCV.pop(0)
92 self.__listOPCV.append( (
93 copy.copy(numpy.ravel(xValue)),
95 numpy.linalg.norm(xValue),
# The remaining visible lines belong to the disable()/enable() toggles that
# deactivate and restore the cache — TODO confirm method boundaries in the
# full file.
101 self.__initlnOR = self.__lenghtOR
103 self.__enabled = False
107 self.__lenghtOR = self.__initlnOR
108 self.__enabled = True
110 # ==============================================================================
111 class Operator(object):
# Generic wrapper around a single operator, supplied either as a Python
# (multi-)function or as a matrix.  Dispatches applications, counts calls
# both per-instance and class-wide, and routes evaluations through the
# shared cache (Operator.CM) to avoid redundant computations.
# NOTE(review): this is a sampled, line-numbered extract — the left-hand
# original line numbers are non-contiguous, so statements are missing
# between the visible lines (e.g. the "def __init__" header, else branches,
# and class-level declarations such as NbCallsAsMatrix and CM).
113 Classe générale d'interface de type opérateur simple
# Visible constructor keywords: operator name, redundancy-avoidance flag,
# whether the supplied function is already a multi-function, multiprocess
# enabling, and extra arguments forwarded to the user function.
121 name = "GenericOperator",
124 avoidingRedundancy = True,
125 inputAsMultiFunction = False,
126 enableMultiProcess = False,
127 extraArguments = None,
130 On construit un objet de ce type en fournissant, à l'aide de l'un des
131 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
134 - name : nom d'opérateur
135 - fromMethod : argument de type fonction Python
136 - fromMatrix : argument adapté au constructeur numpy.matrix
137 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
138 - inputAsMultiFunction : booléen indiquant une fonction explicitement
139 définie (ou pas) en multi-fonction
140 - extraArguments : arguments supplémentaires passés à la fonction de
141 base et ses dérivées (tuple ou dictionnaire)
143 self.__name = str(name)
144 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
145 self.__AvoidRC = bool( avoidingRedundancy )
146 self.__inputAsMF = bool( inputAsMultiFunction )
147 self.__mpEnabled = bool( enableMultiProcess )
148 self.__extraArgs = extraArguments
# Three construction modes: native multi-function, mono-function wrapped
# into a multi-function via MultiFonction, or an explicit matrix.
149 if fromMethod is not None and self.__inputAsMF:
150 self.__Method = fromMethod # logtimer(fromMethod)
152 self.__Type = "Method"
153 elif fromMethod is not None and not self.__inputAsMF:
154 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
156 self.__Type = "Method"
157 elif fromMatrix is not None:
# NOTE(review): numpy.matrix is deprecated and the numpy.float alias was
# removed in NumPy >= 1.24 — this line needs migration when NumPy is updated.
159 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
160 self.__Type = "Matrix"
# Cache toggles acting on the class-level CacheManager shared by all
# operators.  The visible call under enableAvoidingRedundancy is presumably
# the branch taken when self.__AvoidRC is off — TODO confirm against the
# full file (enabling should otherwise call Operator.CM.enable()).
166 def disableAvoidingRedundancy(self):
168 Operator.CM.disable()
170 def enableAvoidingRedundancy(self):
175 Operator.CM.disable()
# Apply the operator to one value or to a series of values.  Reuses
# precomputed HValue results when given, consults the cache when enabled,
# and batches all not-yet-known points into a single multi-function call.
181 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
183 Permet de restituer le résultat de l'application de l'opérateur à une
184 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
185 argument devant a priori être du bon type.
187 - les arguments par série sont :
188 - xValue : argument adapté pour appliquer l'opérateur
189 - HValue : valeur précalculée de l'opérateur en ce point
190 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# Branch 1: precomputed results supplied — store them in the cache instead
# of evaluating.
197 if HValue is not None:
201 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
203 if _HValue is not None:
204 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
206 for i in range(len(_HValue)):
207 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
209 Operator.CM.storeValueInX(_xValue[i],HxValue[-1],self.__name)
# Branch 2: evaluate each point, first probing the cache, then either the
# matrix form or (deferred, batched) the multi-function form.
214 for i, xv in enumerate(_xValue):
216 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
218 __alreadyCalculated = False
220 if __alreadyCalculated:
221 self.__addOneCacheCall()
224 if self.__Matrix is not None:
225 self.__addOneMatrixCall()
226 _hv = self.__Matrix * xv
228 self.__addOneMethodCall()
232 HxValue.append( _hv )
# Single batched multi-function call over the points not resolved above.
234 if len(_xserie)>0 and self.__Matrix is None:
235 if self.__extraArgs is None:
236 _hserie = self.__Method( _xserie ) # Multi-function computation
238 _hserie = self.__Method( _xserie, self.__extraArgs ) # Multi-function computation
239 if not hasattr(_hserie, "pop"):
240 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
246 Operator.CM.storeValueInX(_xv,_hv,self.__name)
248 if argsAsSerie: return HxValue
249 else: return HxValue[-1]
# Apply the operator to (x, u) controlled pairs; when u is None the
# operator is applied to x alone.  Matrix form uses only x.
251 def appliedControledFormTo(self, paires, argsAsSerie = False):
253 Permet de restituer le résultat de l'application de l'opérateur à des
254 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
255 argument devant a priori être du bon type. Si la uValue est None,
256 on suppose que l'opérateur ne s'applique qu'à xValue.
258 - paires : les arguments par paire sont :
259 - xValue : argument X adapté pour appliquer l'opérateur
260 - uValue : argument U adapté pour appliquer l'opérateur
261 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
263 if argsAsSerie: _xuValue = paires
264 else: _xuValue = (paires,)
265 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
267 if self.__Matrix is not None:
269 for paire in _xuValue:
270 _xValue, _uValue = paire
271 self.__addOneMatrixCall()
272 HxValue.append( self.__Matrix * _xValue )
# Function form: rebuild the argument series (pair when u is given, bare x
# otherwise) and make one batched multi-function call.
275 for paire in _xuValue:
277 _xValue, _uValue = paire
278 if _uValue is not None:
279 _xuValue.append( paire )
281 _xuValue.append( _xValue )
282 self.__addOneMethodCall( len(_xuValue) )
283 if self.__extraArgs is None:
284 HxValue = self.__Method( _xuValue ) # Multi-function computation
286 HxValue = self.__Method( _xuValue, self.__extraArgs ) # Multi-function computation
288 if argsAsSerie: return HxValue
289 else: return HxValue[-1]
# Apply the operator built at xNominal to xValue; a matrix operator is
# linear, so xNominal is irrelevant in that branch.
291 def appliedInXTo(self, paires, argsAsSerie = False):
293 Permet de restituer le résultat de l'application de l'opérateur à une
294 série d'arguments xValue, sachant que l'opérateur est valable en
295 xNominal. Cette méthode se contente d'appliquer, son argument devant a
296 priori être du bon type. Si l'opérateur est linéaire car c'est une
297 matrice, alors il est valable en tout point nominal et xNominal peut
298 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
299 permet d'indiquer que l'argument est multi-paires.
301 - paires : les arguments par paire sont :
302 - xNominal : série d'arguments permettant de donner le point où
303 l'opérateur est construit pour être ensuite appliqué
304 - xValue : série d'arguments adaptés pour appliquer l'opérateur
305 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
307 if argsAsSerie: _nxValue = paires
308 else: _nxValue = (paires,)
309 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
311 if self.__Matrix is not None:
313 for paire in _nxValue:
314 _xNominal, _xValue = paire
315 self.__addOneMatrixCall()
316 HxValue.append( self.__Matrix * _xValue )
318 self.__addOneMethodCall( len(_nxValue) )
319 if self.__extraArgs is None:
320 HxValue = self.__Method( _nxValue ) # Multi-function computation
322 HxValue = self.__Method( _nxValue, self.__extraArgs ) # Multi-function computation
324 if argsAsSerie: return HxValue
325 else: return HxValue[-1]
# Return the operator in matrix form, evaluating the function-based form at
# the given operating point(s) when no explicit matrix exists.
327 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
329 Permet de renvoyer l'opérateur sous la forme d'une matrice
331 if self.__Matrix is not None:
332 self.__addOneMatrixCall()
333 mValue = [self.__Matrix,]
334 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
337 self.__addOneMethodCall( len(ValueForMethodForm) )
338 for _vfmf in ValueForMethodForm:
339 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
341 self.__addOneMethodCall()
342 mValue = self.__Method(((ValueForMethodForm, None),))
344 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
346 if argsAsSerie: return mValue
347 else: return mValue[-1]
# Shape accessor (its "def" line is missing from this extract): only
# available when the operator is stored as a matrix.
351 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
352 la forme d'une matrice
354 if self.__Matrix is not None:
355 return self.__Matrix.shape
357 raise ValueError("Matrix form of the operator is not available, nor the shape")
# Call counters: per-instance totals first, then the class-wide totals
# shared by every Operator instance; "which" selects one entry by index.
359 def nbcalls(self, which=None):
361 Renvoie les nombres d'évaluations de l'opérateur
364 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
365 self.__NbCallsAsMatrix,
366 self.__NbCallsAsMethod,
367 self.__NbCallsOfCached,
368 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
369 Operator.NbCallsAsMatrix,
370 Operator.NbCallsAsMethod,
371 Operator.NbCallsOfCached,
373 if which is None: return __nbcalls
374 else: return __nbcalls[which]
# Internal accounting helpers: each increments both the per-instance and
# the class-wide counter of the corresponding call kind.
376 def __addOneMatrixCall(self):
377 "Counts one call"
378 self.__NbCallsAsMatrix += 1 # Local count
379 Operator.NbCallsAsMatrix += 1 # Global count
381 def __addOneMethodCall(self, nb = 1):
382 "Counts one call"
383 self.__NbCallsAsMethod += nb # Local count
384 Operator.NbCallsAsMethod += nb # Global count
386 def __addOneCacheCall(self):
387 "Counts one call"
388 self.__NbCallsOfCached += 1 # Local count
389 Operator.NbCallsOfCached += 1 # Global count
391 # ==============================================================================
392 class FullOperator(object):
# Builds the complete (Direct, Tangent, Adjoint) operator triple, stored in
# self.__FO, from one of several user inputs: a matrix, one function with
# finite-difference derivatives, three explicit functions, or a script.
# NOTE(review): this is a sampled, line-numbered extract — the left-hand
# original line numbers are non-contiguous, so statements are missing
# between the visible lines (e.g. the "def __init__" header, several
# keyword parameters such as asMatrix/appliedInX/avoidRC/toBeChecked, and
# else branches).
394 Classe générale d'interface de type opérateur complet
395 (Direct, Linéaire Tangent, Adjoint)
398 name = "GenericFullOperator",
400 asOneFunction = None, # 1 function
401 asThreeFunctions = None, # 3 functions in a dictionary
402 asScript = None, # 1 or 3 function(s) given by script
403 asDict = None, # Parameters
405 extraArguments = None,
407 inputAsMF = False,# Function(s) given as multi-functions
412 self.__name = str(name)
413 self.__check = bool(toBeChecked)
414 self.__extraArgs = extraArguments
# Normalize the multiprocessing parameters; derivatives and evaluation
# multiprocessing are mutually exclusive, derivatives taking priority.
419 if (asDict is not None) and isinstance(asDict, dict):
420 __Parameters.update( asDict )
421 # EnableMultiProcessingInDerivatives=True takes priority
422 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
423 __Parameters["EnableMultiProcessingInDerivatives"] = True
424 __Parameters["EnableMultiProcessingInEvaluation"] = False
425 if "EnableMultiProcessingInDerivatives" not in __Parameters:
426 __Parameters["EnableMultiProcessingInDerivatives"] = False
427 if __Parameters["EnableMultiProcessingInDerivatives"]:
428 __Parameters["EnableMultiProcessingInEvaluation"] = False
429 if "EnableMultiProcessingInEvaluation" not in __Parameters:
430 __Parameters["EnableMultiProcessingInEvaluation"] = False
431 if "withIncrement" in __Parameters: # Temporary
432 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Script input: load either a matrix or the Direct/Tangent/Adjoint
# functions by name from the user script.
434 if asScript is not None:
435 __Matrix, __Function = None, None
437 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
439 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
440 __Function.update({"useApproximatedDerivatives":True})
441 __Function.update(__Parameters)
442 elif asThreeFunctions:
444 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
445 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
446 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
448 __Function.update(__Parameters)
# Direct (non-script) input: one function (derivatives approximated) or a
# dictionary of up to three functions.
451 if asOneFunction is not None:
452 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
453 if asOneFunction["Direct"] is not None:
454 __Function = asOneFunction
# NOTE(review): the error strings below misspell "dictionary"/"optional";
# they are runtime messages, so left untouched here.
456 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
458 __Function = { "Direct":asOneFunction }
459 __Function.update({"useApproximatedDerivatives":True})
460 __Function.update(__Parameters)
461 elif asThreeFunctions is not None:
462 if isinstance(asThreeFunctions, dict) and \
463 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
464 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
465 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
466 __Function = asThreeFunctions
467 elif isinstance(asThreeFunctions, dict) and \
468 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
469 __Function = asThreeFunctions
470 __Function.update({"useApproximatedDerivatives":True})
472 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
473 if "Direct" not in asThreeFunctions:
474 __Function["Direct"] = asThreeFunctions["Tangent"]
475 __Function.update(__Parameters)
479 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
480 # for k in ("Direct", "Tangent", "Adjoint"):
481 # if k in __Function and hasattr(__Function[k],"__class__"):
482 # if type(__Function[k]) is type(self.__init__):
483 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the "applied in X" precomputed values into a dictionary keyed
# by "HXb" when a bare value is given.
485 if appliedInX is not None and isinstance(appliedInX, dict):
486 __appliedInX = appliedInX
487 elif appliedInX is not None:
488 __appliedInX = {"HXb":appliedInX}
492 if scheduledBy is not None:
493 self.__T = scheduledBy
# Case 1: a Direct function with approximated derivatives — build a
# finite-difference approximation (FDA) for Tangent and Adjoint, after
# filling in every FDA knob with its default.
495 if isinstance(__Function, dict) and \
496 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
497 ("Direct" in __Function) and (__Function["Direct"] is not None):
498 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
499 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
500 if "withdX" not in __Function: __Function["withdX"] = None
501 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
502 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
503 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
504 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
505 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
506 from daCore import NumericObjects
507 FDA = NumericObjects.FDApproximation(
509 Function = __Function["Direct"],
510 centeredDF = __Function["CenteredFiniteDifference"],
511 increment = __Function["DifferentialIncrement"],
512 dX = __Function["withdX"],
513 avoidingRedundancy = __Function["withAvoidingRedundancy"],
514 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
515 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
516 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
517 mpWorkers = __Function["NumberOfProcesses"],
518 mfEnabled = __Function["withmfEnabled"],
520 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
521 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
522 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: the three functions are given explicitly.
523 elif isinstance(__Function, dict) and \
524 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
525 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
526 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
527 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
528 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: a matrix (linear operator) — the adjoint is its transpose.
# NOTE(review): numpy.matrix is deprecated and the numpy.float alias was
# removed in NumPy >= 1.24 — needs migration when NumPy is updated.
529 elif asMatrix is not None:
530 __matrice = numpy.matrix( __Matrix, numpy.float )
531 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
532 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
533 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
536 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Normalize each "applied in X" value to a numpy.matrix column vector.
538 if __appliedInX is not None:
539 self.__FO["AppliedInX"] = {}
540 for key in list(__appliedInX.keys()):
541 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
542 # Case where a true matrix is given
543 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
544 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
545 # Case where a vector is represented as a 2-dimensional array
546 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
548 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
550 self.__FO["AppliedInX"] = None
# Representations delegate to the internal operator dictionary.
556 "x.__repr__() <==> repr(x)"
557 return repr(self.__FO)
560 "x.__str__() <==> str(x)"
561 return str(self.__FO)
563 # ==============================================================================
564 class Algorithm(object):
566 Classe générale d'interface de type algorithme
568 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
569 d'assimilation, en fournissant un container (dictionnaire) de variables
570 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
572 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
574 def __init__(self, name):
576 L'initialisation présente permet de fabriquer des variables de stockage
577 disponibles de manière générique dans les algorithmes élémentaires. Ces
578 variables de stockage sont ensuite conservées dans un dictionnaire
579 interne à l'objet, mais auquel on accède par la méthode "get".
581 Les variables prévues sont :
582 - APosterioriCorrelations : matrice de corrélations de la matrice A
583 - APosterioriCovariance : matrice de covariances a posteriori : A
584 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
585 - APosterioriVariances : vecteur des variances de la matrice A
586 - Analysis : vecteur d'analyse : Xa
587 - BMA : Background moins Analysis : Xa - Xb
588 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
589 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
590 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
591 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
592 - CostFunctionJo : partie observations de la fonction-coût : Jo
593 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
594 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
595 - CurrentOptimum : état optimal courant lors d'itérations
596 - CurrentState : état courant lors d'itérations
597 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
598 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
599 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
600 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
601 - Innovation : l'innovation : d = Y - H(X)
602 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
603 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
604 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
605 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
606 - KalmanGainAtOptimum : gain de Kalman à l'optimum
607 - MahalanobisConsistency : indicateur de consistance des covariances
608 - OMA : Observation moins Analyse : Y - Xa
609 - OMB : Observation moins Background : Y - Xb
610 - ForecastState : état prédit courant lors d'itérations
611 - Residu : dans le cas des algorithmes de vérification
612 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
613 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
614 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
615 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
616 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
617 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
618 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
619 On peut rajouter des variables à stocker dans l'initialisation de
620 l'algorithme élémentaire qui va hériter de cette classe
622 logging.debug("%s Initialisation", str(name))
623 self._m = PlatformInfo.SystemUsage()
625 self._name = str( name )
626 self._parameters = {"StoreSupplementaryCalculations":[]}
627 self.__required_parameters = {}
628 self.__required_inputs = {
629 "RequiredInputValues":{"mandatory":(), "optional":()},
630 "ClassificationTags":[],
632 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
633 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
634 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
636 self.StoredVariables = {}
637 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
638 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
639 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
640 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
641 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
642 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
643 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
644 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
645 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
646 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
647 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
648 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
649 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
650 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
651 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
652 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
653 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
654 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
655 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
656 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
657 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
658 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
659 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
660 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
661 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
662 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
663 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
664 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
665 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
666 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
667 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
668 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
669 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
670 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
671 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
672 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
673 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
674 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
675 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
677 for k in self.StoredVariables:
678 self.__canonical_stored_name[k.lower()] = k
680 for k, v in self.__variable_names_not_public.items():
681 self.__canonical_parameter_name[k.lower()] = k
682 self.__canonical_parameter_name["algorithm"] = "Algorithm"
683 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
685 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
687 logging.debug("%s Lancement", self._name)
688 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
690 # Mise a jour des paramètres internes avec le contenu de Parameters, en
691 # reprenant les valeurs par défauts pour toutes celles non définies
692 self.__setParameters(Parameters, reset=True)
693 for k, v in self.__variable_names_not_public.items():
694 if k not in self._parameters: self.__setParameters( {k:v} )
696 # Corrections et compléments des vecteurs
697 def __test_vvalue(argument, variable, argname, symbol=None):
698 if symbol is None: symbol = variable
700 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
701 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
702 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
703 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
705 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
707 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
709 __test_vvalue( Xb, "Xb", "Background or initial state" )
710 __test_vvalue( Y, "Y", "Observation" )
711 __test_vvalue( U, "U", "Control" )
713 # Corrections et compléments des covariances
714 def __test_cvalue(argument, variable, argname, symbol=None):
715 if symbol is None: symbol = variable
717 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
718 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
719 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
720 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
722 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
724 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
726 __test_cvalue( B, "B", "Background" )
727 __test_cvalue( R, "R", "Observation" )
728 __test_cvalue( Q, "Q", "Evolution" )
730 # Corrections et compléments des opérateurs
731 def __test_ovalue(argument, variable, argname, symbol=None):
732 if symbol is None: symbol = variable
733 if argument is None or (isinstance(argument,dict) and len(argument)==0):
734 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
735 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
736 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
737 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
739 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
741 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
743 __test_ovalue( HO, "HO", "Observation", "H" )
744 __test_ovalue( EM, "EM", "Evolution", "M" )
745 __test_ovalue( CM, "CM", "Control Model", "C" )
747 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
748 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
750 self._parameters["Bounds"] = None
752 if logging.getLogger().level < logging.WARNING:
753 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
754 if PlatformInfo.has_scipy:
755 import scipy.optimize
756 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
758 self._parameters["optmessages"] = 15
760 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
761 if PlatformInfo.has_scipy:
762 import scipy.optimize
763 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
765 self._parameters["optmessages"] = 15
# _post_run: shared post-processing after an algorithm run. For every stored
# "APosterioriCovariance" matrix _A it derives and stores, when requested in
# "StoreSupplementaryCalculations": the variances (diag(_A)), the standard
# deviations (sqrt(diag(_A))) and the correlation matrix D^-1/2 * A * D^-1/2.
# It then logs observation-operator call/cache counters and memory use.
# NOTE(review): this extract is a numbered listing; the integer opening each
# line is the original file line number, and gaps in that numbering (here 770,
# likely a short docstring) mark lines lost in extraction — not executable as-is.
769 def _post_run(self,_oH=None):
771     if ("StoreSupplementaryCalculations" in self._parameters) and \
772         "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
773         for _A in self.StoredVariables["APosterioriCovariance"]:
774             if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
775                 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
776             if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
777                 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
778             if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# _EI is the inverse of the standard-deviation diagonal; zero variance on the
# diagonal would divide by zero here — presumably excluded upstream, TODO confirm.
779                 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
780                 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
781                 self.StoredVariables["APosterioriCorrelations"].store( _C )
782     if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
783         logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
784         logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
785     logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
786     logging.debug("%s Terminé", self._name)
def _toStore(self, key):
    """Tell whether *key* was requested among the supplementary calculations.

    Returns True when *key* appears in the "StoreSupplementaryCalculations"
    parameter list, False otherwise.
    """
    requested = self._parameters["StoreSupplementaryCalculations"]
    return key in requested
# get: return one stored variable selected by (case-insensitive) key, or the
# whole StoredVariables container when no key is given. Returned objects are
# the Persistence objects themselves, not copies.
# NOTE(review): numbered listing with gaps — original lines 794/800 (docstring
# delimiters), 801 (presumably "if key is not None:") and 803 (presumably
# "else:") are missing from this extract.
793 def get(self, key=None):
795 Renvoie l'une des variables stockées identifiée par la clé, ou le
796 dictionnaire de l'ensemble des variables disponibles en l'absence de
797 clé. Ce sont directement les variables sous forme objet qui sont
798 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
799 des classes de persistance.
802 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
804 return self.StoredVariables
# __contains__: case-insensitive membership test against the canonical stored
# variable names.
# NOTE(review): original lines 809-810 are missing — presumably "return False"
# for the unknown-key branch and an "else:" before line 811. TODO confirm.
806 def __contains__(self, key=None):
807     "D.__contains__(k) -> True if D has a key k, else False"
808     if key is None or key.lower() not in self.__canonical_stored_name:
811     return self.__canonical_stored_name[key.lower()] in self.StoredVariables
# Fragments of the dict-like protocol methods keys() and pop() of the Algorithm
# class. NOTE(review): the "def keys(self):" header (original line 813), the
# "def pop(self, k, d):" header and several interior lines (817-820, 824-827)
# are missing from this extract; only the bodies below survive.
814 "D.keys() -> list of D's keys"
815 if hasattr(self, "StoredVariables"):
816     return self.StoredVariables.keys()
# pop: remove a stored variable by case-insensitive name, returning it, with
# dict.pop-style default/KeyError behaviour (see the trailing doc line 829).
821 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
822 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
823     return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
828 raise TypeError("pop expected at least 1 arguments, got 0")
829 "If key is not found, d is returned if given, otherwise KeyError is raised"
# run: abstract entry point that each concrete daAlgorithms module must
# override with the elementary assimilation computation. Raises
# NotImplementedError here in the base class.
# NOTE(review): original lines 836/839 (docstring delimiters) are missing.
835 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
837 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
838 sa forme mathématique la plus naturelle possible.
840 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# defineRequiredParameter: register one required algorithm parameter together
# with its default value, typecast, bounds and allowed values, plus its
# canonical (case-insensitive) name, then log the resulting default.
# NOTE(review): numbered listing with gaps — the dict literal started on
# original line 850 is incomplete here (lines 851, 853-857 with the other
# "default"/"minval"/"maxval"/"listval"/"message" entries are missing).
842 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
844 Permet de définir dans l'algorithme des paramètres requis et leurs
845 caractéristiques par défaut.
848 raise ValueError("A name is mandatory to define a required parameter.")
850 self.__required_parameters[name] = {
852     "typecast" : typecast,
858 self.__canonical_parameter_name[name.lower()] = name
859 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# getRequiredParameters: with noDetails=True return the sorted list of
# required parameter names; otherwise return the full description dictionary.
# NOTE(review): original lines 862/865 (docstring delimiters) and 866/868
# (presumably the "if noDetails:"/"else:" pair) are missing from this extract.
861 def getRequiredParameters(self, noDetails=True):
863 Renvoie la liste des noms de paramètres requis ou directement le
864 dictionnaire des paramètres requis.
867 return sorted(self.__required_parameters.keys())
869 return self.__required_parameters
# setParameterValue: resolve the value of a required parameter in a controlled
# way — fall back to the registered default, apply the typecast, then validate
# against minval/maxval (element-wise via numpy) and the allowed-value list.
# NOTE(review): numbered listing with gaps — missing original lines include 883
# (the branch body when both value and default are None, presumably a raise or
# a None result — TODO confirm), 887/890 ("else:"/"try:" scaffolding around the
# typecast), 892 ("except:"), and 901-902 (the loop over list elements whose
# per-element name "v" is used on line 903).
871 def setParameterValue(self, name=None, value=None):
873 Renvoie la valeur d'un paramètre requis de manière contrôlée
875 __k = self.__canonical_parameter_name[name.lower()]
876 default  = self.__required_parameters[__k]["default"]
877 typecast = self.__required_parameters[__k]["typecast"]
878 minval   = self.__required_parameters[__k]["minval"]
879 maxval   = self.__required_parameters[__k]["maxval"]
880 listval  = self.__required_parameters[__k]["listval"]
882 if value is None and default is None:
884 elif value is None and default is not None:
885     if typecast is None: __val = default
886     else:                __val = typecast( default )
888     if typecast is None: __val = value
891         __val = typecast( value )
893         raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Bounds are checked element-wise so list/array-valued parameters work too.
895 if minval is not None and (numpy.array(__val, float) < minval).any():
896     raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
897 if maxval is not None and (numpy.array(__val, float) > maxval).any():
898     raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
899 if listval is not None:
900     if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
903             raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
904     elif __val not in listval:
905         raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
# requireInputArguments: record which calculation inputs (Xb, Y, HO, ...) are
# mandatory or optional for this algorithm; both are stored as tuples.
# NOTE(review): original lines 910/912 (docstring delimiters) are missing.
909 def requireInputArguments(self, mandatory=(), optional=()):
911 Permet d'imposer des arguments de calcul requis en entrée.
913 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
914 self.__required_inputs["RequiredInputValues"]["optional"]  = tuple( optional )
# getInputArguments: return the (mandatory, optional) input-argument tuples
# previously registered by requireInputArguments.
# NOTE(review): original lines 917/919 (docstring delimiters) are missing.
916 def getInputArguments(self):
918 Permet d'obtenir les listes des arguments de calcul requis en entrée.
920 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
# setAttributes: append classification tags to the algorithm and return the
# resulting tag list (also used as a pure getter when called with no tags).
# NOTE(review): original lines 923/926 (docstring delimiters) are missing.
922 def setAttributes(self, tags=()):
924 Permet d'adjoindre des attributs comme les tags de classification.
925 Renvoie la liste actuelle dans tous les cas.
927 self.__required_inputs["ClassificationTags"].extend( tags )
928 return self.__required_inputs["ClassificationTags"]
# __setParameters: merge a user dictionary into the internal parameter store,
# mapping user-supplied keys to canonical names case-insensitively, then
# resolve every registered required parameter through setParameterValue
# (falling back to its default) and log the final values.
# NOTE(review): mutable default argument fromDico={} is a known Python
# pitfall; the "reset" parameter is not used in the visible lines — gaps in
# the numbering (931/933, 944, 946-947) mean lines are lost in extraction.
930 def __setParameters(self, fromDico={}, reset=False):
932 Permet de stocker les paramètres reçus dans le dictionnaire interne.
934 self._parameters.update( fromDico )
935 __inverse_fromDico_keys = {}
936 for k in fromDico.keys():
937     if k.lower() in self.__canonical_parameter_name:
938         __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
939 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
940 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
941 for k in self.__required_parameters.keys():
942     if k in __canonic_fromDico_keys:
943         self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
945         self._parameters[k] = self.setParameterValue(k)
948     logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
950 # ==============================================================================
# AlgorithmAndParameters: action interface binding an algorithm name to its
# parameter dictionary. The fragment below is the class header plus a gapped
# view of __init__: parameters are loaded (optionally from a script), then the
# algorithm name is recorded in __P and the algorithm module is instantiated.
# NOTE(review): numbered listing — the "def __init__" header and several
# argument/assignment lines (952, 954-955, 957-962, 964-966, 970, 972,
# 975-977, 981, 983) are missing from this extract.
951 class AlgorithmAndParameters(object):
953 Classe générale d'interface d'action pour l'algorithme et ses paramètres
956 name      = "GenericAlgorithm",
963 self.__name       = str(name)
967 self.__algorithm     = {}
968 self.__algorithmFile = None
969 self.__algorithmName = None
971 self.updateParameters( asDict, asScript )
973 if asAlgorithm is None and asScript is not None:
974     __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
978 if __Algo is not None:
979     self.__A = str(__Algo)
980     self.__P.update( {"Algorithm":self.__A} )
982     self.__setAlgorithm( self.__A )
984 self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
# updateParameters: refresh the internal parameter dictionary __P, reading the
# "Parameters" variable from a script when no explicit dict is given.
# NOTE(review): original lines 987-989 (the remaining keyword arguments,
# presumably asDict/asScript) and 993-995 are missing from this extract.
986 def updateParameters(self,
990     "Mise a jour des parametres"
991     if asDict is None and asScript is not None:
992         __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
996     if __Dict is not None:
997         self.__P.update( dict(__Dict) )
# executePythonScheme: run the assimilation calculation in pure Python. The
# operator cache is cleared, every input object is unwrapped via getO() when
# available, shapes are cross-validated, and the algorithm's run() is invoked.
# NOTE(review): direct indexing (asDictAO["Background"], ...) assumes all keys
# are present in the dict — presumably guaranteed by the caller, TODO confirm.
# Gaps in the numbering (1002, 1021, 1023, 1025-1033, 1035-1037) mark lines
# lost in extraction, including most of the run() argument list.
999 def executePythonScheme(self, asDictAO = None):
1000 "Permet de lancer le calcul d'assimilation"
1001 Operator.CM.clearCache()
1003 if not isinstance(asDictAO, dict):
1004     raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1005 if hasattr(asDictAO["Background"],"getO"):        self.__Xb = asDictAO["Background"].getO()
1006 elif hasattr(asDictAO["CheckingPoint"],"getO"):   self.__Xb = asDictAO["CheckingPoint"].getO()
1007 else:                                             self.__Xb = None
1008 if hasattr(asDictAO["Observation"],"getO"):       self.__Y  = asDictAO["Observation"].getO()
1009 else:                                             self.__Y  = asDictAO["Observation"]
1010 if hasattr(asDictAO["ControlInput"],"getO"):      self.__U  = asDictAO["ControlInput"].getO()
1011 else:                                             self.__U  = asDictAO["ControlInput"]
1012 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1013 else:                                             self.__HO = asDictAO["ObservationOperator"]
1014 if hasattr(asDictAO["EvolutionModel"],"getO"):    self.__EM = asDictAO["EvolutionModel"].getO()
1015 else:                                             self.__EM = asDictAO["EvolutionModel"]
1016 if hasattr(asDictAO["ControlModel"],"getO"):      self.__CM = asDictAO["ControlModel"].getO()
1017 else:                                             self.__CM = asDictAO["ControlModel"]
1018 self.__B = asDictAO["BackgroundError"]
1019 self.__R = asDictAO["ObservationError"]
1020 self.__Q = asDictAO["EvolutionError"]
1022 self.__shape_validate()
1024 self.__algorithm.run(
1034     Parameters   = self.__P,
# executeYACSScheme: run the assimilation calculation through a YACS XML
# scheme inside SALOME. Validates the file's existence, the SALOME/YACS/ADAO
# environment, loads and checks the schema (parser log, error report, link
# consistency), then executes it and reports the final state.
# NOTE(review): numbered listing with gaps — missing lines include the
# try/except scaffolding around catalog and schema loading (1052, 1054, 1056,
# 1060, 1063-1066, 1070), the early "return" statements after each error
# branch (1075-1076, 1079, 1085), and the actual execution call on 1087.
1038 def executeYACSScheme(self, FileName=None):
1039 "Permet de lancer le calcul d'assimilation"
1040 if FileName is None or not os.path.exists(FileName):
1041     raise ValueError("a YACS file name has to be given for YACS execution.\n")
1043 __file    = os.path.abspath(FileName)
1044 logging.debug("The YACS file name is \"%s\"."%__file)
1045 if not PlatformInfo.has_salome or \
1046     not PlatformInfo.has_yacs or \
1047     not PlatformInfo.has_adao:
1048     raise ImportError("\n\n"+\
1049         "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1050         "Please load the right environnement before trying to use it.\n")
1053 import SALOMERuntime
1055 SALOMERuntime.RuntimeSALOME_setRuntime()
1057 r = pilot.getRuntime()
1058 xmlLoader = loader.YACSLoader()
1059 xmlLoader.registerProcCataLoader()
1061 catalogAd = r.loadCatalog("proc", __file)
1062 r.addCatalog(catalogAd)
1067 p = xmlLoader.load(__file)
1068 except IOError as ex:
1069     print("The YACS XML schema file can not be loaded: %s"%(ex,))
1071 logger = p.getLogger("parser")
1072 if not logger.isEmpty():
1073     print("The imported YACS XML schema has errors on parsing:")
1074     print(logger.getStr())
1077     print("The YACS XML schema is not valid and will not be executed:")
1078     print(p.getErrorReport())
1080 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1081 p.checkConsistency(info)
1082 if info.areWarningsOrErrors():
1083     print("The YACS XML schema is not coherent and will not be executed:")
1084     print(info.getGlobalRepr())
1086 e = pilot.ExecutorSwig()
1088 if p.getEffectiveState() != pilot.DONE:
1089     print(p.getErrorReport())
# get: look up a key first among the algorithm's stored variables, then among
# the parameters __P; with no key, expose all public variables (the private
# names listed in __variable_names_not_public are popped out).
# NOTE(review): original line 1099 (presumably "else:") and 1102-1103 (the
# return of "allvariables" and/or further filtering) are missing. Note also
# that line 1100 binds allvariables to self.__P itself, so the pop() on line
# 1101 mutates __P — presumably intentional or fixed on a missing line,
# TODO confirm against the full source.
1093 def get(self, key = None):
1094 "Vérifie l'existence d'une clé de variable ou de paramètres"
1095 if key in self.__algorithm:
1096     return self.__algorithm.get( key )
1097 elif key in self.__P:
1098     return self.__P[key]
1100     allvariables = self.__P
1101     for k in self.__variable_names_not_public: allvariables.pop(k, None)
def pop(self, k, d):
    """Remove key *k* from the underlying algorithm store and return its value.

    Falls back to *d* when the key is absent. Required for pickling support.
    """
    algorithm_store = self.__algorithm
    return algorithm_store.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    """Return the parameters required by the selected algorithm.

    With noDetails=True the algorithm returns only the parameter names;
    otherwise it returns the full description dictionary.
    """
    algorithm = self.__algorithm
    return algorithm.getRequiredParameters(noDetails)
def getAlgorithmInputArguments(self):
    """Return the input arguments required by the selected algorithm."""
    algorithm = self.__algorithm
    return algorithm.getInputArguments()
def getAlgorithmAttributes(self):
    """Return the classification attributes (tags) of the selected algorithm.

    Delegates to the algorithm's setAttributes() called without arguments,
    which returns the current tag list without adding anything.
    """
    algorithm = self.__algorithm
    return algorithm.setAttributes()
# setObserver: attach observer callable __O (with info __I and scheduler __S)
# to the stored variable named __V, after checking that an algorithm has been
# chosen and that the variable exists.
# NOTE(review): numbered listing with gaps — original lines 1127, 1129-1130
# and 1132-1133 (the remaining keyword arguments of setDataObserver, and the
# closing parenthesis) are missing from this extract.
1120 def setObserver(self, __V, __O, __I, __S):
1121     if self.__algorithm is None \
1122         or isinstance(self.__algorithm, dict) \
1123         or not hasattr(self.__algorithm,"StoredVariables"):
1124         raise ValueError("No observer can be build before choosing an algorithm.")
1125     if __V not in self.__algorithm:
1126         raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1128     self.__algorithm.StoredVariables[ __V ].setDataObserver(
1131         HookParameters = __I,
# removeObserver: detach observer __O from the stored variable named __V
# (optionally all observers when __A is True), with the same guards as
# setObserver.
# NOTE(review): original lines 1141 and 1143-1146 (the argument list of
# removeDataObserver and its closing parenthesis) are missing from this
# extract.
1134 def removeObserver(self, __V, __O, __A = False):
1135     if self.__algorithm is None \
1136         or isinstance(self.__algorithm, dict) \
1137         or not hasattr(self.__algorithm,"StoredVariables"):
1138         raise ValueError("No observer can be removed before choosing an algorithm.")
1139     if __V not in self.__algorithm:
1140         raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1142     return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# hasObserver: report whether the stored variable __V currently has a data
# observer attached; False when no algorithm is chosen or the variable is
# unknown.
# NOTE(review): original lines 1151 and 1153 (presumably "return False" for
# the two guard branches) are missing from this extract.
1147 def hasObserver(self, __V):
1148     if self.__algorithm is None \
1149         or isinstance(self.__algorithm, dict) \
1150         or not hasattr(self.__algorithm,"StoredVariables"):
1152     if __V not in self.__algorithm:
1154     return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# Fragment of the keys() method of AlgorithmAndParameters: the union of the
# algorithm's variable names and the parameter names, minus the non-public
# variable names.
# NOTE(review): the "def keys(self):" header (original line 1156, presumably)
# is missing from this extract; only the body survives.
1157 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1158 for k in self.__variable_names_not_public:
1159     if k in __allvariables: __allvariables.remove(k)
1160 return __allvariables
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    if key in self.__algorithm:
        return True
    return key in self.__P
# Fragments of __repr__ and __str__ for AlgorithmAndParameters: both render
# the algorithm name __A followed by the parameter dictionary __P.
# NOTE(review): the "def __repr__(self):" and "def __str__(self):" headers
# (original lines 1166 and 1170, presumably) are missing from this extract.
1167 "x.__repr__() <==> repr(x)"
1168 return repr(self.__A)+", "+repr(self.__P)
1171 "x.__str__() <==> str(x)"
1172 return str(self.__A)+", "+str(self.__P)
# __setAlgorithm: select and instantiate the elementary algorithm named by
# "choice". Searches sys.path for daAlgorithms/<choice>.py, imports it with a
# temporary sys.path prefix, checks that it exposes ElementaryAlgorithm, and
# instantiates it. An algorithm can only be chosen once per object.
# NOTE(review): numbered listing with gaps — missing lines include 1175/1179-
# 1180 (docstring delimiters and the None check before the raise on 1181),
# 1185/1188 (module_path initialisation, presumably to None), 1194/1197 (the
# "try:" opening matched by "except ImportError" on 1204), and 1206/1210-1211.
1174 def __setAlgorithm(self, choice = None ):
1176 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1177 d'assimilation. L'argument est un champ caractère se rapportant au nom
1178 d'un algorithme réalisant l'opération sur les arguments fixes.
1181 raise ValueError("Error: algorithm choice has to be given")
1182 if self.__algorithmName is not None:
1183     raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1184 daDirectory = "daAlgorithms"
# (Explicitly look for the full module file in the search path.)
1186 # Recherche explicitement le fichier complet
1187 # ------------------------------------------
1189 for directory in sys.path:
1190     if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1191         module_path = os.path.abspath(os.path.join(directory, daDirectory))
1192 if module_path is None:
1193     raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
# (Import the full file as a module, with the algorithm directory temporarily
# prepended to sys.path.)
1195 # Importe le fichier complet comme un module
1196 # ------------------------------------------
1198 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1199 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1200 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1201     raise ImportError("this module does not define a valid elementary algorithm.")
1202 self.__algorithmName = str(choice)
1203 sys.path = sys_path_tmp ; del sys_path_tmp
1204 except ImportError as e:
1205     raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
# (Instantiate an object of the module's elementary type.)
1207 # Instancie un objet du type élémentaire du fichier
1208 # -------------------------------------------------
1209 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# __shape_validate: cross-check the dimensional consistency of all inputs
# before running an algorithm. Each object's shape is normalised to a tuple
# (handling size attributes, shape attributes and callable shape() methods),
# covariances are required to be square, and every operator/vector pairing is
# checked for compatibility. The EnsembleBlue algorithm gets a special case
# where a background wider than B is split into an ensemble of members.
# NOTE(review): numbered listing with gaps — missing lines include the
# docstring delimiters (1213/1216) and the "else:" closing the EnsembleBlue
# special case before line 1310.
1212 def __shape_validate(self):
1214 Validation de la correspondance correcte des tailles des variables et
1215 des matrices s'il y en a.
# --- Normalise each input's shape to a plain tuple -------------------------
1217 if self.__Xb is None:                      __Xb_shape = (0,)
1218 elif hasattr(self.__Xb,"size"):            __Xb_shape = (self.__Xb.size,)
1219 elif hasattr(self.__Xb,"shape"):
1220     if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1221     else:                                  __Xb_shape = self.__Xb.shape()
1222 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1224 if self.__Y is None:                      __Y_shape = (0,)
1225 elif hasattr(self.__Y,"size"):            __Y_shape = (self.__Y.size,)
1226 elif hasattr(self.__Y,"shape"):
1227     if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1228     else:                                 __Y_shape = self.__Y.shape()
1229 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1231 if self.__U is None:                      __U_shape = (0,)
1232 elif hasattr(self.__U,"size"):            __U_shape = (self.__U.size,)
1233 elif hasattr(self.__U,"shape"):
1234     if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1235     else:                                 __U_shape = self.__U.shape()
1236 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1238 if self.__B is None:                      __B_shape = (0,0)
1239 elif hasattr(self.__B,"shape"):
1240     if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1241     else:                                 __B_shape = self.__B.shape()
1242 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1244 if self.__R is None:                      __R_shape = (0,0)
1245 elif hasattr(self.__R,"shape"):
1246     if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1247     else:                                 __R_shape = self.__R.shape()
1248 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1250 if self.__Q is None:                      __Q_shape = (0,0)
1251 elif hasattr(self.__Q,"shape"):
1252     if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1253     else:                                 __Q_shape = self.__Q.shape()
1254 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1256 if len(self.__HO) == 0:                              __HO_shape = (0,0)
1257 elif isinstance(self.__HO, dict):                    __HO_shape = (0,0)
1258 elif hasattr(self.__HO["Direct"],"shape"):
1259     if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1260     else:                                            __HO_shape = self.__HO["Direct"].shape()
1261 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1263 if len(self.__EM) == 0:                              __EM_shape = (0,0)
1264 elif isinstance(self.__EM, dict):                    __EM_shape = (0,0)
1265 elif hasattr(self.__EM["Direct"],"shape"):
1266     if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1267     else:                                            __EM_shape = self.__EM["Direct"].shape()
1268 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1270 if len(self.__CM) == 0:                              __CM_shape = (0,0)
1271 elif isinstance(self.__CM, dict):                    __CM_shape = (0,0)
1272 elif hasattr(self.__CM["Direct"],"shape"):
1273     if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1274     else:                                            __CM_shape = self.__CM["Direct"].shape()
1275 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
# --- Cross-check the consistency conditions --------------------------------
1277 # Vérification des conditions
1278 # ---------------------------
1279 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1280     raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1281 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1282     raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1284 if not( min(__B_shape) == max(__B_shape) ):
1285     raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1286 if not( min(__R_shape) == max(__R_shape) ):
1287     raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1288 if not( min(__Q_shape) == max(__Q_shape) ):
1289     raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1290 if not( min(__EM_shape) == max(__EM_shape) ):
1291     raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1293 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1294     raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1295 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1296     raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1297 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1298     raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1299 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1300     raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# EnsembleBlue: a background wider than B is reinterpreted as an ensemble of
# members stored in a Persistence.OneVector.
# NOTE(review): numpy.matrix and the numpy.float alias used on line 1307 are
# deprecated/removed in recent NumPy versions — verify against the project's
# NumPy floor before reuse.
1302 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1303     if self.__algorithmName in ["EnsembleBlue",]:
1304         asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1305         self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1306         for member in asPersistentVector:
1307             self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1308         __Xb_shape = min(__B_shape)
1310         raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1312 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1313     raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1315 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1316     raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1318 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1319     raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1321 if ("Bounds" in self.__P) \
1322     and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1323     and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1324     raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1325         %(len(self.__P["Bounds"]),max(__Xb_shape)))
1329 # ==============================================================================
# RegulationAndParameters: action interface for a regulation algorithm and its
# parameters. __init__ loads the algorithm name and parameter dict, each
# either directly or from a script variable, into the internal dict __P.
# NOTE(review): numbered listing with gaps — the "def __init__" header and
# several argument/else lines (1331/1333-1334, 1336-1341, 1343-1344, 1347,
# 1349, 1352-1354, 1357, 1360, 1363) are missing from this extract; in
# particular get()'s guard "if key in self.__P:" before line 1364 is lost.
1330 class RegulationAndParameters(object):
1332 Classe générale d'interface d'action pour la régulation et ses paramètres
1335 name               = "GenericRegulation",
1342 self.__name       = str(name)
1345 if asAlgorithm is None and asScript is not None:
1346     __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1348     __Algo = asAlgorithm
1350 if asDict is None and asScript is not None:
1351     __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1355 if __Dict is not None:
1356     self.__P.update( dict(__Dict) )
1358 if __Algo is not None:
1359     self.__P.update( {"Algorithm":str(__Algo)} )
# get: look a key up in the parameter dictionary __P.
1361 def get(self, key = None):
1362     "Vérifie l'existence d'une clé de variable ou de paramètres"
1364     return self.__P[key]
1368 # ==============================================================================
# DataObserver: observer interface attaching a callable to one or several
# stored variables of an algorithm. __init__ normalises the variable names
# into the tuple __V and their info strings into __I, builds the observer
# callable __O (directly, or from a UserScript turned into a function through
# Observer2Func), then registers it on the algorithm via setObserver.
# NOTE(review): numbered listing with gaps — the "def __init__" header, most
# keyword-argument lines (1370/1372-1373, 1375-1385, 1387-1390), several
# "else:" lines (1396-1397, 1403, 1405, 1407, 1410), and the loop body lines
# binding "ename"/"einfo" from __V/__I (1416-1417, 1420) are missing, as are
# the "def __repr__"/"def __str__" headers (1422-1423, 1426-1427).
1369 class DataObserver(object):
1371 Classe générale d'interface de type observer
1374 name        = "GenericObserver",
1386 self.__name       = str(name)
1391 if onVariable is None:
1392     raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1393 elif type(onVariable) in (tuple, list):
1394     self.__V = tuple(map( str, onVariable ))
1395     if withInfo is None:
1398         self.__I = (str(withInfo),)*len(self.__V)
1399 elif isinstance(onVariable, str):
1400     self.__V = (onVariable,)
1401     if withInfo is None:
1402         self.__I = (onVariable,)
1404         self.__I = (str(withInfo),)
1406     raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1408 if asObsObject is not None:
1409     self.__O = asObsObject
1411     __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1412     __Function = Observer2Func(__FunctionText)
1413     self.__O = __Function.getfunc()
1415 for k in range(len(self.__V)):
1418     if ename not in withAlgo:
1419         raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1421         withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1424 "x.__repr__() <==> repr(x)"
1425 return repr(self.__V)+"\n"+repr(self.__O)
1428 "x.__str__() <==> str(x)"
1429 return str(self.__V)+"\n"+str(self.__O)
1431 # ==============================================================================
# UserScript: wrapper around a user-supplied script text __F, filled in order
# of priority from an explicit string, a named template (UserPostAnalysis or
# Observer template tables), or a script file import; repr/str expose the
# resulting text.
# NOTE(review): numbered listing with gaps — the "def __init__" header and
# keyword-argument lines (1433/1435-1436, 1438-1443, 1445), the assignment for
# the asString branch (1447), the final "else:" fallback (1454-1456), and the
# "def __repr__"/"def __str__" headers (1457, 1460-1461) are missing.
1432 class UserScript(object):
1434 Classe générale d'interface de type texte de script utilisateur
1437 name       = "GenericUserScript",
1444 self.__name       = str(name)
1446 if asString is not None:
1448 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1449     self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1450 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1451     self.__F = Templates.ObserverTemplates[asTemplate]
1452 elif asScript is not None:
1453     self.__F = Interfaces.ImportFromScript(asScript).getstring()
1458 "x.__repr__() <==> repr(x)"
1459 return repr(self.__F)
1462 "x.__str__() <==> str(x)"
1463 return str(self.__F)
1465 # ==============================================================================
# ExternalParameters: dict-like container for user parameters stored in __P,
# loadable directly or from the "ExternalParameters" variable of a script;
# exposes get/keys/pop/items/__contains__ accessors.
# NOTE(review): numbered listing with gaps — the "def __init__" header and
# argument lines (1467/1469-1470, 1472-1476, 1478-1479), updateParameters'
# remaining parameters (1483-1485) and lines 1489-1491, get()'s guard and
# else branch (1496, 1498), and the "def keys"/"def items" headers (1500-1501,
# 1506-1507) are missing from this extract.
1466 class ExternalParameters(object):
1468 Classe générale d'interface de type texte de script utilisateur
1471 name        = "GenericExternalParameters",
1477 self.__name = str(name)
1480 self.updateParameters( asDict, asScript )
# updateParameters: merge a user dict (or script content) into __P.
1482 def updateParameters(self,
1486     "Mise a jour des parametres"
1487     if asDict is None and asScript is not None:
1488         __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1492     if __Dict is not None:
1493         self.__P.update( dict(__Dict) )
# get: return one parameter by key, or (on a missing branch) the key list.
1495 def get(self, key = None):
1497     return self.__P[key]
1499     return list(self.__P.keys())
1502 return list(self.__P.keys())
# pop: dict.pop-style removal with default.
1504 def pop(self, k, d):
1505     return self.__P.pop(k, d)
1508 return self.__P.items()
1510 def __contains__(self, key=None):
1511     "D.__contains__(k) -> True if D has a key k, else False"
1512     return key in self.__P
1514 # ==============================================================================
# State: interface wrapping a state vector (__is_vector) or a series of
# vectors (__is_series), loadable from a direct value, a script variable, or
# one/several column data files. The normalised value is stored in __V with
# shape/size metadata, and an optional scheduler in __T.
# NOTE(review): numbered listing with gaps — the "def __init__" header and
# several argument lines (1516/1518-1519, 1521, 1523-1527, 1529-1530), the
# docstring delimiters (1540, 1548-1549), various "else:" lines (1552-1554,
# 1557, 1562, 1569, 1575, 1578, 1580, 1582, 1584, 1586, 1599-1600, 1603,
# 1608), getO's branches (1613, 1615, 1618-1622), and the is*/repr/str method
# headers (1625-1626, 1629-1630, 1633-1634) are missing from this extract.
1515 class State(object):
1517 Classe générale d'interface de type état
1520 name               = "GenericVector",
1522 asPersistentVector = None,
1528 toBeChecked        = False,
1531 Permet de définir un vecteur :
1532 - asVector : entrée des données, comme un vecteur compatible avec le
1533   constructeur de numpy.matrix, ou "True" si entrée par script.
1534 - asPersistentVector : entrée des données, comme une série de vecteurs
1535   compatible avec le constructeur de numpy.matrix, ou comme un objet de
1536   type Persistence, ou "True" si entrée par script.
1537 - asScript : si un script valide est donné contenant une variable
1538   nommée "name", la variable est de type "asVector" (par défaut) ou
1539   "asPersistentVector" selon que l'une de ces variables est placée à
1541 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1542   contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1543   (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1544   nommée "name"), on récupère les colonnes et on les range ligne après
1545   ligne (colMajor=False, par défaut) ou colonne après colonne
1546   (colMajor=True). La variable résultante est de type "asVector" (par
1547   défaut) ou "asPersistentVector" selon que l'une de ces variables est
1550 self.__name       = str(name)
1551 self.__check      = bool(toBeChecked)
1555 self.__is_vector  = False
1556 self.__is_series  = False
# --- Load from a script variable ------------------------------------------
1558 if asScript is not None:
1559     __Vector, __Series = None, None
1560     if asPersistentVector:
1561         __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1563         __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
# --- Load from one or several column data files ---------------------------
1564 elif asDataFile is not None:
1565     __Vector, __Series = None, None
1566     if asPersistentVector:
1567         if colNames is not None:
1568             __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1570             __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already column-major, hence the inverted transpose logic.
1571         if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1572             __Series = numpy.transpose(__Series)
1573         elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1574             __Series = numpy.transpose(__Series)
1576         if colNames is not None:
1577             __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1579             __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1581             __Vector = numpy.ravel(__Vector, order = "F")
1583             __Vector = numpy.ravel(__Vector, order = "C")
1585     __Vector, __Series = asVector, asPersistentVector
# --- Normalise into __V with shape/size metadata --------------------------
# NOTE(review): numpy.matrix and the numpy.float alias below are deprecated/
# removed in recent NumPy versions — verify against the project's NumPy floor.
1587 if __Vector is not None:
1588     self.__is_vector = True
1589     self.__V         = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1590     self.shape       = self.__V.shape
1591     self.size        = self.__V.size
1592 elif __Series is not None:
1593     self.__is_series  = True
1594     if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1595         self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string series — do not feed untrusted input here.
1596         if isinstance(__Series, str): __Series = eval(__Series)
1597         for member in __Series:
1598             self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1601     if isinstance(self.__V.shape, (tuple, list)):
1602         self.shape       = self.__V.shape
1604         self.shape       = self.__V.shape()
1605     if len(self.shape) == 1:
1606         self.shape       = (self.shape[0],1)
1607     self.size        = self.shape[0] * self.shape[1]
1609     raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1611 if scheduledBy is not None:
1612     self.__T = scheduledBy
# getO: return the stored value, optionally paired with its scheduler.
1614 def getO(self, withScheduler=False):
1616     return self.__V, self.__T
1617 elif self.__T is None:
# isvector/isseries: report the detected internal type.
1623 "Vérification du type interne"
1624 return self.__is_vector
1627 "Vérification du type interne"
1628 return self.__is_series
1631 "x.__repr__() <==> repr(x)"
1632 return repr(self.__V)
1635 "x.__str__() <==> str(x)"
1636 return str(self.__V)
1638 # ==============================================================================
class Covariance(object):
    """
    General interface class for covariance objects.
    """
    # NOTE(review): decimated listing — some original lines (the "def __init__"
    # header, the asCovObject/asScript parameters, "else:"/"try:" lines and
    # several method headers) are not visible; code is reproduced only as seen.
                 name          = "GenericCovariance",
                 asCovariance  = None,
                 asEyeByScalar = None,
                 asEyeByVector = None,
                 toBeChecked   = False,
        """
        Allows defining a covariance:
        - asCovariance : data input, as a matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar : data input as a single variance scalar, multiplying
          an implicit identity correlation matrix, so no matrix has to be
          given explicitly
        - asEyeByVector : data input as a single variance vector, to be put on
          the diagonal of a correlation matrix, so no matrix has to be given
          explicitly
        - asCovObject : data input as a python object, which has the mandatory
          methods "getT", "getI", "diag", "trace", "__add__", "__sub__",
          "__neg__", "__mul__", "__rmul__" and the optional ones "shape",
          "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__",
          "__str__"
        - toBeChecked : boolean indicating whether the SPD character of the
          full matrix has to be checked
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # Internal storage-kind flags: at most one becomes True below
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        # Inputs may come from a user script instead of direct arguments
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
            # (the branch-selector lines "if asEyeByScalar:" etc. are not visible)
            __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        # Direct argument inputs (body of a non-visible "else:" branch)
        __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        if __Scalar is not None:
            # Scalar variance multiplying an implicit identity matrix
            if numpy.matrix(__Scalar).size != 1:
                raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
        elif __Vector is not None:
            # Variance vector: the diagonal of an implicit covariance matrix
            self.__is_vector = True
            self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            # Full explicit covariance matrix
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            # Duck-typed user object implementing the covariance protocol
            # (the "self.__C = __Object" assignment line is not visible here)
            self.__is_object = True
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
        # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)

    def __validate(self):
        # Consistency and positive-definiteness validation of the stored value
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # A Cholesky factorization attempt serves as the SPD check, when asked
        # for or when the logging level is verbose (the "try:"/"except" lines
        # are not visible here)
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
            L = numpy.linalg.cholesky( self.__C )
            raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
            L = self.__C.cholesky()
            raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))

    # (the "def isscalar(self):" etc. headers of the four type predicates are
    # not visible in this listing)
        "Check of the internal type"
        return self.__is_scalar

        "Check of the internal type"
        return self.__is_vector

        "Check of the internal type"
        return self.__is_matrix

        "Check of the internal type"
        return self.__is_object

    # getI: inverse of the covariance, preserving the storage kind
    # (the method header and first "if self.ismatrix():" line are not visible)
        return Covariance(self.__name+"I", asCovariance = self.__C.I )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject():
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
            return None # Essential

    # getT: transpose of the covariance (identity for vector/scalar storage)
        return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject():
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )

        "Cholesky decomposition"
        return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            # Diagonal storage: element-wise square root
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )

    def choleskyI(self):
        "Inverse of the Cholesky decomposition"
        return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
        elif self.isvector():
            # Diagonal storage: element-wise inverse square root
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )

    def diag(self, msize=None):
        "Diagonal of the matrix"
        return numpy.diag(self.__C)
        elif self.isvector():
        elif self.isscalar():
            # A scalar covariance has no intrinsic size: msize is mandatory
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return self.__C * numpy.ones(int(msize))
        elif self.isobject():
            return self.__C.diag()

    def asfullmatrix(self, msize=None):
        # Dense matrix representation of the covariance, whatever its storage
        elif self.isvector():
            return numpy.matrix( numpy.diag(self.__C), float )
        elif self.isscalar():
            # A scalar covariance has no intrinsic size: msize is mandatory
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()

    def trace(self, msize=None):
        "Trace of the matrix"
        return numpy.trace(self.__C)
        elif self.isvector():
            # Trace of a diagonal matrix is the sum of its diagonal
            return float(numpy.sum(self.__C))
        elif self.isscalar():
            # A scalar covariance has no intrinsic size: msize is mandatory
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()

        "x.__repr__() <==> repr(x)"
        return repr(self.__C)

        "x.__str__() <==> str(x)"
        return str(self.__C)

    def __add__(self, other):
        "x.__add__(y) <==> x+y"
        if self.ismatrix() or self.isobject():
            return self.__C + numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            # Add the implicit diagonal onto "other" by striding over its
            # flattened diagonal elements
            _A = numpy.asarray(other)
            _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
            return numpy.asmatrix(_A)

    def __radd__(self, other):
        "x.__radd__(y) <==> y+x"
        raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))

    def __sub__(self, other):
        "x.__sub__(y) <==> x-y"
        if self.ismatrix() or self.isobject():
            return self.__C - numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            # Subtract on the diagonal only, same striding trick as __add__
            _A = numpy.asarray(other)
            _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
            return numpy.asmatrix(_A)

    def __rsub__(self, other):
        "x.__rsub__(y) <==> y-x"
        raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))

        "x.__neg__() <==> -x"

    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
                # (body of a non-visible "else:" branch)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
                # (body of a non-visible "else:" branch)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            # A 1D sequence, a column or a row is treated as a vector
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
            # (body of a non-visible "else:" branch)
            raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))

    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix(other) * self.__C
                # (body of a non-visible "else:" branch)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
                # (body of a non-visible "else:" branch)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmul__(other)
            # (body of a non-visible "else:" branch)
            raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))

        "x.__len__() <==> len(x)"
        return self.shape[0]
1955 # ==============================================================================
class Observer2Func(object):
    """
    Creation of an observer function from its source text.
    """
    # NOTE(review): decimated listing — the compilation/execution of "corps"
    # and the "def getfunc" header are not visible here.
    def __init__(self, corps=""):
        # Source text of the observer body, kept for later use
        self.__corps = corps
    def func(self,var,info):
        "Observation function"
        "Return the function pointer held by the object"
1969 # ==============================================================================
class CaseLogger(object):
    """
    Keeps a record of the commands used to create a case.
    """
    # NOTE(review): decimated listing — the dict-literal delimiters of the
    # viewers/loaders registries and some "else:" lines are not visible.
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name = str(__name)
        self.__objname = str(__objname)
        self.__logSerie = []        # ordered list of registered commands
        self.__switchoff = False    # when True, register() is disabled
        # Output formatters registry (the "self.__viewers = {" line is not visible)
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
        # Input loaders registry (the "self.__loaders = {" line is not visible)
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
        # Optional user extensions of the registries
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Registration of an individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            # "self" must never be serialized as a command argument
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            # Switch handling (the enclosing "if/elif" lines are not visible)
            self.__switchoff = True
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized restitution of the commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
            # (body of a non-visible "else:" branch)
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of the commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
            # (body of a non-visible "else:" branch)
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
2019 # ==============================================================================
# NOTE(review): decimated listing — the "def MultiFonction(__xserie," header
# and some parameters ("_mpEnabled", "_mpWorkers"), as well as several
# "else:"/"try:" lines and the "_jobs = []" initialization, are not visible.
                   _extraArguments = None,
                   _sFunction = lambda x: x,
    """
    For an ordered list of input vectors, returns the corresponding ordered
    list of values of the function given as argument.
    """
    # Initial checks and definitions
    # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    # Normalize the number of multiprocessing workers (some branch lines of
    # this normalization are not visible)
    if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
        __mpWorkers = int(_mpWorkers)
    # Import check (originally inside a non-visible "try:")
    import multiprocessing
    # Build the job list, appending the extra arguments to each input when given
    if _extraArguments is None:
    elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
        for __xvalue in __xserie:
            _jobs.append( [__xvalue, ] + list(_extraArguments) )
        # (body of a non-visible "else:" branch)
        raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
    # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
    import multiprocessing
    # Parallel evaluation; the context manager guarantees worker pool cleanup
    with multiprocessing.Pool(__mpWorkers) as pool:
        __multiHX = pool.map( _sFunction, _jobs )
    # logging.debug("MULTF Internal multiprocessing calculation end")
    # logging.debug("MULTF Internal monoprocessing calculation begin")
    # Sequential (monoprocessing) evaluation branch
    if _extraArguments is None:
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue ) )
    elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
        for __xvalue in __xserie:
            # Positional extra arguments
            __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
    elif _extraArguments is not None and isinstance(_extraArguments, dict):
        for __xvalue in __xserie:
            # Keyword extra arguments
            __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
        # (body of a non-visible "else:" branch)
        raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
    # logging.debug("MULTF Internal monoprocessing calculation end")
    # logging.debug("MULTF Internal multifonction calculations end")
2085 # ==============================================================================
def CostFunction3D(_x,
                   _Hm = None, # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None, # Already computed simulation of Hm(x)
                   _arg = None, # Extra arguments for Hm, as a tuple
                   _SIV = False, # To be phased out for 8.0
                   _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0, # nbPreviousSteps
                   _QM = "DA", # QualityMeasure
                   _SSV = {}, # Input and/or output : self.StoredVariables
                   _fRt = False, # Whether to return the extended output
                   _sSc = True, # Whether to store the SSC
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE and
    derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO, SA,
    Tabu, DFO, QuantileRegression

    NOTE(review): decimated listing — several lines (some parameters, "else:"
    branches, the list-closing "]:" of the loop below) are not visible.
    NOTE(review): _SSC=[] and _SSV={} are mutable default arguments, shared
    across calls — risky; left unchanged here because the block is incomplete.
    """
    # Alias "store" as "append" on persistent containers so both APIs work
    for k in ["CostFunctionJ",
              "SimulatedObservationAtCurrentOptimum",
              "SimulatedObservationAtCurrentState",
        if hasattr(_SSV[k],"store"):
            _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    # Normalize the current state as a column matrix
    _X = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    # Use the precomputed simulation when provided, otherwise apply _Hm
    # (the intermediate "elif/else" lines are not visible)
    if _HmX is not None:
        raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    # A NaN anywhere in the simulation propagates NaN costs
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
        # (body of a non-visible "else:" branch: nominal cost computation)
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        # Quality-measure selection: Jb is the background term, Jo the
        # observation term
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            # (body of a non-visible "else:" branch)
            raise ValueError("Unknown asked quality measure!")
        # Total cost
        J = float( Jb ) + float( Jo )
        # Persist the cost terms when storage is requested
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        # Optimum is searched only over the steps of this run (offset by _nPS)
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
        if "IndexOfOptimum" in _SSC:
            _SSV["IndexOfOptimum"].append( IndexMin )
        if "CurrentOptimum" in _SSC:
            _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
        if "SimulatedObservationAtCurrentOptimum" in _SSC:
            _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    if _QM in ["QR"]: # For QuantileRegression
2200 # ==============================================================================
if __name__ == "__main__":
    # Simple banner printed when the module is executed directly.
    banner = '\n AUTODIAGNOSTIC\n'
    print(banner)