1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2020 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence, PlatformInfo, Interfaces
36 from daCore import Templates
38 # ==============================================================================
39 class CacheManager(object):
# CacheManager: bounded cache of operator evaluations, used to avoid redundant
# computations. Entries in self.__listOPCV are tuples of
# (computed point, result, point norm, operator name); new points are matched
# against stored ones within a relative tolerance.
# NOTE(review): this numbered listing is line-sampled — interior lines are
# missing throughout, so the visible statements below are not contiguous.
41 General management class for a computation cache
# Constructor tail: tolerance/length characteristics, changeable at creation.
# ("lenght" spelling is the historical public keyword name — kept as-is.)
44 toleranceInRedundancy = 1.e-18,
45 lenghtOfRedundancy = -1,
48 The tolerance characteristics can be modified at creation.
50 self.__tolerBP = float(toleranceInRedundancy)
# -1 means "auto-size later from the state dimension" (see storeValueInX).
51 self.__lenghtOR = int(lenghtOfRedundancy)
52 self.__initlnOR = self.__lenghtOR
59 self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator
61 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
63 def wasCalculatedIn(self, xValue, oName="" ): #, info="" ):
64 "Checks whether a computation matching the given value already exists"
# Scan the cache backwards (most recent first) over at most __lenghtOR
# entries; skip entries whose operator name or point size differ, otherwise
# accept when the distance to the stored point is below the relative
# tolerance __tolerBP scaled by the stored point norm (entry [2]).
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
69 if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
70 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
72 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
# Cache hit: reuse the stored result (entry [1] of the cached tuple).
74 __HxV = self.__listOPCV[i][1]
75 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
79 def storeValueInX(self, xValue, HxValue, oName="" ):
80 "Stores, for an operator o, a computation Hx matching the value x"
# First store: auto-size the cache from the state dimension (2*n+2 entries)
# and register the operator name as seen.
81 if self.__lenghtOR < 0:
82 self.__lenghtOR = 2 * xValue.size + 2
83 self.__initlnOR = self.__lenghtOR
84 self.__seenNames.append(str(oName))
85 if str(oName) not in self.__seenNames: # Extend the list if the operator is new
86 self.__lenghtOR += 2 * xValue.size + 2
87 self.__initlnOR += self.__lenghtOR
88 self.__seenNames.append(str(oName))
# FIFO eviction: drop the oldest entries once the cache exceeds its bound.
89 while len(self.__listOPCV) > self.__lenghtOR:
90 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
91 self.__listOPCV.pop(0)
92 self.__listOPCV.append( (
93 copy.copy(numpy.ravel(xValue)),
95 numpy.linalg.norm(xValue),
# disable()/enable() tails (method headers are on missing lines): toggle
# caching on/off; the visible assignments save/restore the length bound.
101 self.__initlnOR = self.__lenghtOR
103 self.__enabled = False
107 self.__lenghtOR = self.__initlnOR
108 self.__enabled = True
110 # ==============================================================================
111 class Operator(object):
# Operator: generic wrapper around either a matrix or a (multi-)function,
# with global/per-instance call counting and an optional shared redundancy
# cache (Operator.CM, a CacheManager declared on a line missing from this
# listing).
# NOTE(review): line-sampled listing — interior lines are missing in every
# method below.
113 General interface class for a simple operator type
# Constructor keyword tail (fromMethod/fromMatrix parameters are on missing
# lines):
121 name = "GenericOperator",
124 avoidingRedundancy = True,
125 inputAsMultiFunction = False,
126 enableMultiProcess = False,
127 extraArguments = None,
130 An object of this type is built by providing, through one of the
131 two keywords, either a Python function or multi-function, or a
134 - name : operator name
135 - fromMethod : Python function type argument
136 - fromMatrix : argument suitable for the numpy.matrix constructor
137 - avoidingRedundancy : boolean avoiding (or not) redundant computations
138 - inputAsMultiFunction : boolean indicating a function explicitly
139 defined (or not) as a multi-function
140 - extraArguments : additional arguments passed to the base
141 function and its derivatives (tuple or dictionary)
143 self.__name = str(name)
# Per-instance call counters (class-level global counters also exist, see
# the __addOne*Call methods at the bottom).
144 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
145 self.__AvoidRC = bool( avoidingRedundancy )
146 self.__inputAsMF = bool( inputAsMultiFunction )
147 self.__mpEnabled = bool( enableMultiProcess )
148 self.__extraArgs = extraArguments
# A user multi-function is used directly; a plain mono-function is wrapped
# by partial(MultiFonction, ...) so everything downstream sees the
# multi-function calling convention.
149 if fromMethod is not None and self.__inputAsMF:
150 self.__Method = fromMethod # logtimer(fromMethod)
152 self.__Type = "Method"
153 elif fromMethod is not None and not self.__inputAsMF:
154 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
156 self.__Type = "Method"
157 elif fromMatrix is not None:
# NOTE(review): numpy.matrix is deprecated and the numpy.float alias was
# removed in NumPy 1.24 — this line needs migration (numpy.asarray/float)
# but is left untouched here.
159 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
160 self.__Type = "Matrix"
# Toggle the shared redundancy cache for ALL Operator instances.
166 def disableAvoidingRedundancy(self):
168 Operator.CM.disable()
170 def enableAvoidingRedundancy(self):
# NOTE(review): only the disable() branch of enableAvoidingRedundancy is
# visible; the enabling branch (conditioned on self.__AvoidRC, presumably)
# sits on the missing lines 171-174 — confirm against the full source.
175 Operator.CM.disable()
181 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
183 Returns the result of applying the operator to a
184 series of xValue arguments. This method simply applies; each
185 argument is expected to already be of the proper type.
187 - the arguments per series are:
188 - xValue : argument suitable for applying the operator
189 - HValue : precomputed value of the operator at this point
190 - argsAsSerie : indicates whether the arguments are mono- or multi-valued
# Path 1: precomputed HValue supplied — store it (optionally caching).
197 if HValue is not None:
201 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
203 if _HValue is not None:
204 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
206 for i in range(len(_HValue)):
207 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
209 Operator.CM.storeValueInX(_xValue[i],HxValue[-1],self.__name)
# Path 2: evaluate each point, first consulting the cache, then either the
# matrix form or batching the remaining points for the (multi-)function.
214 for i, xv in enumerate(_xValue):
216 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
218 __alreadyCalculated = False
220 if __alreadyCalculated:
221 self.__addOneCacheCall()
224 if self.__Matrix is not None:
225 self.__addOneMatrixCall()
226 _hv = self.__Matrix * xv
228 self.__addOneMethodCall()
232 HxValue.append( _hv )
# Batched multi-function evaluation of all points not resolved above.
234 if len(_xserie)>0 and self.__Matrix is None:
235 if self.__extraArgs is None:
236 _hserie = self.__Method( _xserie ) # MF computation
238 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
239 if not hasattr(_hserie, "pop"):
240 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
246 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Series in -> series out; single value in -> single value out.
248 if argsAsSerie: return HxValue
249 else: return HxValue[-1]
251 def appliedControledFormTo(self, paires, argsAsSerie = False):
253 Returns the result of applying the operator to
254 (xValue, uValue) pairs. This method simply applies; its
255 argument is expected to be of the proper type. If uValue is None,
256 the operator is assumed to apply only to xValue.
258 - paires : the arguments per pair are:
259 - xValue : X argument suitable for applying the operator
260 - uValue : U argument suitable for applying the operator
261 - argsAsSerie : indicates whether the argument is mono- or multi-valued
263 if argsAsSerie: _xuValue = paires
264 else: _xuValue = (paires,)
265 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix form applies to X only (the control U is ignored by a linear map).
267 if self.__Matrix is not None:
269 for paire in _xuValue:
270 _xValue, _uValue = paire
271 self.__addOneMatrixCall()
272 HxValue.append( self.__Matrix * _xValue )
# Function form: keep (X,U) pairs when U is set, bare X otherwise.
# NOTE(review): the visible lines appear to append into _xuValue while
# iterating it — the rebinding that makes this safe is on missing lines;
# confirm against the full source.
275 for paire in _xuValue:
277 _xValue, _uValue = paire
278 if _uValue is not None:
279 _xuValue.append( paire )
281 _xuValue.append( _xValue )
282 self.__addOneMethodCall( len(_xuValue) )
283 if self.__extraArgs is None:
284 HxValue = self.__Method( _xuValue ) # MF computation
286 HxValue = self.__Method( _xuValue, self.__extraArgs ) # MF computation
288 if argsAsSerie: return HxValue
289 else: return HxValue[-1]
291 def appliedInXTo(self, paires, argsAsSerie = False):
293 Returns the result of applying the operator to a
294 series of xValue arguments, the operator being valid at
295 xNominal. This method simply applies; its argument is expected
296 to be of the proper type. If the operator is linear, being a
297 matrix, then it is valid at any nominal point and xNominal may
298 be anything. There is a single pair by default, and argsAsSerie
299 allows indicating that the argument is multi-pair.
301 - paires : the arguments per pair are:
302 - xNominal : series of arguments giving the point where the
303 operator is built to be subsequently applied
304 - xValue : series of arguments suitable for applying the operator
305 - argsAsSerie : indicates whether the argument is mono- or multi-valued
307 if argsAsSerie: _nxValue = paires
308 else: _nxValue = (paires,)
309 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear (matrix) case: the nominal point is irrelevant, apply to X only.
311 if self.__Matrix is not None:
313 for paire in _nxValue:
314 _xNominal, _xValue = paire
315 self.__addOneMatrixCall()
316 HxValue.append( self.__Matrix * _xValue )
# Function case: forward the (xNominal, xValue) pairs to the multi-function.
318 self.__addOneMethodCall( len(_nxValue) )
319 if self.__extraArgs is None:
320 HxValue = self.__Method( _nxValue ) # MF computation
322 HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
324 if argsAsSerie: return HxValue
325 else: return HxValue[-1]
326 
327 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
329 Returns the operator in matrix form
331 if self.__Matrix is not None:
332 self.__addOneMatrixCall()
333 mValue = [self.__Matrix,]
# Sentinel string marks "no operating point given" (None is a legal point).
334 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
337 self.__addOneMethodCall( len(ValueForMethodForm) )
338 for _vfmf in ValueForMethodForm:
339 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
341 self.__addOneMethodCall()
342 mValue = self.__Method(((ValueForMethodForm, None),))
344 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
346 if argsAsSerie: return mValue
347 else: return mValue[-1]
# shape accessor (def line is on a missing line): numpy-style shape of the
# matrix form, or ValueError when only a function form exists.
351 Returns the shape, numpy style, if the operator is available in
352 matrix form
354 if self.__Matrix is not None:
355 return self.__Matrix.shape
357 raise ValueError("Matrix form of the operator is not available, nor the shape")
359 def nbcalls(self, which=None):
361 Returns the numbers of operator evaluations
# Tuple layout: indices 0-3 are per-instance (total, matrix, method,
# cached), indices 4-7 are the class-wide equivalents.
364 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
365 self.__NbCallsAsMatrix,
366 self.__NbCallsAsMethod,
367 self.__NbCallsOfCached,
368 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
369 Operator.NbCallsAsMatrix,
370 Operator.NbCallsAsMethod,
371 Operator.NbCallsOfCached,
373 if which is None: return __nbcalls
374 else: return __nbcalls[which]
376 def __addOneMatrixCall(self):
377 "Counts one call"
378 self.__NbCallsAsMatrix += 1 # Local count
379 Operator.NbCallsAsMatrix += 1 # Global count
381 def __addOneMethodCall(self, nb = 1):
382 "Counts one call"
383 self.__NbCallsAsMethod += nb # Local count
384 Operator.NbCallsAsMethod += nb # Global count
386 def __addOneCacheCall(self):
387 "Counts one call"
388 self.__NbCallsOfCached += 1 # Local count
389 Operator.NbCallsOfCached += 1 # Global count
391 # ==============================================================================
392 class FullOperator(object):
# FullOperator: bundles the three Operator views (Direct / Tangent / Adjoint)
# built from one of several user inputs (matrix, one function with
# finite-difference derivatives, three functions, or a script), into the
# self.__FO dictionary.
# NOTE(review): line-sampled listing — interior lines are missing throughout.
394 General interface class for a full operator type
395 (Direct, Linear Tangent, Adjoint)
# Constructor keyword tail (other parameters such as asMatrix, appliedInX,
# avoidRC, scheduledBy, toBeChecked are on missing lines):
398 name = "GenericFullOperator",
400 asOneFunction = None, # 1 Function
401 asThreeFunctions = None, # 3 Functions in a dictionary
402 asScript = None, # 1 or 3 Function(s) by script
403 asDict = None, # Parameters
405 extraArguments = None,
407 inputAsMF = False,# Function(s) as Multi-Functions
412 self.__name = str(name)
413 self.__check = bool(toBeChecked)
414 self.__extraArgs = extraArguments
# Merge user parameters and normalize the multiprocessing flags: derivatives
# and evaluation multiprocessing are mutually exclusive.
419 if (asDict is not None) and isinstance(asDict, dict):
420 __Parameters.update( asDict )
421 # Priority to EnableMultiProcessingInDerivatives=True
422 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
423 __Parameters["EnableMultiProcessingInDerivatives"] = True
424 __Parameters["EnableMultiProcessingInEvaluation"] = False
425 if "EnableMultiProcessingInDerivatives" not in __Parameters:
426 __Parameters["EnableMultiProcessingInDerivatives"] = False
427 if __Parameters["EnableMultiProcessingInDerivatives"]:
428 __Parameters["EnableMultiProcessingInEvaluation"] = False
429 if "EnableMultiProcessingInEvaluation" not in __Parameters:
430 __Parameters["EnableMultiProcessingInEvaluation"] = False
431 if "withIncrement" in __Parameters: # Temporary (legacy keyword alias)
432 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Input form 1: a script exporting either a matrix, one Direct operator, or
# the Direct/Tangent/Adjoint triple.
434 if asScript is not None:
435 __Matrix, __Function = None, None
437 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
439 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
440 __Function.update({"useApproximatedDerivatives":True})
441 __Function.update(__Parameters)
442 elif asThreeFunctions:
444 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
445 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
446 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
448 __Function.update(__Parameters)
# Input form 2: direct Python callables, one or three of them.
451 if asOneFunction is not None:
452 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
453 if asOneFunction["Direct"] is not None:
454 __Function = asOneFunction
456 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
458 __Function = { "Direct":asOneFunction }
459 __Function.update({"useApproximatedDerivatives":True})
460 __Function.update(__Parameters)
461 elif asThreeFunctions is not None:
# Full triple: Tangent and Adjoint given and no approximated derivatives
# requested.
462 if isinstance(asThreeFunctions, dict) and \
463 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
464 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
465 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
466 __Function = asThreeFunctions
467 elif isinstance(asThreeFunctions, dict) and \
468 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
469 __Function = asThreeFunctions
470 __Function.update({"useApproximatedDerivatives":True})
472 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Missing Direct falls back to the Tangent function.
473 if "Direct" not in asThreeFunctions:
474 __Function["Direct"] = asThreeFunctions["Tangent"]
475 __Function.update(__Parameters)
479 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
480 # for k in ("Direct", "Tangent", "Adjoint"):
481 # if k in __Function and hasattr(__Function[k],"__class__"):
482 # if type(__Function[k]) is type(self.__init__):
483 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize appliedInX to a dictionary keyed by "HXb".
485 if appliedInX is not None and isinstance(appliedInX, dict):
486 __appliedInX = appliedInX
487 elif appliedInX is not None:
488 __appliedInX = {"HXb":appliedInX}
492 if scheduledBy is not None:
493 self.__T = scheduledBy
# Case A: Direct function with finite-difference (FD) approximated
# derivatives — fill FD defaults then build the three Operators around an
# FDApproximation helper.
495 if isinstance(__Function, dict) and \
496 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
497 ("Direct" in __Function) and (__Function["Direct"] is not None):
498 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
499 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
500 if "withdX" not in __Function: __Function["withdX"] = None
501 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
502 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
503 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
504 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
505 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Local import to avoid a circular dependency at module load time.
506 from daCore import NumericObjects
507 FDA = NumericObjects.FDApproximation(
509 Function = __Function["Direct"],
510 centeredDF = __Function["CenteredFiniteDifference"],
511 increment = __Function["DifferentialIncrement"],
512 dX = __Function["withdX"],
513 avoidingRedundancy = __Function["withAvoidingRedundancy"],
514 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
515 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
516 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
517 mpWorkers = __Function["NumberOfProcesses"],
518 mfEnabled = __Function["withmfEnabled"],
520 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
521 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
522 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case B: the full Direct/Tangent/Adjoint triple of user functions.
523 elif isinstance(__Function, dict) and \
524 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
525 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
526 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
527 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
528 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case C: a matrix — Tangent is the matrix itself, Adjoint its transpose.
# NOTE(review): numpy.matrix is deprecated and numpy.float was removed in
# NumPy 1.24 — needs migration, left untouched here.
529 elif asMatrix is not None:
530 __matrice = numpy.matrix( __Matrix, numpy.float )
531 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
532 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
533 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
536 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Normalize every appliedInX entry into a column matrix.
538 if __appliedInX is not None:
539 self.__FO["AppliedInX"] = {}
540 for key in list(__appliedInX.keys()):
541 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
542 # Case where we have a true matrix
543 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
544 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
545 # Case where we have a vector represented as a 2-dimensional array
546 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
548 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
550 self.__FO["AppliedInX"] = None
# Debug representations delegate to the internal operator dictionary.
556 "x.__repr__() <==> repr(x)"
557 return repr(self.__FO)
560 "x.__str__() <==> str(x)"
561 return str(self.__FO)
563 # ==============================================================================
564 class Algorithm(object):
566 Classe générale d'interface de type algorithme
568 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
569 d'assimilation, en fournissant un container (dictionnaire) de variables
570 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
572 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
574 def __init__(self, name):
# Builds the generic storage variables (Persistence objects) available to
# every elementary algorithm, plus the case-insensitive name-lookup tables.
# NOTE(review): line-sampled listing — some interior lines are missing.
576 The present initialization builds storage variables generically
577 available in the elementary algorithms. These storage
578 variables are then kept in a dictionary internal to the
579 object, accessed through the "get" method.
581 The planned variables are:
582 - APosterioriCorrelations : correlation matrix of the matrix A
583 - APosterioriCovariance : a posteriori covariance matrix : A
584 - APosterioriStandardDeviations : vector of standard deviations of the matrix A
585 - APosterioriVariances : vector of variances of the matrix A
586 - Analysis : analysis vector : Xa
587 - BMA : Background minus Analysis : Xa - Xb
588 - CostFunctionJ : global cost function, sum of the two following parts Jb and Jo
589 - CostFunctionJAtCurrentOptimum : global cost function at the current optimal state during iterations
590 - CostFunctionJb : background part of the cost function : Jb
591 - CostFunctionJbAtCurrentOptimum : background part at the current optimal state during iterations
592 - CostFunctionJo : observation part of the cost function : Jo
593 - CostFunctionJoAtCurrentOptimum : observation part at the current optimal state during iterations
594 - CurrentOptimum : current optimal state during iterations
595 - CurrentState : current state during iterations
596 - GradientOfCostFunctionJ : gradient of the global cost function
597 - GradientOfCostFunctionJb : gradient of the background part of the cost function
598 - GradientOfCostFunctionJo : gradient of the observation part of the cost function
599 - IndexOfOptimum : index of the current optimal state during iterations
600 - Innovation : the innovation : d = Y - H(X)
601 - InnovationAtCurrentState : the innovation at the current state : dn = Y - H(Xn)
602 - JacobianMatrixAtBackground : Jacobian matrix at the background state
603 - JacobianMatrixAtCurrentState : Jacobian matrix at the current state
604 - JacobianMatrixAtOptimum : Jacobian matrix at the optimum
605 - KalmanGainAtOptimum : Kalman gain at the optimum
606 - MahalanobisConsistency : covariance consistency indicator
607 - OMA : Observation minus Analysis : Y - Xa
608 - OMB : Observation minus Background : Y - Xb
609 - ForecastState : current forecast state during iterations
610 - Residu : for the checking algorithms
611 - SigmaBck2 : indicator of optimal correction of background errors
612 - SigmaObs2 : indicator of optimal correction of observation errors
613 - SimulatedObservationAtBackground : the observed state H(Xb) at the background
614 - SimulatedObservationAtCurrentOptimum : the observed state H(X) at the current optimal state
615 - SimulatedObservationAtCurrentState : the observed state H(X) at the current state
616 - SimulatedObservationAtOptimum : the observed state H(Xa) at the optimum
617 - SimulationQuantiles : observed states H(X) for the requested quantiles
618 Additional variables to store can be added in the initialization of
619 the elementary algorithm that inherits from this class
621 logging.debug("%s Initialisation", str(name))
622 self._m = PlatformInfo.SystemUsage()
624 self._name = str( name )
625 self._parameters = {"StoreSupplementaryCalculations":[]}
626 self.__required_parameters = {}
627 self.__required_inputs = {
628 "RequiredInputValues":{"mandatory":(), "optional":()},
629 "ClassificationTags":[],
631 self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
632 self.__canonical_parameter_name = {} # Mapping "lower" -> "correct"
633 self.__canonical_stored_name = {} # Mapping "lower" -> "correct"
# One Persistence container per storable variable, keyed by canonical name.
635 self.StoredVariables = {}
636 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
637 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
638 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
639 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
640 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
641 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
642 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
643 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
644 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
645 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
646 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
647 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
648 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
649 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
650 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
651 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
652 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
653 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
654 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
655 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
656 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
657 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
658 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
659 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
660 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
661 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
662 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
663 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
664 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
665 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
666 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
667 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
668 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
669 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
670 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
671 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
672 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
673 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Case-insensitive lookup tables so users can pass any capitalization.
675 for k in self.StoredVariables:
676 self.__canonical_stored_name[k.lower()] = k
678 for k, v in self.__variable_names_not_public.items():
679 self.__canonical_parameter_name[k.lower()] = k
680 self.__canonical_parameter_name["algorithm"] = "Algorithm"
681 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
683 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
# Common pre-processing for every elementary algorithm: merge parameters,
# check required inputs (vectors and covariances), normalize bounds, and
# set the optimizer verbosity flags from the logging level.
# NOTE(review): line-sampled listing — some interior lines are missing.
685 logging.debug("%s Lancement", self._name)
686 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
688 # Update the internal parameters with the content of Parameters, taking
689 # the default values for all those not defined
690 self.__setParameters(Parameters, reset=True)
691 for k, v in self.__variable_names_not_public.items():
692 if k not in self._parameters: self.__setParameters( {k:v} )
694 # Corrections and complements
# Nested helper: validate one input vector against the declared
# mandatory/optional requirements, raising for missing mandatory ones.
695 def __test_vvalue(argument, variable, argname):
697 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
698 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
699 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
700 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
702 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
704 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
706 __test_vvalue( Xb, "Xb", "Background or initial state" )
707 __test_vvalue( Y, "Y", "Observation" )
# Nested helper: same validation for error covariance matrices.
709 def __test_cvalue(argument, variable, argname):
711 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
712 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
713 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
714 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
716 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
718 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
720 __test_cvalue( R, "R", "Observation" )
721 __test_cvalue( B, "B", "Background" )
722 __test_cvalue( Q, "Q", "Evolution" )
# Bounds: keep only a non-empty list/tuple, otherwise normalize to None.
724 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
725 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
727 self._parameters["Bounds"] = None
# Verbose optimizer output when the logging level is below WARNING;
# scipy.optimize.tnc message constants when scipy is available.
729 if logging.getLogger().level < logging.WARNING:
730 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
731 if PlatformInfo.has_scipy:
732 import scipy.optimize
733 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
735 self._parameters["optmessages"] = 15
737 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
738 if PlatformInfo.has_scipy:
739 import scipy.optimize
740 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
742 self._parameters["optmessages"] = 15
746 def _post_run(self,_oH=None):
# Common post-processing: derive the requested supplementary a posteriori
# quantities (variances, standard deviations, correlations) from each
# stored APosterioriCovariance matrix, then log operator call statistics.
# NOTE(review): line-sampled listing — some interior lines are missing.
748 if ("StoreSupplementaryCalculations" in self._parameters) and \
749 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
750 for _A in self.StoredVariables["APosterioriCovariance"]:
751 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
752 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
753 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
754 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
755 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlations C = D^-1/2 A D^-1/2 with D the diagonal of A.
756 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
757 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
758 self.StoredVariables["APosterioriCorrelations"].store( _C )
# Log direct/tangent/adjoint evaluation and cache-hit counts (see
# Operator.nbcalls: index 0 is the total, index 3 the cached count).
759 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
760 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
761 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
762 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
763 logging.debug("%s Terminé", self._name)
def _toStore(self, key):
    "True if the key is in StoreSupplementaryCalculations, else False"
    # Simple membership test against the user-selected list of extra
    # quantities to store; used by algorithms to guard optional storage.
    return key in self._parameters["StoreSupplementaryCalculations"]
770 def get(self, key=None):
772 Renvoie l'une des variables stockées identifiée par la clé, ou le
773 dictionnaire de l'ensemble des variables disponibles en l'absence de
774 clé. Ce sont directement les variables sous forme objet qui sont
775 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
776 des classes de persistance.
779 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
781 return self.StoredVariables
783 def __contains__(self, key=None):
784 "D.__contains__(k) -> True if D has a key k, else False"
785 if key is None or key.lower() not in self.__canonical_stored_name:
788 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
791 "D.keys() -> list of D's keys"
792 if hasattr(self, "StoredVariables"):
793 return self.StoredVariables.keys()
798 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
799 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
800 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
805 raise TypeError("pop expected at least 1 arguments, got 0")
806 "If key is not found, d is returned if given, otherwise KeyError is raised"
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
    """
    Has to implement the elementary assimilation computation in its most
    natural mathematical form. Concrete algorithm subclasses override it.

    Raises:
        NotImplementedError: always, in this abstract base implementation.
    """
    raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
    """
    Declare a required algorithm parameter and its default characteristics
    (default value, typecast, bounds, allowed values, description message).
    """
    if name is None:
        raise ValueError("A name is mandatory to define a required parameter.")
    #
    # The stored keys are exactly those read back by setParameterValue().
    self.__required_parameters[name] = {
        "default"  : default,
        "typecast" : typecast,
        "minval"   : minval,
        "maxval"   : maxval,
        "listval"  : listval,
        "message"  : message,
        }
    # Case-insensitive lookup table from lowered name to canonical name.
    self.__canonical_parameter_name[name.lower()] = name
    logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
    """
    Return the sorted list of required parameter names (noDetails=True),
    or the full dictionary of required parameter descriptions otherwise.
    """
    if noDetails:
        return sorted(self.__required_parameters.keys())
    else:
        return self.__required_parameters
def setParameterValue(self, name=None, value=None):
    """
    Return the value of a required parameter in a controlled way: apply the
    declared typecast, then check minval/maxval bounds and listval membership.

    Raises:
        ValueError: if the typecast fails or a bound/membership check fails.
    """
    __k = self.__canonical_parameter_name[name.lower()]
    default  = self.__required_parameters[__k]["default"]
    typecast = self.__required_parameters[__k]["typecast"]
    minval   = self.__required_parameters[__k]["minval"]
    maxval   = self.__required_parameters[__k]["maxval"]
    listval  = self.__required_parameters[__k]["listval"]
    #
    # Choose the raw value: explicit value wins over the declared default.
    if value is None and default is None:
        __val = None
    elif value is None and default is not None:
        if typecast is None: __val = default
        else:                __val = typecast( default )
    else:
        if typecast is None: __val = value
        else:
            try:
                __val = typecast( value )
            except Exception:
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
    #
    # Bound checks work element-wise so vector-valued parameters are covered.
    if minval is not None and (numpy.array(__val, float) < minval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
    if maxval is not None and (numpy.array(__val, float) > maxval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
    if listval is not None:
        if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
            for v in __val:
                if v not in listval:
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
        elif __val not in listval:
            raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    #
    return __val
def requireInputArguments(self, mandatory=(), optional=()):
    """
    Declare the calculation input arguments required by the algorithm,
    split into mandatory and optional names.
    """
    self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
    self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
def getInputArguments(self):
    """
    Return the (mandatory, optional) tuples of required input argument names.
    """
    return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
def setAttributes(self, tags=()):
    """
    Append classification tags to the algorithm attributes.
    Return the current tag list in every case.
    """
    self.__required_inputs["ClassificationTags"].extend( tags )
    return self.__required_inputs["ClassificationTags"]
907 def __setParameters(self, fromDico={}, reset=False):
909 Permet de stocker les paramètres reçus dans le dictionnaire interne.
911 self._parameters.update( fromDico )
912 __inverse_fromDico_keys = {}
913 for k in fromDico.keys():
914 if k.lower() in self.__canonical_parameter_name:
915 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
916 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
917 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
918 for k in self.__required_parameters.keys():
919 if k in __canonic_fromDico_keys:
920 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
922 self._parameters[k] = self.setParameterValue(k)
925 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
927 # ==============================================================================
928 class AlgorithmAndParameters(object):
930 Classe générale d'interface d'action pour l'algorithme et ses paramètres
933 name = "GenericAlgorithm",
940 self.__name = str(name)
944 self.__algorithm = {}
945 self.__algorithmFile = None
946 self.__algorithmName = None
948 self.updateParameters( asDict, asScript )
950 if asAlgorithm is None and asScript is not None:
951 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
955 if __Algo is not None:
956 self.__A = str(__Algo)
957 self.__P.update( {"Algorithm":self.__A} )
959 self.__setAlgorithm( self.__A )
961 self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
963 def updateParameters(self,
967 "Mise a jour des parametres"
968 if asDict is None and asScript is not None:
969 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
973 if __Dict is not None:
974 self.__P.update( dict(__Dict) )
976 def executePythonScheme(self, asDictAO = None):
977 "Permet de lancer le calcul d'assimilation"
978 Operator.CM.clearCache()
980 if not isinstance(asDictAO, dict):
981 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
982 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
983 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
984 else: self.__Xb = None
985 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
986 else: self.__Y = asDictAO["Observation"]
987 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
988 else: self.__U = asDictAO["ControlInput"]
989 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
990 else: self.__HO = asDictAO["ObservationOperator"]
991 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
992 else: self.__EM = asDictAO["EvolutionModel"]
993 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
994 else: self.__CM = asDictAO["ControlModel"]
995 self.__B = asDictAO["BackgroundError"]
996 self.__R = asDictAO["ObservationError"]
997 self.__Q = asDictAO["EvolutionError"]
999 self.__shape_validate()
1001 self.__algorithm.run(
1011 Parameters = self.__P,
1015 def executeYACSScheme(self, FileName=None):
1016 "Permet de lancer le calcul d'assimilation"
1017 if FileName is None or not os.path.exists(FileName):
1018 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1020 __file = os.path.abspath(FileName)
1021 logging.debug("The YACS file name is \"%s\"."%__file)
1022 if not PlatformInfo.has_salome or \
1023 not PlatformInfo.has_yacs or \
1024 not PlatformInfo.has_adao:
1025 raise ImportError("\n\n"+\
1026 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1027 "Please load the right environnement before trying to use it.\n")
1030 import SALOMERuntime
1032 SALOMERuntime.RuntimeSALOME_setRuntime()
1034 r = pilot.getRuntime()
1035 xmlLoader = loader.YACSLoader()
1036 xmlLoader.registerProcCataLoader()
1038 catalogAd = r.loadCatalog("proc", __file)
1039 r.addCatalog(catalogAd)
1044 p = xmlLoader.load(__file)
1045 except IOError as ex:
1046 print("The YACS XML schema file can not be loaded: %s"%(ex,))
1048 logger = p.getLogger("parser")
1049 if not logger.isEmpty():
1050 print("The imported YACS XML schema has errors on parsing:")
1051 print(logger.getStr())
1054 print("The YACS XML schema is not valid and will not be executed:")
1055 print(p.getErrorReport())
1057 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1058 p.checkConsistency(info)
1059 if info.areWarningsOrErrors():
1060 print("The YACS XML schema is not coherent and will not be executed:")
1061 print(info.getGlobalRepr())
1063 e = pilot.ExecutorSwig()
1065 if p.getEffectiveState() != pilot.DONE:
1066 print(p.getErrorReport())
1070 def get(self, key = None):
1071 "Vérifie l'existence d'une clé de variable ou de paramètres"
1072 if key in self.__algorithm:
1073 return self.__algorithm.get( key )
1074 elif key in self.__P:
1075 return self.__P[key]
1077 allvariables = self.__P
1078 for k in self.__variable_names_not_public: allvariables.pop(k, None)
def pop(self, k, d):
    "Required for pickling support: delegate to the algorithm container."
    return self.__algorithm.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the list of parameters required by the selected algorithm."
    return self.__algorithm.getRequiredParameters(noDetails)
def getAlgorithmInputArguments(self):
    "Return the input arguments required by the selected algorithm."
    return self.__algorithm.getInputArguments()
def getAlgorithmAttributes(self):
    "Return the attribute (tag) list of the selected algorithm."
    # setAttributes() with no argument returns the current tag list
    # without modifying it.
    return self.__algorithm.setAttributes()
1097 def setObserver(self, __V, __O, __I, __S):
1098 if self.__algorithm is None \
1099 or isinstance(self.__algorithm, dict) \
1100 or not hasattr(self.__algorithm,"StoredVariables"):
1101 raise ValueError("No observer can be build before choosing an algorithm.")
1102 if __V not in self.__algorithm:
1103 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1105 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1108 HookParameters = __I,
1111 def removeObserver(self, __V, __O, __A = False):
1112 if self.__algorithm is None \
1113 or isinstance(self.__algorithm, dict) \
1114 or not hasattr(self.__algorithm,"StoredVariables"):
1115 raise ValueError("No observer can be removed before choosing an algorithm.")
1116 if __V not in self.__algorithm:
1117 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1119 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
def hasObserver(self, __V):
    "Return True if a data observer is set on the variable named __V."
    # No observer can exist before a real algorithm object is in place.
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm, "StoredVariables"):
        return False
    if __V not in self.__algorithm:
        return False
    return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1134 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1135 for k in self.__variable_names_not_public:
1136 if k in __allvariables: __allvariables.remove(k)
1137 return __allvariables
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # A key may live either in the algorithm's stored variables or in
    # the parameter dictionary.
    return key in self.__algorithm or key in self.__P
1144 "x.__repr__() <==> repr(x)"
1145 return repr(self.__A)+", "+repr(self.__P)
1148 "x.__str__() <==> str(x)"
1149 return str(self.__A)+", "+str(self.__P)
1151 def __setAlgorithm(self, choice = None ):
1153 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1154 d'assimilation. L'argument est un champ caractère se rapportant au nom
1155 d'un algorithme réalisant l'opération sur les arguments fixes.
1158 raise ValueError("Error: algorithm choice has to be given")
1159 if self.__algorithmName is not None:
1160 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1161 daDirectory = "daAlgorithms"
1163 # Recherche explicitement le fichier complet
1164 # ------------------------------------------
1166 for directory in sys.path:
1167 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1168 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1169 if module_path is None:
1170 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1172 # Importe le fichier complet comme un module
1173 # ------------------------------------------
1175 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1176 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1177 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1178 raise ImportError("this module does not define a valid elementary algorithm.")
1179 self.__algorithmName = str(choice)
1180 sys.path = sys_path_tmp ; del sys_path_tmp
1181 except ImportError as e:
1182 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1184 # Instancie un objet du type élémentaire du fichier
1185 # -------------------------------------------------
1186 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1189 def __shape_validate(self):
1191 Validation de la correspondance correcte des tailles des variables et
1192 des matrices s'il y en a.
1194 if self.__Xb is None: __Xb_shape = (0,)
1195 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1196 elif hasattr(self.__Xb,"shape"):
1197 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1198 else: __Xb_shape = self.__Xb.shape()
1199 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1201 if self.__Y is None: __Y_shape = (0,)
1202 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1203 elif hasattr(self.__Y,"shape"):
1204 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1205 else: __Y_shape = self.__Y.shape()
1206 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1208 if self.__U is None: __U_shape = (0,)
1209 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1210 elif hasattr(self.__U,"shape"):
1211 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1212 else: __U_shape = self.__U.shape()
1213 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1215 if self.__B is None: __B_shape = (0,0)
1216 elif hasattr(self.__B,"shape"):
1217 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1218 else: __B_shape = self.__B.shape()
1219 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1221 if self.__R is None: __R_shape = (0,0)
1222 elif hasattr(self.__R,"shape"):
1223 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1224 else: __R_shape = self.__R.shape()
1225 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1227 if self.__Q is None: __Q_shape = (0,0)
1228 elif hasattr(self.__Q,"shape"):
1229 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1230 else: __Q_shape = self.__Q.shape()
1231 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1233 if len(self.__HO) == 0: __HO_shape = (0,0)
1234 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1235 elif hasattr(self.__HO["Direct"],"shape"):
1236 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1237 else: __HO_shape = self.__HO["Direct"].shape()
1238 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1240 if len(self.__EM) == 0: __EM_shape = (0,0)
1241 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1242 elif hasattr(self.__EM["Direct"],"shape"):
1243 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1244 else: __EM_shape = self.__EM["Direct"].shape()
1245 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1247 if len(self.__CM) == 0: __CM_shape = (0,0)
1248 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1249 elif hasattr(self.__CM["Direct"],"shape"):
1250 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1251 else: __CM_shape = self.__CM["Direct"].shape()
1252 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1254 # Vérification des conditions
1255 # ---------------------------
1256 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1257 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1258 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1259 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1261 if not( min(__B_shape) == max(__B_shape) ):
1262 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1263 if not( min(__R_shape) == max(__R_shape) ):
1264 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1265 if not( min(__Q_shape) == max(__Q_shape) ):
1266 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1267 if not( min(__EM_shape) == max(__EM_shape) ):
1268 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1270 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1271 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1272 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1273 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1274 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1275 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1276 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1277 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1279 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1280 if self.__algorithmName in ["EnsembleBlue",]:
1281 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1282 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1283 for member in asPersistentVector:
1284 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1285 __Xb_shape = min(__B_shape)
1287 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1289 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1290 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1292 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1293 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1295 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1296 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1298 if ("Bounds" in self.__P) \
1299 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1300 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1301 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1302 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1306 # ==============================================================================
1307 class RegulationAndParameters(object):
1309 Classe générale d'interface d'action pour la régulation et ses paramètres
1312 name = "GenericRegulation",
1319 self.__name = str(name)
1322 if asAlgorithm is None and asScript is not None:
1323 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1325 __Algo = asAlgorithm
1327 if asDict is None and asScript is not None:
1328 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1332 if __Dict is not None:
1333 self.__P.update( dict(__Dict) )
1335 if __Algo is not None:
1336 self.__P.update( {"Algorithm":str(__Algo)} )
1338 def get(self, key = None):
1339 "Vérifie l'existence d'une clé de variable ou de paramètres"
1341 return self.__P[key]
1345 # ==============================================================================
1346 class DataObserver(object):
1348 Classe générale d'interface de type observer
1351 name = "GenericObserver",
1363 self.__name = str(name)
1368 if onVariable is None:
1369 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1370 elif type(onVariable) in (tuple, list):
1371 self.__V = tuple(map( str, onVariable ))
1372 if withInfo is None:
1375 self.__I = (str(withInfo),)*len(self.__V)
1376 elif isinstance(onVariable, str):
1377 self.__V = (onVariable,)
1378 if withInfo is None:
1379 self.__I = (onVariable,)
1381 self.__I = (str(withInfo),)
1383 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1385 if asObsObject is not None:
1386 self.__O = asObsObject
1388 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1389 __Function = Observer2Func(__FunctionText)
1390 self.__O = __Function.getfunc()
1392 for k in range(len(self.__V)):
1395 if ename not in withAlgo:
1396 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1398 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1401 "x.__repr__() <==> repr(x)"
1402 return repr(self.__V)+"\n"+repr(self.__O)
1405 "x.__str__() <==> str(x)"
1406 return str(self.__V)+"\n"+str(self.__O)
1408 # ==============================================================================
1409 class UserScript(object):
1411 Classe générale d'interface de type texte de script utilisateur
1414 name = "GenericUserScript",
1421 self.__name = str(name)
1423 if asString is not None:
1425 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1426 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1427 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1428 self.__F = Templates.ObserverTemplates[asTemplate]
1429 elif asScript is not None:
1430 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1435 "x.__repr__() <==> repr(x)"
1436 return repr(self.__F)
1439 "x.__str__() <==> str(x)"
1440 return str(self.__F)
1442 # ==============================================================================
1443 class ExternalParameters(object):
1445 Classe générale d'interface de type texte de script utilisateur
1448 name = "GenericExternalParameters",
1454 self.__name = str(name)
1457 self.updateParameters( asDict, asScript )
1459 def updateParameters(self,
1463 "Mise a jour des parametres"
1464 if asDict is None and asScript is not None:
1465 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1469 if __Dict is not None:
1470 self.__P.update( dict(__Dict) )
1472 def get(self, key = None):
1474 return self.__P[key]
1476 return list(self.__P.keys())
1479 return list(self.__P.keys())
def pop(self, k, d):
    "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
    return self.__P.pop(k, d)
1485 return self.__P.items()
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    return key in self.__P
1491 # ==============================================================================
1492 class State(object):
1494 Classe générale d'interface de type état
1497 name = "GenericVector",
1499 asPersistentVector = None,
1505 toBeChecked = False,
1508 Permet de définir un vecteur :
1509 - asVector : entrée des données, comme un vecteur compatible avec le
1510 constructeur de numpy.matrix, ou "True" si entrée par script.
1511 - asPersistentVector : entrée des données, comme une série de vecteurs
1512 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1513 type Persistence, ou "True" si entrée par script.
1514 - asScript : si un script valide est donné contenant une variable
1515 nommée "name", la variable est de type "asVector" (par défaut) ou
1516 "asPersistentVector" selon que l'une de ces variables est placée à
1518 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1519 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1520 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1521 nommée "name"), on récupère les colonnes et on les range ligne après
1522 ligne (colMajor=False, par défaut) ou colonne après colonne
1523 (colMajor=True). La variable résultante est de type "asVector" (par
1524 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1527 self.__name = str(name)
1528 self.__check = bool(toBeChecked)
1532 self.__is_vector = False
1533 self.__is_series = False
1535 if asScript is not None:
1536 __Vector, __Series = None, None
1537 if asPersistentVector:
1538 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1540 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1541 elif asDataFile is not None:
1542 __Vector, __Series = None, None
1543 if asPersistentVector:
1544 if colNames is not None:
1545 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1547 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1548 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1549 __Series = numpy.transpose(__Series)
1550 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1551 __Series = numpy.transpose(__Series)
1553 if colNames is not None:
1554 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1556 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1558 __Vector = numpy.ravel(__Vector, order = "F")
1560 __Vector = numpy.ravel(__Vector, order = "C")
1562 __Vector, __Series = asVector, asPersistentVector
1564 if __Vector is not None:
1565 self.__is_vector = True
1566 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1567 self.shape = self.__V.shape
1568 self.size = self.__V.size
1569 elif __Series is not None:
1570 self.__is_series = True
1571 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1572 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1573 if isinstance(__Series, str): __Series = eval(__Series)
1574 for member in __Series:
1575 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1578 if isinstance(self.__V.shape, (tuple, list)):
1579 self.shape = self.__V.shape
1581 self.shape = self.__V.shape()
1582 if len(self.shape) == 1:
1583 self.shape = (self.shape[0],1)
1584 self.size = self.shape[0] * self.shape[1]
1586 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1588 if scheduledBy is not None:
1589 self.__T = scheduledBy
1591 def getO(self, withScheduler=False):
1593 return self.__V, self.__T
1594 elif self.__T is None:
1600 "Vérification du type interne"
1601 return self.__is_vector
1604 "Vérification du type interne"
1605 return self.__is_series
1608 "x.__repr__() <==> repr(x)"
1609 return repr(self.__V)
1612 "x.__str__() <==> str(x)"
1613 return str(self.__V)
1615 # ==============================================================================
1616 class Covariance(object):
1618 Classe générale d'interface de type covariance
1621 name = "GenericCovariance",
1622 asCovariance = None,
1623 asEyeByScalar = None,
1624 asEyeByVector = None,
1627 toBeChecked = False,
1630 Permet de définir une covariance :
1631 - asCovariance : entrée des données, comme une matrice compatible avec
1632 le constructeur de numpy.matrix
1633 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1634 multiplicatif d'une matrice de corrélation identité, aucune matrice
1635 n'étant donc explicitement à donner
1636 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1637 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1638 n'étant donc explicitement à donner
1639 - asCovObject : entrée des données comme un objet python, qui a les
1640 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1641 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1642 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1643 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1644 pleine doit être vérifié
1646 self.__name = str(name)
1647 self.__check = bool(toBeChecked)
1650 self.__is_scalar = False
1651 self.__is_vector = False
1652 self.__is_matrix = False
1653 self.__is_object = False
1655 if asScript is not None:
1656 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1658 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1660 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1662 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1664 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1666 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1668 if __Scalar is not None:
1669 if numpy.matrix(__Scalar).size != 1:
1670 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1671 self.__is_scalar = True
1672 self.__C = numpy.abs( float(__Scalar) )
1675 elif __Vector is not None:
1676 self.__is_vector = True
1677 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1678 self.shape = (self.__C.size,self.__C.size)
1679 self.size = self.__C.size**2
1680 elif __Matrix is not None:
1681 self.__is_matrix = True
1682 self.__C = numpy.matrix( __Matrix, float )
1683 self.shape = self.__C.shape
1684 self.size = self.__C.size
1685 elif __Object is not None:
1686 self.__is_object = True
1688 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1689 if not hasattr(self.__C,at):
1690 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1691 if hasattr(self.__C,"shape"):
1692 self.shape = self.__C.shape
1695 if hasattr(self.__C,"size"):
1696 self.size = self.__C.size
1701 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1705 def __validate(self):
1707 if self.__C is None:
1708 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1709 if self.ismatrix() and min(self.shape) != max(self.shape):
1710 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1711 if self.isobject() and min(self.shape) != max(self.shape):
1712 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1713 if self.isscalar() and self.__C <= 0:
1714 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1715 if self.isvector() and (self.__C <= 0).any():
1716 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1717 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1719 L = numpy.linalg.cholesky( self.__C )
1721 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1722 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1724 L = self.__C.cholesky()
1726 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1729 "Vérification du type interne"
1730 return self.__is_scalar
1733 "Vérification du type interne"
1734 return self.__is_vector
1737 "Vérification du type interne"
1738 return self.__is_matrix
1741 "Vérification du type interne"
1742 return self.__is_object
1747 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1748 elif self.isvector():
1749 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1750 elif self.isscalar():
1751 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1752 elif self.isobject():
1753 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1755 return None # Indispensable
1760 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1761 elif self.isvector():
1762 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1763 elif self.isscalar():
1764 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1765 elif self.isobject():
1766 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1769 "Décomposition de Cholesky"
1771 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1772 elif self.isvector():
1773 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1774 elif self.isscalar():
1775 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1776 elif self.isobject() and hasattr(self.__C,"cholesky"):
1777 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1779 def choleskyI(self):
1780 "Inversion de la décomposition de Cholesky"
1782 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1783 elif self.isvector():
1784 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1785 elif self.isscalar():
1786 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1787 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1788 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1790 def diag(self, msize=None):
1791 "Diagonale de la matrice"
1793 return numpy.diag(self.__C)
1794 elif self.isvector():
1796 elif self.isscalar():
1798 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1800 return self.__C * numpy.ones(int(msize))
1801 elif self.isobject():
1802 return self.__C.diag()
1804 def asfullmatrix(self, msize=None):
1808 elif self.isvector():
1809 return numpy.matrix( numpy.diag(self.__C), float )
1810 elif self.isscalar():
1812 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1814 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1815 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1816 return self.__C.asfullmatrix()
1818 def trace(self, msize=None):
1819 "Trace de la matrice"
1821 return numpy.trace(self.__C)
1822 elif self.isvector():
1823 return float(numpy.sum(self.__C))
1824 elif self.isscalar():
1826 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1828 return self.__C * int(msize)
1829 elif self.isobject():
1830 return self.__C.trace()
1836 "x.__repr__() <==> repr(x)"
1837 return repr(self.__C)
1840 "x.__str__() <==> str(x)"
1841 return str(self.__C)
1843 def __add__(self, other):
1844 "x.__add__(y) <==> x+y"
1845 if self.ismatrix() or self.isobject():
1846 return self.__C + numpy.asmatrix(other)
1847 elif self.isvector() or self.isscalar():
1848 _A = numpy.asarray(other)
1849 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1850 return numpy.asmatrix(_A)
1852 def __radd__(self, other):
1853 "x.__radd__(y) <==> y+x"
1854 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1856 def __sub__(self, other):
1857 "x.__sub__(y) <==> x-y"
1858 if self.ismatrix() or self.isobject():
1859 return self.__C - numpy.asmatrix(other)
1860 elif self.isvector() or self.isscalar():
1861 _A = numpy.asarray(other)
1862 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1863 return numpy.asmatrix(_A)
1865 def __rsub__(self, other):
1866 "x.__rsub__(y) <==> y-x"
1867 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1870 "x.__neg__() <==> -x"
1873 def __mul__(self, other):
1874 "x.__mul__(y) <==> x*y"
1875 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1876 return self.__C * other
1877 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1878 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1879 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1880 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1881 return self.__C * numpy.asmatrix(other)
1883 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1884 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1885 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1886 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1887 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1888 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1890 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1891 elif self.isscalar() and isinstance(other,numpy.matrix):
1892 return self.__C * other
1893 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1894 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1895 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1897 return self.__C * numpy.asmatrix(other)
1898 elif self.isobject():
1899 return self.__C.__mul__(other)
1901 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1903 def __rmul__(self, other):
1904 "x.__rmul__(y) <==> y*x"
1905 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1906 return other * self.__C
1907 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1908 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1909 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1910 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1911 return numpy.asmatrix(other) * self.__C
1913 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1914 elif self.isvector() and isinstance(other,numpy.matrix):
1915 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1916 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1917 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1918 return numpy.asmatrix(numpy.array(other) * self.__C)
1920 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1921 elif self.isscalar() and isinstance(other,numpy.matrix):
1922 return other * self.__C
1923 elif self.isobject():
1924 return self.__C.__rmul__(other)
1926 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1929 "x.__len__() <==> len(x)"
1930 return self.shape[0]
1932 # ==============================================================================
1933 class Observer2Func(object):
1935 Creation d'une fonction d'observateur a partir de son texte
1937 def __init__(self, corps=""):
1938 self.__corps = corps
1939 def func(self,var,info):
1940 "Fonction d'observation"
1943 "Restitution du pointeur de fonction dans l'objet"
1946 # ==============================================================================
1947 class CaseLogger(object):
1949 Conservation des commandes de creation d'un cas
1951 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
1952 self.__name = str(__name)
1953 self.__objname = str(__objname)
1954 self.__logSerie = []
1955 self.__switchoff = False
1957 "TUI" :Interfaces._TUIViewer,
1958 "SCD" :Interfaces._SCDViewer,
1959 "YACS":Interfaces._YACSViewer,
1962 "TUI" :Interfaces._TUIViewer,
1963 "COM" :Interfaces._COMViewer,
1965 if __addViewers is not None:
1966 self.__viewers.update(dict(__addViewers))
1967 if __addLoaders is not None:
1968 self.__loaders.update(dict(__addLoaders))
1970 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
1971 "Enregistrement d'une commande individuelle"
1972 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
1973 if "self" in __keys: __keys.remove("self")
1974 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
1976 self.__switchoff = True
1978 self.__switchoff = False
1980 def dump(self, __filename=None, __format="TUI", __upa=""):
1981 "Restitution normalisée des commandes"
1982 if __format in self.__viewers:
1983 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
1985 raise ValueError("Dumping as \"%s\" is not available"%__format)
1986 return __formater.dump(__filename, __upa)
1988 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
1989 "Chargement normalisé des commandes"
1990 if __format in self.__loaders:
1991 __formater = self.__loaders[__format]()
1993 raise ValueError("Loading as \"%s\" is not available"%__format)
1994 return __formater.load(__filename, __content, __object)
1996 # ==============================================================================
1999 _extraArguments = None,
2000 _sFunction = lambda x: x,
2005 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2006 correspondante de valeurs de la fonction en argument
2008 # Vérifications et définitions initiales
2009 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2010 if not PlatformInfo.isIterable( __xserie ):
2011 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
2013 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2016 __mpWorkers = int(_mpWorkers)
2018 import multiprocessing
2029 if _extraArguments is None:
2031 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2032 for __xvalue in __xserie:
2033 _jobs.append( [__xvalue, ] + list(_extraArguments) )
2035 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2036 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2037 import multiprocessing
2038 with multiprocessing.Pool(__mpWorkers) as pool:
2039 __multiHX = pool.map( _sFunction, _jobs )
2042 # logging.debug("MULTF Internal multiprocessing calculation end")
2044 # logging.debug("MULTF Internal monoprocessing calculation begin")
2046 if _extraArguments is None:
2047 for __xvalue in __xserie:
2048 __multiHX.append( _sFunction( __xvalue ) )
2049 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2050 for __xvalue in __xserie:
2051 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2052 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2053 for __xvalue in __xserie:
2054 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2056 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2057 # logging.debug("MULTF Internal monoprocessing calculation end")
2059 # logging.debug("MULTF Internal multifonction calculations end")
2062 # ==============================================================================
2063 def CostFunction3D(_x,
2064 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2065 _HmX = None, # Simulation déjà faite de Hm(x)
2066 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2071 _SIV = False, # A résorber pour la 8.0
2072 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2073 _nPS = 0, # nbPreviousSteps
2074 _QM = "DA", # QualityMeasure
2075 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2076 _fRt = False, # Restitue ou pas la sortie étendue
2077 _sSc = True, # Stocke ou pas les SSC
2080 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2081 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2082 DFO, QuantileRegression
2088 for k in ["CostFunctionJ",
2094 "SimulatedObservationAtCurrentOptimum",
2095 "SimulatedObservationAtCurrentState",
2099 if hasattr(_SSV[k],"store"):
2100 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
2102 _X = numpy.asmatrix(numpy.ravel( _x )).T
2103 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2104 _SSV["CurrentState"].append( _X )
2106 if _HmX is not None:
2110 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2114 _HX = _Hm( _X, *_arg )
2115 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2117 if "SimulatedObservationAtCurrentState" in _SSC or \
2118 "SimulatedObservationAtCurrentOptimum" in _SSC:
2119 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
2121 if numpy.any(numpy.isnan(_HX)):
2122 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2124 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2125 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2126 if _BI is None or _RI is None:
2127 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2128 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2129 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2130 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2131 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2133 raise ValueError("Observation error covariance matrix has to be properly defined!")
2135 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2136 elif _QM in ["LeastSquares", "LS", "L2"]:
2138 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2139 elif _QM in ["AbsoluteValue", "L1"]:
2141 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2142 elif _QM in ["MaximumError", "ME"]:
2144 Jo = numpy.max( numpy.abs(_Y - _HX) )
2145 elif _QM in ["QR", "Null"]:
2149 raise ValueError("Unknown asked quality measure!")
2151 J = float( Jb ) + float( Jo )
2154 _SSV["CostFunctionJb"].append( Jb )
2155 _SSV["CostFunctionJo"].append( Jo )
2156 _SSV["CostFunctionJ" ].append( J )
2158 if "IndexOfOptimum" in _SSC or \
2159 "CurrentOptimum" in _SSC or \
2160 "SimulatedObservationAtCurrentOptimum" in _SSC:
2161 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2162 if "IndexOfOptimum" in _SSC:
2163 _SSV["IndexOfOptimum"].append( IndexMin )
2164 if "CurrentOptimum" in _SSC:
2165 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2166 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2167 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2172 if _QM in ["QR"]: # Pour le QuantileRegression
2177 # ==============================================================================
2178 if __name__ == "__main__":
2179 print('\n AUTODIAGNOSTIC\n')