1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2020 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence, PlatformInfo, Interfaces
36 from daCore import Templates
38 # ==============================================================================
39 class CacheManager(object):
# Cache of previously computed operator evaluations, used to skip redundant
# calculations.  Entries live newest-last in self.__listOPCV as tuples of
# (flattened input point, result Hx, norm of input point, operator name) —
# see the "Previous Calculated Points, Results, Point Norms, Operator"
# comment below and the append in storeValueInX.
# NOTE(review): this file is a sampled extract — the leading numbers are the
# original line numbers and many lines are absent (constructor header,
# else/continue/break/return lines, tuple closers).  Code is left
# byte-identical on purpose; only comments were added.
# NOTE: the spelling "lenght" (for "length") is used consistently throughout
# this module; renaming it would break keyword callers.
41 Classe générale de gestion d'un cache de calculs
# Constructor keyword defaults (the "def __init__" line itself is not in this
# extract):
# - toleranceInRedundancy: relative tolerance under which two input points are
#   declared identical (scaled by the stored point norm, see wasCalculatedIn)
# - lenghtOfRedundancy: maximum number of cached entries; -1 means
#   "auto-size on first store" (see storeValueInX)
44 toleranceInRedundancy = 1.e-18,
45 lenghtOfRedundancy = -1,
48 Les caractéristiques de tolérance peuvent être modifiées à la création.
50 self.__tolerBP = float(toleranceInRedundancy)
51 self.__lenghtOR = int(lenghtOfRedundancy)
52 self.__initlnOR = self.__lenghtOR
59 self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator
61 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
# Look up xValue for operator oName in the cache.  Scans from the most recent
# entry backwards (range(..., -1, -1)), limited to the last __lenghtOR
# entries.  Returns (found-flag, cached result) — the return line itself is
# on a missing line of the extract.
63 def wasCalculatedIn(self, xValue, oName="" ): #, info="" ):
64 "Vérifie l'existence d'un calcul correspondant à la valeur"
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# Skip entries recorded for another operator name or with a different size.
69 if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
# NOTE(review): the commented-out debug line below references
# self.__listOPCP — presumably a stale typo for __listOPCV (harmless since
# commented out).
70 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
# Cache hit: distance to the stored point is below the relative tolerance
# (tolerance scaled by the stored point norm, tuple index [2]).
72 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
74 __HxV = self.__listOPCV[i][1]
75 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
# Store the pair (x, Hx) for operator oName, evicting oldest entries (FIFO)
# if capacity is exceeded.
79 def storeValueInX(self, xValue, HxValue, oName="" ):
80 "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
# Lazy auto-sizing: a negative lenght means "not yet sized"; reserve
# 2*size+2 slots and register the first operator name as seen.
81 if self.__lenghtOR < 0:
82 self.__lenghtOR = 2 * xValue.size + 2
83 self.__initlnOR = self.__lenghtOR
84 self.__seenNames.append(str(oName))
# Every *new* operator name extends the capacity by another 2*size+2 slots.
85 if str(oName) not in self.__seenNames: # Etend la liste si nouveau
86 self.__lenghtOR += 2 * xValue.size + 2
87 self.__initlnOR += self.__lenghtOR
88 self.__seenNames.append(str(oName))
# FIFO eviction: drop oldest entries until within capacity.
89 while len(self.__listOPCV) > self.__lenghtOR:
90 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
91 self.__listOPCV.pop(0)
# Append (flattened copy of x, Hx, ||x||, operator name); the tuple's
# remaining elements and closing parenthesis are on missing lines.
92 self.__listOPCV.append( (
93 copy.copy(numpy.ravel(xValue)),
95 numpy.linalg.norm(xValue),
# The lines below belong to the cache disable()/enable() toggles whose "def"
# headers are not in this extract: disable() appears to save the current
# capacity before switching off, enable() to restore it — TODO confirm
# against the full source.
101 self.__initlnOR = self.__lenghtOR
103 self.__enabled = False
107 self.__lenghtOR = self.__initlnOR
108 self.__enabled = True
110 # ==============================================================================
111 class Operator(object):
# Generic single-operator interface: wraps either a Python (multi-)function
# ("Method" type) or a matrix ("Matrix" type) behind one common API
# (appliedTo / appliedControledFormTo / appliedInXTo / asMatrix), with call
# counting and a shared redundancy cache.
# NOTE(review): sampled extract — class attributes referenced below
# (Operator.CM, Operator.NbCallsAsMatrix/NbCallsAsMethod/NbCallsOfCached)
# are declared on lines missing from this view; code kept byte-identical,
# comments only added.
113 Classe générale d'interface de type opérateur simple
# Constructor keyword fragment (the "def __init__" header and the
# fromMethod/fromMatrix parameter lines are on missing lines):
121 name = "GenericOperator",
124 avoidingRedundancy = True,
125 inputAsMultiFunction = False,
126 enableMultiProcess = False,
127 extraArguments = None,
130 On construit un objet de ce type en fournissant, à l'aide de l'un des
131 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
134 - name : nom d'opérateur
135 - fromMethod : argument de type fonction Python
136 - fromMatrix : argument adapté au constructeur numpy.matrix
137 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
138 - inputAsMultiFunction : booléen indiquant une fonction explicitement
139 définie (ou pas) en multi-fonction
140 - extraArguments : arguments supplémentaires passés à la fonction de
141 base et ses dérivées (tuple ou dictionnaire)
143 self.__name = str(name)
# Per-instance call counters (matrix / method / cache hits).
144 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
145 self.__AvoidRC = bool( avoidingRedundancy )
146 self.__inputAsMF = bool( inputAsMultiFunction )
147 self.__mpEnabled = bool( enableMultiProcess )
148 self.__extraArgs = extraArguments
# Dispatch on how the operator is provided: already a multi-function, a
# mono-function wrapped into MultiFonction, or a plain matrix.
149 if fromMethod is not None and self.__inputAsMF:
150 self.__Method = fromMethod # logtimer(fromMethod)
152 self.__Type = "Method"
153 elif fromMethod is not None and not self.__inputAsMF:
154 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
156 self.__Type = "Method"
157 elif fromMatrix is not None:
# NOTE(review): numpy.float is a deprecated alias (removed in NumPy >= 1.24);
# plain float is the modern equivalent — left as-is in this doc pass.
159 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
160 self.__Type = "Matrix"
# Toggle the class-wide redundancy cache.
166 def disableAvoidingRedundancy(self):
168 Operator.CM.disable()
170 def enableAvoidingRedundancy(self):
# NOTE(review): only this disable() line (original line 175) of
# enableAvoidingRedundancy is visible — presumably the else-branch of an
# "if self.__AvoidRC: Operator.CM.enable()" test on missing lines; verify
# against the full source.
175 Operator.CM.disable()
# Apply the operator to xValue (single value or series).  HValue, when given,
# provides precomputed results that are stored into the cache instead of
# being recomputed.
181 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
183 Permet de restituer le résultat de l'application de l'opérateur à une
184 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
185 argument devant a priori être du bon type.
187 - les arguments par série sont :
188 - xValue : argument adapté pour appliquer l'opérateur
189 - HValue : valeur précalculée de l'opérateur en ce point
190 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
197 if HValue is not None:
201 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
# Precomputed-results path: wrap each HValue as a column matrix and store it
# in the cache alongside its input point.
203 if _HValue is not None:
204 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
206 for i in range(len(_HValue)):
207 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
209 Operator.CM.storeValueInX(_xValue[i],HxValue[-1],self.__name)
# Computation path: for each input, try the cache first; on a miss either
# apply the matrix directly or defer to the multi-function batch (_xserie is
# assembled on lines missing from this extract).
214 for i, xv in enumerate(_xValue):
216 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
218 __alreadyCalculated = False
220 if __alreadyCalculated:
221 self.__addOneCacheCall()
224 if self.__Matrix is not None:
225 self.__addOneMatrixCall()
226 _hv = self.__Matrix * xv
228 self.__addOneMethodCall()
232 HxValue.append( _hv )
# Batch evaluation of all cache misses through the user multi-function.
234 if len(_xserie)>0 and self.__Matrix is None:
235 if self.__extraArgs is None:
236 _hserie = self.__Method( _xserie ) # Calcul MF
238 _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
# Guard: a multi-function must return a sequence ("pop" is used as a cheap
# duck-typing probe).
239 if not hasattr(_hserie, "pop"):
240 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
246 Operator.CM.storeValueInX(_xv,_hv,self.__name)
248 if argsAsSerie: return HxValue
249 else: return HxValue[-1]
# Apply the operator to (x, u) pairs; when u is None only x is used.
251 def appliedControledFormTo(self, paires, argsAsSerie = False):
253 Permet de restituer le résultat de l'application de l'opérateur à des
254 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
255 argument devant a priori être du bon type. Si la uValue est None,
256 on suppose que l'opérateur ne s'applique qu'à xValue.
258 - paires : les arguments par paire sont :
259 - xValue : argument X adapté pour appliquer l'opérateur
260 - uValue : argument U adapté pour appliquer l'opérateur
261 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
263 if argsAsSerie: _xuValue = paires
264 else: _xuValue = (paires,)
265 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix path: only the X part of each pair is used.
267 if self.__Matrix is not None:
269 for paire in _xuValue:
270 _xValue, _uValue = paire
271 self.__addOneMatrixCall()
272 HxValue.append( self.__Matrix * _xValue )
# Method path: normalize pairs into the argument list for the multi-function.
# NOTE(review): as sampled this appears to append to the very list being
# iterated — missing lines presumably rebuild _xuValue as a fresh list before
# this loop; verify against the full source.
275 for paire in _xuValue:
277 _xValue, _uValue = paire
278 if _uValue is not None:
279 _xuValue.append( paire )
281 _xuValue.append( _xValue )
282 self.__addOneMethodCall( len(_xuValue) )
283 if self.__extraArgs is None:
284 HxValue = self.__Method( _xuValue ) # Calcul MF
286 HxValue = self.__Method( _xuValue, self.__extraArgs ) # Calcul MF
288 if argsAsSerie: return HxValue
289 else: return HxValue[-1]
# Apply the operator, linearized at xNominal, to xValue pairs; a matrix
# operator is valid at any nominal point so xNominal is ignored there.
291 def appliedInXTo(self, paires, argsAsSerie = False):
293 Permet de restituer le résultat de l'application de l'opérateur à une
294 série d'arguments xValue, sachant que l'opérateur est valable en
295 xNominal. Cette méthode se contente d'appliquer, son argument devant a
296 priori être du bon type. Si l'opérateur est linéaire car c'est une
297 matrice, alors il est valable en tout point nominal et xNominal peut
298 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
299 permet d'indiquer que l'argument est multi-paires.
301 - paires : les arguments par paire sont :
302 - xNominal : série d'arguments permettant de donner le point où
303 l'opérateur est construit pour être ensuite appliqué
304 - xValue : série d'arguments adaptés pour appliquer l'opérateur
305 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
307 if argsAsSerie: _nxValue = paires
308 else: _nxValue = (paires,)
309 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
311 if self.__Matrix is not None:
313 for paire in _nxValue:
314 _xNominal, _xValue = paire
315 self.__addOneMatrixCall()
316 HxValue.append( self.__Matrix * _xValue )
318 self.__addOneMethodCall( len(_nxValue) )
319 if self.__extraArgs is None:
320 HxValue = self.__Method( _nxValue ) # Calcul MF
322 HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
324 if argsAsSerie: return HxValue
325 else: return HxValue[-1]
# Return the operator in matrix form; a functional operator needs an
# operating point (ValueForMethodForm) to be evaluated at.  The string
# sentinel "UnknownVoidValue" marks "no point given" (None being a legal
# value, per the inline comment below).
327 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
329 Permet de renvoyer l'opérateur sous la forme d'une matrice
331 if self.__Matrix is not None:
332 self.__addOneMatrixCall()
333 mValue = [self.__Matrix,]
334 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
337 self.__addOneMethodCall( len(ValueForMethodForm) )
338 for _vfmf in ValueForMethodForm:
339 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
341 self.__addOneMethodCall()
342 mValue = self.__Method(((ValueForMethodForm, None),))
344 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
346 if argsAsSerie: return mValue
347 else: return mValue[-1]
# shape accessor: only available when the operator is a matrix (the "def"
# header is on a missing line).
351 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
352 la forme d'une matrice
354 if self.__Matrix is not None:
355 return self.__Matrix.shape
357 raise ValueError("Matrix form of the operator is not available, nor the shape")
# Return the call counters: indexes 0-3 are per-instance
# (total, matrix, method, cached), 4-7 are the class-wide equivalents.
# which=None returns the whole tuple, otherwise the indexed entry.
359 def nbcalls(self, which=None):
361 Renvoie les nombres d'évaluations de l'opérateur
364 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
365 self.__NbCallsAsMatrix,
366 self.__NbCallsAsMethod,
367 self.__NbCallsOfCached,
368 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
369 Operator.NbCallsAsMatrix,
370 Operator.NbCallsAsMethod,
371 Operator.NbCallsOfCached,
373 if which is None: return __nbcalls
374 else: return __nbcalls[which]
# Counter helpers: each bumps both the per-instance and the class-wide tally.
376 def __addOneMatrixCall(self):
377 "Comptabilise un appel"
378 self.__NbCallsAsMatrix += 1 # Decompte local
379 Operator.NbCallsAsMatrix += 1 # Decompte global
381 def __addOneMethodCall(self, nb = 1):
382 "Comptabilise un appel"
383 self.__NbCallsAsMethod += nb # Decompte local
384 Operator.NbCallsAsMethod += nb # Decompte global
386 def __addOneCacheCall(self):
387 "Comptabilise un appel"
388 self.__NbCallsOfCached += 1 # Decompte local
389 Operator.NbCallsOfCached += 1 # Decompte global
391 # ==============================================================================
392 class FullOperator(object):
# Full operator bundle: builds the Direct / Tangent / Adjoint Operator
# triple (stored in self.__FO) from one of several user-supplied forms —
# a single function (approximate derivatives), three functions, a script,
# or a matrix — plus optional parameters and an "applied in X" value set.
# NOTE(review): sampled extract — the "def __init__" header, the asMatrix/
# appliedInX/avoidRC/toBeChecked/scheduledBy parameter lines, __Parameters
# initialization and several else-branches are on missing lines; code kept
# byte-identical, comments only added.
394 Classe générale d'interface de type opérateur complet
395 (Direct, Linéaire Tangent, Adjoint)
398 name = "GenericFullOperator",
400 asOneFunction = None, # 1 Fonction
401 asThreeFunctions = None, # 3 Fonctions in a dictionary
402 asScript = None, # 1 or 3 Fonction(s) by script
403 asDict = None, # Parameters
405 extraArguments = None,
407 inputAsMF = False,# Fonction(s) as Multi-Functions
412 self.__name = str(name)
413 self.__check = bool(toBeChecked)
414 self.__extraArgs = extraArguments
# Normalize the multiprocessing switches: a global EnableMultiProcessing=True
# is translated into "parallel derivatives, serial evaluation", and missing
# keys default to False.
419 if (asDict is not None) and isinstance(asDict, dict):
420 __Parameters.update( asDict )
421 # Priorité à EnableMultiProcessingInDerivatives=True
422 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
423 __Parameters["EnableMultiProcessingInDerivatives"] = True
424 __Parameters["EnableMultiProcessingInEvaluation"] = False
425 if "EnableMultiProcessingInDerivatives" not in __Parameters:
426 __Parameters["EnableMultiProcessingInDerivatives"] = False
427 if __Parameters["EnableMultiProcessingInDerivatives"]:
428 __Parameters["EnableMultiProcessingInEvaluation"] = False
429 if "EnableMultiProcessingInEvaluation" not in __Parameters:
430 __Parameters["EnableMultiProcessingInEvaluation"] = False
# Legacy alias: "withIncrement" maps onto "DifferentialIncrement".
431 if "withIncrement" in __Parameters: # Temporaire
432 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Script form: import either a matrix, a single DirectOperator (approximate
# derivatives), or the full Direct/Tangent/Adjoint triple from the script.
434 if asScript is not None:
435 __Matrix, __Function = None, None
437 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
439 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
440 __Function.update({"useApproximatedDerivatives":True})
441 __Function.update(__Parameters)
442 elif asThreeFunctions:
444 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
445 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
446 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
448 __Function.update(__Parameters)
# Direct-argument forms (non-script): one function, or a three-function dict.
451 if asOneFunction is not None:
452 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
453 if asOneFunction["Direct"] is not None:
454 __Function = asOneFunction
456 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
458 __Function = { "Direct":asOneFunction }
459 __Function.update({"useApproximatedDerivatives":True})
460 __Function.update(__Parameters)
# Three-function form: a real Tangent/Adjoint pair takes precedence; a
# Direct-only dict falls back to approximate derivatives.
461 elif asThreeFunctions is not None:
462 if isinstance(asThreeFunctions, dict) and \
463 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
464 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
465 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
466 __Function = asThreeFunctions
467 elif isinstance(asThreeFunctions, dict) and \
468 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
469 __Function = asThreeFunctions
470 __Function.update({"useApproximatedDerivatives":True})
472 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# When no Direct is provided, the Tangent stands in for it.
473 if "Direct" not in asThreeFunctions:
474 __Function["Direct"] = asThreeFunctions["Tangent"]
475 __Function.update(__Parameters)
479 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
480 # for k in ("Direct", "Tangent", "Adjoint"):
481 # if k in __Function and hasattr(__Function[k],"__class__"):
482 # if type(__Function[k]) is type(self.__init__):
483 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize appliedInX into a dict keyed by "HXb" when a bare value is given.
485 if appliedInX is not None and isinstance(appliedInX, dict):
486 __appliedInX = appliedInX
487 elif appliedInX is not None:
488 __appliedInX = {"HXb":appliedInX}
492 if scheduledBy is not None:
493 self.__T = scheduledBy
# Approximate-derivatives branch: fill in FDApproximation defaults, then
# build the Direct/Tangent/Adjoint Operators from the finite-difference
# engine (FDA).
495 if isinstance(__Function, dict) and \
496 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
497 ("Direct" in __Function) and (__Function["Direct"] is not None):
498 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
499 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
500 if "withdX" not in __Function: __Function["withdX"] = None
501 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
502 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
503 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
504 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
505 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
506 from daCore import NumericObjects
507 FDA = NumericObjects.FDApproximation(
509 Function = __Function["Direct"],
510 centeredDF = __Function["CenteredFiniteDifference"],
511 increment = __Function["DifferentialIncrement"],
512 dX = __Function["withdX"],
513 avoidingRedundancy = __Function["withAvoidingRedundancy"],
514 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
515 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
516 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
517 mpWorkers = __Function["NumberOfProcesses"],
518 mfEnabled = __Function["withmfEnabled"],
520 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
521 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
522 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Exact three-function branch: wrap each user function directly.
523 elif isinstance(__Function, dict) and \
524 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
525 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
526 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
527 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
528 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Matrix branch: the linear operator, its own matrix as tangent, and its
# transpose as adjoint.  NOTE(review): numpy.float is deprecated (removed in
# NumPy >= 1.24); numpy.matrix itself is discouraged — left as-is here.
529 elif asMatrix is not None:
530 __matrice = numpy.matrix( __Matrix, numpy.float )
531 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
532 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
533 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
536 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Coerce each appliedInX entry to a column matrix, whatever its input shape.
538 if __appliedInX is not None:
539 self.__FO["AppliedInX"] = {}
540 for key in list(__appliedInX.keys()):
541 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
542 # Pour le cas où l'on a une vraie matrice
543 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
544 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
545 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
546 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
548 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
550 self.__FO["AppliedInX"] = None
# repr/str delegate to the internal operator dictionary.
556 "x.__repr__() <==> repr(x)"
557 return repr(self.__FO)
560 "x.__str__() <==> str(x)"
561 return str(self.__FO)
563 # ==============================================================================
564 class Algorithm(object):
566 Classe générale d'interface de type algorithme
568 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
569 d'assimilation, en fournissant un container (dictionnaire) de variables
570 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
572 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
574 def __init__(self, name):
# Initialize the generic storage of an elementary algorithm: one Persistence
# container per storable variable (self.StoredVariables), plus
# case-insensitive canonical-name maps for parameters and stored names.
576 L'initialisation présente permet de fabriquer des variables de stockage
577 disponibles de manière générique dans les algorithmes élémentaires. Ces
578 variables de stockage sont ensuite conservées dans un dictionnaire
579 interne à l'objet, mais auquel on accède par la méthode "get".
581 Les variables prévues sont :
582 - APosterioriCorrelations : matrice de corrélations de la matrice A
583 - APosterioriCovariance : matrice de covariances a posteriori : A
584 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
585 - APosterioriVariances : vecteur des variances de la matrice A
586 - Analysis : vecteur d'analyse : Xa
587 - BMA : Background moins Analysis : Xa - Xb
588 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
589 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
590 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
591 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
592 - CostFunctionJo : partie observations de la fonction-coût : Jo
593 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
594 - CurrentOptimum : état optimal courant lors d'itérations
595 - CurrentState : état courant lors d'itérations
596 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
597 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
598 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
599 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
600 - Innovation : l'innovation : d = Y - H(X)
601 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
602 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
603 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
604 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
605 - KalmanGainAtOptimum : gain de Kalman à l'optimum
606 - MahalanobisConsistency : indicateur de consistance des covariances
607 - OMA : Observation moins Analyse : Y - Xa
608 - OMB : Observation moins Background : Y - Xb
609 - ForecastState : état prédit courant lors d'itérations
610 - Residu : dans le cas des algorithmes de vérification
611 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
612 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
613 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
614 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
615 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
616 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
617 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
618 On peut rajouter des variables à stocker dans l'initialisation de
619 l'algorithme élémentaire qui va hériter de cette classe
621 logging.debug("%s Initialisation", str(name))
622 self._m = PlatformInfo.SystemUsage()
624 self._name = str( name )
625 self._parameters = {"StoreSupplementaryCalculations":[]}
626 self.__required_parameters = {}
627 self.__required_inputs = {
628 "RequiredInputValues":{"mandatory":(), "optional":()},
629 "ClassificationTags":[],
631 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
632 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
633 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
# One Persistence container per storable variable.
# NOTE(review): InnovationAtCurrentAnalysis and
# SimulatedObservationAtCurrentAnalysis are registered below but are not
# listed in the docstring above — docstring looks out of date.
635 self.StoredVariables = {}
636 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
637 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
638 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
639 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
640 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
641 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
642 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
643 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
644 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
645 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
646 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
647 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
648 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
649 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
650 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
651 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
652 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
653 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
654 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
655 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
656 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
657 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
658 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
659 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
660 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
661 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
662 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
663 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
664 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
665 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
666 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
667 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
668 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
669 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
670 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
671 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
672 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
673 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Build the lowercase -> canonical lookup tables used for case-insensitive
# access to stored variables and parameters.
675 for k in self.StoredVariables:
676 self.__canonical_stored_name[k.lower()] = k
# NOTE(review): the value v is unused in this loop — only the keys matter.
678 for k, v in self.__variable_names_not_public.items():
679 self.__canonical_parameter_name[k.lower()] = k
680 self.__canonical_parameter_name["algorithm"] = "Algorithm"
681 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
683 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
# Common pre-run phase for every elementary algorithm: merge user Parameters
# with defaults, validate the vectors (Xb/Y/U), covariances (B/R/Q) and
# operators (HO/EM/CM) against this algorithm's declared required inputs,
# then normalize Bounds and the optimizer verbosity settings.
685 logging.debug("%s Lancement", self._name)
686 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
688 # Mise a jour des paramètres internes avec le contenu de Parameters, en
689 # reprenant les valeurs par défauts pour toutes celles non définies
690 self.__setParameters(Parameters, reset=True)
691 for k, v in self.__variable_names_not_public.items():
692 if k not in self._parameters: self.__setParameters( {k:v} )
694 # Corrections et compléments des vecteurs
# Local validator: raise if a mandatory vector is missing, otherwise just
# log its status (optional / not required / set with its size).
695 def __test_vvalue(argument, variable, argname, symbol=None):
696 if symbol is None: symbol = variable
698 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
699 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
700 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
701 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
703 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
705 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
707 __test_vvalue( Xb, "Xb", "Background or initial state" )
708 __test_vvalue( Y, "Y", "Observation" )
709 __test_vvalue( U, "U", "Control" )
711 # Corrections et compléments des covariances
# Same validation pattern for the error covariance matrices.
712 def __test_cvalue(argument, variable, argname, symbol=None):
713 if symbol is None: symbol = variable
715 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
716 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
717 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
718 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
720 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
722 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
724 __test_cvalue( B, "B", "Background" )
725 __test_cvalue( R, "R", "Observation" )
726 __test_cvalue( Q, "Q", "Evolution" )
728 # Corrections et compléments des opérateurs
# Same validation pattern for the operators; an empty dict counts as unset.
729 def __test_ovalue(argument, variable, argname, symbol=None):
730 if symbol is None: symbol = variable
731 if argument is None or (isinstance(argument,dict) and len(argument)==0):
732 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
733 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
734 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
735 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
737 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
739 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
741 __test_ovalue( HO, "HO", "Observation", "H" )
742 __test_ovalue( EM, "EM", "Evolution", "M" )
743 __test_ovalue( CM, "CM", "Control Model", "C" )
# Bounds: keep only a non-empty list/tuple, otherwise normalize to None.
745 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
746 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
748 self._parameters["Bounds"] = None
# Map the logging level onto the optimizer verbosity flags.
# NOTE(review): scipy.optimize.tnc is a private submodule in recent SciPy —
# MSG_ALL/MSG_NONE access may need updating against the installed version.
750 if logging.getLogger().level < logging.WARNING:
751 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
752 if PlatformInfo.has_scipy:
753 import scipy.optimize
754 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
756 self._parameters["optmessages"] = 15
758 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
759 if PlatformInfo.has_scipy:
760 import scipy.optimize
761 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
763 self._parameters["optmessages"] = 15
767 def _post_run(self,_oH=None):
769 if ("StoreSupplementaryCalculations" in self._parameters) and \
770 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
771 for _A in self.StoredVariables["APosterioriCovariance"]:
772 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
773 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
774 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
775 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
776 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
777 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
778 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
779 self.StoredVariables["APosterioriCorrelations"].store( _C )
780 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
781 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
782 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
783 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
784 logging.debug("%s Terminé", self._name)
787 def _toStore(self, key):
788 "True if in StoreSupplementaryCalculations, else False"
789 return key in self._parameters["StoreSupplementaryCalculations"]
791 def get(self, key=None):
793 Renvoie l'une des variables stockées identifiée par la clé, ou le
794 dictionnaire de l'ensemble des variables disponibles en l'absence de
795 clé. Ce sont directement les variables sous forme objet qui sont
796 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
797 des classes de persistance.
800 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
802 return self.StoredVariables
804 def __contains__(self, key=None):
805 "D.__contains__(k) -> True if D has a key k, else False"
806 if key is None or key.lower() not in self.__canonical_stored_name:
809 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
812 "D.keys() -> list of D's keys"
813 if hasattr(self, "StoredVariables"):
814 return self.StoredVariables.keys()
819 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
820 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
821 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
826 raise TypeError("pop expected at least 1 arguments, got 0")
827 "If key is not found, d is returned if given, otherwise KeyError is raised"
833 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
835 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
836 sa forme mathématique la plus naturelle possible.
838 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
840 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
842 Permet de définir dans l'algorithme des paramètres requis et leurs
843 caractéristiques par défaut.
846 raise ValueError("A name is mandatory to define a required parameter.")
848 self.__required_parameters[name] = {
850 "typecast" : typecast,
856 self.__canonical_parameter_name[name.lower()] = name
857 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
859 def getRequiredParameters(self, noDetails=True):
861 Renvoie la liste des noms de paramètres requis ou directement le
862 dictionnaire des paramètres requis.
865 return sorted(self.__required_parameters.keys())
867 return self.__required_parameters
869 def setParameterValue(self, name=None, value=None):
871 Renvoie la valeur d'un paramètre requis de manière contrôlée
873 __k = self.__canonical_parameter_name[name.lower()]
874 default = self.__required_parameters[__k]["default"]
875 typecast = self.__required_parameters[__k]["typecast"]
876 minval = self.__required_parameters[__k]["minval"]
877 maxval = self.__required_parameters[__k]["maxval"]
878 listval = self.__required_parameters[__k]["listval"]
880 if value is None and default is None:
882 elif value is None and default is not None:
883 if typecast is None: __val = default
884 else: __val = typecast( default )
886 if typecast is None: __val = value
889 __val = typecast( value )
891 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
893 if minval is not None and (numpy.array(__val, float) < minval).any():
894 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
895 if maxval is not None and (numpy.array(__val, float) > maxval).any():
896 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
897 if listval is not None:
898 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
901 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
902 elif __val not in listval:
903 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
907 def requireInputArguments(self, mandatory=(), optional=()):
909 Permet d'imposer des arguments de calcul requis en entrée.
911 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
912 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
914 def getInputArguments(self):
916 Permet d'obtenir les listes des arguments de calcul requis en entrée.
918 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
920 def setAttributes(self, tags=()):
922 Permet d'adjoindre des attributs comme les tags de classification.
923 Renvoie la liste actuelle dans tous les cas.
925 self.__required_inputs["ClassificationTags"].extend( tags )
926 return self.__required_inputs["ClassificationTags"]
928 def __setParameters(self, fromDico={}, reset=False):
930 Permet de stocker les paramètres reçus dans le dictionnaire interne.
932 self._parameters.update( fromDico )
933 __inverse_fromDico_keys = {}
934 for k in fromDico.keys():
935 if k.lower() in self.__canonical_parameter_name:
936 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
937 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
938 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
939 for k in self.__required_parameters.keys():
940 if k in __canonic_fromDico_keys:
941 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
943 self._parameters[k] = self.setParameterValue(k)
946 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
948 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General interface class for the algorithm action and its parameters.
    """
    def __init__(self,
                 name        = "GenericAlgorithm",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None,
                ):
        """
        Build the interface from an algorithm name given directly, as a
        dictionary of parameters, or loaded from a user script.
        NOTE(review): parameter list and attribute initialisations were
        partially missing from the gapped listing; reconstructed.
        """
        self.__name = str(name)
        self.__A    = None
        self.__P    = {}
        #
        self.__algorithm     = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        #
        self.updateParameters( asDict, asScript )
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        #
        self.__setAlgorithm( self.__A )
        #
        self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
984 def updateParameters(self,
988 "Mise a jour des parametres"
989 if asDict is None and asScript is not None:
990 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
994 if __Dict is not None:
995 self.__P.update( dict(__Dict) )
997 def executePythonScheme(self, asDictAO = None):
998 "Permet de lancer le calcul d'assimilation"
999 Operator.CM.clearCache()
1001 if not isinstance(asDictAO, dict):
1002 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1003 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1004 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1005 else: self.__Xb = None
1006 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1007 else: self.__Y = asDictAO["Observation"]
1008 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1009 else: self.__U = asDictAO["ControlInput"]
1010 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1011 else: self.__HO = asDictAO["ObservationOperator"]
1012 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1013 else: self.__EM = asDictAO["EvolutionModel"]
1014 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1015 else: self.__CM = asDictAO["ControlModel"]
1016 self.__B = asDictAO["BackgroundError"]
1017 self.__R = asDictAO["ObservationError"]
1018 self.__Q = asDictAO["EvolutionError"]
1020 self.__shape_validate()
1022 self.__algorithm.run(
1032 Parameters = self.__P,
1036 def executeYACSScheme(self, FileName=None):
1037 "Permet de lancer le calcul d'assimilation"
1038 if FileName is None or not os.path.exists(FileName):
1039 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1041 __file = os.path.abspath(FileName)
1042 logging.debug("The YACS file name is \"%s\"."%__file)
1043 if not PlatformInfo.has_salome or \
1044 not PlatformInfo.has_yacs or \
1045 not PlatformInfo.has_adao:
1046 raise ImportError("\n\n"+\
1047 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1048 "Please load the right environnement before trying to use it.\n")
1051 import SALOMERuntime
1053 SALOMERuntime.RuntimeSALOME_setRuntime()
1055 r = pilot.getRuntime()
1056 xmlLoader = loader.YACSLoader()
1057 xmlLoader.registerProcCataLoader()
1059 catalogAd = r.loadCatalog("proc", __file)
1060 r.addCatalog(catalogAd)
1065 p = xmlLoader.load(__file)
1066 except IOError as ex:
1067 print("The YACS XML schema file can not be loaded: %s"%(ex,))
1069 logger = p.getLogger("parser")
1070 if not logger.isEmpty():
1071 print("The imported YACS XML schema has errors on parsing:")
1072 print(logger.getStr())
1075 print("The YACS XML schema is not valid and will not be executed:")
1076 print(p.getErrorReport())
1078 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1079 p.checkConsistency(info)
1080 if info.areWarningsOrErrors():
1081 print("The YACS XML schema is not coherent and will not be executed:")
1082 print(info.getGlobalRepr())
1084 e = pilot.ExecutorSwig()
1086 if p.getEffectiveState() != pilot.DONE:
1087 print(p.getErrorReport())
1091 def get(self, key = None):
1092 "Vérifie l'existence d'une clé de variable ou de paramètres"
1093 if key in self.__algorithm:
1094 return self.__algorithm.get( key )
1095 elif key in self.__P:
1096 return self.__P[key]
1098 allvariables = self.__P
1099 for k in self.__variable_names_not_public: allvariables.pop(k, None)
1102 def pop(self, k, d):
1103 "Necessaire pour le pickling"
1104 return self.__algorithm.pop(k, d)
1106 def getAlgorithmRequiredParameters(self, noDetails=True):
1107 "Renvoie la liste des paramètres requis selon l'algorithme"
1108 return self.__algorithm.getRequiredParameters(noDetails)
1110 def getAlgorithmInputArguments(self):
1111 "Renvoie la liste des entrées requises selon l'algorithme"
1112 return self.__algorithm.getInputArguments()
1114 def getAlgorithmAttributes(self):
1115 "Renvoie la liste des attributs selon l'algorithme"
1116 return self.__algorithm.setAttributes()
1118 def setObserver(self, __V, __O, __I, __S):
1119 if self.__algorithm is None \
1120 or isinstance(self.__algorithm, dict) \
1121 or not hasattr(self.__algorithm,"StoredVariables"):
1122 raise ValueError("No observer can be build before choosing an algorithm.")
1123 if __V not in self.__algorithm:
1124 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1126 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1129 HookParameters = __I,
1132 def removeObserver(self, __V, __O, __A = False):
1133 if self.__algorithm is None \
1134 or isinstance(self.__algorithm, dict) \
1135 or not hasattr(self.__algorithm,"StoredVariables"):
1136 raise ValueError("No observer can be removed before choosing an algorithm.")
1137 if __V not in self.__algorithm:
1138 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1140 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1145 def hasObserver(self, __V):
1146 if self.__algorithm is None \
1147 or isinstance(self.__algorithm, dict) \
1148 or not hasattr(self.__algorithm,"StoredVariables"):
1150 if __V not in self.__algorithm:
1152 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1155 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1156 for k in self.__variable_names_not_public:
1157 if k in __allvariables: __allvariables.remove(k)
1158 return __allvariables
1160 def __contains__(self, key=None):
1161 "D.__contains__(k) -> True if D has a key k, else False"
1162 return key in self.__algorithm or key in self.__P
1165 "x.__repr__() <==> repr(x)"
1166 return repr(self.__A)+", "+repr(self.__P)
1169 "x.__str__() <==> str(x)"
1170 return str(self.__A)+", "+str(self.__P)
1172 def __setAlgorithm(self, choice = None ):
1174 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1175 d'assimilation. L'argument est un champ caractère se rapportant au nom
1176 d'un algorithme réalisant l'opération sur les arguments fixes.
1179 raise ValueError("Error: algorithm choice has to be given")
1180 if self.__algorithmName is not None:
1181 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1182 daDirectory = "daAlgorithms"
1184 # Recherche explicitement le fichier complet
1185 # ------------------------------------------
1187 for directory in sys.path:
1188 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1189 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1190 if module_path is None:
1191 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1193 # Importe le fichier complet comme un module
1194 # ------------------------------------------
1196 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1197 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1198 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1199 raise ImportError("this module does not define a valid elementary algorithm.")
1200 self.__algorithmName = str(choice)
1201 sys.path = sys_path_tmp ; del sys_path_tmp
1202 except ImportError as e:
1203 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1205 # Instancie un objet du type élémentaire du fichier
1206 # -------------------------------------------------
1207 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1210 def __shape_validate(self):
1212 Validation de la correspondance correcte des tailles des variables et
1213 des matrices s'il y en a.
1215 if self.__Xb is None: __Xb_shape = (0,)
1216 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1217 elif hasattr(self.__Xb,"shape"):
1218 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1219 else: __Xb_shape = self.__Xb.shape()
1220 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1222 if self.__Y is None: __Y_shape = (0,)
1223 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1224 elif hasattr(self.__Y,"shape"):
1225 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1226 else: __Y_shape = self.__Y.shape()
1227 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1229 if self.__U is None: __U_shape = (0,)
1230 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1231 elif hasattr(self.__U,"shape"):
1232 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1233 else: __U_shape = self.__U.shape()
1234 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1236 if self.__B is None: __B_shape = (0,0)
1237 elif hasattr(self.__B,"shape"):
1238 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1239 else: __B_shape = self.__B.shape()
1240 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1242 if self.__R is None: __R_shape = (0,0)
1243 elif hasattr(self.__R,"shape"):
1244 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1245 else: __R_shape = self.__R.shape()
1246 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1248 if self.__Q is None: __Q_shape = (0,0)
1249 elif hasattr(self.__Q,"shape"):
1250 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1251 else: __Q_shape = self.__Q.shape()
1252 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1254 if len(self.__HO) == 0: __HO_shape = (0,0)
1255 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1256 elif hasattr(self.__HO["Direct"],"shape"):
1257 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1258 else: __HO_shape = self.__HO["Direct"].shape()
1259 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1261 if len(self.__EM) == 0: __EM_shape = (0,0)
1262 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1263 elif hasattr(self.__EM["Direct"],"shape"):
1264 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1265 else: __EM_shape = self.__EM["Direct"].shape()
1266 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1268 if len(self.__CM) == 0: __CM_shape = (0,0)
1269 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1270 elif hasattr(self.__CM["Direct"],"shape"):
1271 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1272 else: __CM_shape = self.__CM["Direct"].shape()
1273 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1275 # Vérification des conditions
1276 # ---------------------------
1277 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1278 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1279 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1280 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1282 if not( min(__B_shape) == max(__B_shape) ):
1283 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1284 if not( min(__R_shape) == max(__R_shape) ):
1285 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1286 if not( min(__Q_shape) == max(__Q_shape) ):
1287 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1288 if not( min(__EM_shape) == max(__EM_shape) ):
1289 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1291 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1292 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1293 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1294 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1295 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1296 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1297 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1298 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1300 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1301 if self.__algorithmName in ["EnsembleBlue",]:
1302 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1303 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1304 for member in asPersistentVector:
1305 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1306 __Xb_shape = min(__B_shape)
1308 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1310 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1311 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1313 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1314 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1316 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1317 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1319 if ("Bounds" in self.__P) \
1320 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1321 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1322 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1323 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1327 # ==============================================================================
class RegulationAndParameters(object):
    """
    General interface class for the regulation action and its parameters.
    """
    def __init__(self,
                 name        = "GenericRegulation",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None,
                ):
        """
        The algorithm name and the parameters may each be given directly or
        loaded from a user script.
        """
        self.__name = str(name)
        self.__P    = {}
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Vérifie l'existence d'une clé de variable ou de paramètres"
        # Unknown keys (including None) return the whole parameter dictionary.
        if key in self.__P:
            return self.__P[key]
        else:
            return self.__P
1366 # ==============================================================================
class DataObserver(object):
    """
    General interface class of observer type: attaches an observer function
    to one or several stored variables of an algorithm.
    """
    def __init__(self,
                 name        = "GenericObserver",
                 onVariable  = None,
                 asTemplate  = None,
                 asString    = None,
                 asScript    = None,
                 asObsObject = None,
                 withInfo    = None,
                 scheduledBy = None,
                 withAlgo    = None,
                ):
        """
        Normalise the observed variable name(s) and info, build the observer
        callable (given directly or compiled from a template/string/script),
        then register it on each variable of the given algorithm.
        """
        self.__name = str(name)
        self.__V    = None
        self.__O    = None
        self.__I    = None
        #
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                # Without explicit info, each variable is its own info string.
                self.__I = self.__V
            else:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            else:
                self.__I = (str(withInfo),)
        else:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        if asObsObject is not None:
            self.__O = asObsObject
        else:
            # Build the observer function from its textual definition.
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            ename = self.__V[k]
            einfo = self.__I[k]
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            else:
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1429 # ==============================================================================
class UserScript(object):
    """
    General interface class for a user script text: the text can be given
    directly, picked from a named template, or loaded from a script file.
    """
    def __init__(self,
                 name       = "GenericUserScript",
                 asTemplate = None,
                 asString   = None,
                 asScript   = None,
                ):
        """
        Resolve the script text with the priority: explicit string, known
        template (per the object name), script file, empty string.
        """
        self.__name = str(name)
        #
        if asString is not None:
            self.__F = asString
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        else:
            self.__F = ""

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__F)
1463 # ==============================================================================
class ExternalParameters(object):
    """
    General interface class holding a named dictionary of external
    parameters, given directly or loaded from a user script.
    """
    def __init__(self,
                 name     = "GenericExternalParameters",
                 asDict   = None,
                 asScript = None,
                ):
        "Store the name and load the initial parameters."
        self.__name = str(name)
        self.__P    = {}
        #
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
                 asDict   = None,
                 asScript = None,
                ):
        "Mise a jour des parametres"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        "Return the value for key, or the list of keys when key is unknown."
        if key in self.__P:
            return self.__P[key]
        else:
            return list(self.__P.keys())

    def keys(self):
        "D.keys() -> list of D's keys"
        return list(self.__P.keys())

    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        return self.__P.pop(k, d)

    def items(self):
        "D.items() -> a view of D's (key, value) pairs"
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1512 # ==============================================================================
class State(object):
    """
    General interface class of state type (a single vector or a series of
    vectors), with optional loading from a script or data file.
    """
    def __init__(self,
                 name               = "GenericVector",
                 asVector           = None,
                 asPersistentVector = None,
                 asScript           = None,
                 asDataFile         = None,
                 colNames           = None,
                 colMajor           = False,
                 scheduledBy        = None,
                 toBeChecked        = False,
                ):
        """
        Define a vector:
        - asVector : data input, as any vector compatible with the
          numpy.matrix constructor, or "True" for script input.
        - asPersistentVector : data input, as a series of vectors compatible
          with the numpy.matrix constructor, as a Persistence object, or
          "True" for script input.
        - asScript : if a valid script is given containing a variable named
          "name", the variable is of type "asVector" (default) or
          "asPersistentVector" depending on which of these flags is set.
        - asDataFile : if one or more valid files with column values are
          given (columns named "colNames", or a column named "name" when no
          column names are given), columns are collected row after row
          (colMajor=False, default) or column after column (colMajor=True);
          the resulting variable is of type "asVector" (default) or
          "asPersistentVector".
        """
        self.__name  = str(name)
        self.__check = bool(toBeChecked)
        #
        self.__V         = None
        self.__T         = None
        self.__is_vector = False
        self.__is_series = False
        #
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            else:
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                else:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz files come back transposed with respect to text files,
                # hence the opposite transposition conditions.
                if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
            else:
                if colNames is not None:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                else:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                if bool(colMajor):
                    __Vector = numpy.ravel(__Vector, order = "F")
                else:
                    __Vector = numpy.ravel(__Vector, order = "C")
        else:
            __Vector, __Series = asVector, asPersistentVector
        #
        if __Vector is not None:
            self.__is_vector = True
            # Fix: 'numpy.float' is a removed deprecated alias of the builtin
            # 'float'; use the builtin directly.
            self.__V    = numpy.matrix( numpy.asmatrix(__Vector).A1, float ).T
            self.shape  = self.__V.shape
            self.size   = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # SECURITY(review): eval() on a string series executes
                # arbitrary code; only safe for trusted input.
                if isinstance(__Series, str): __Series = eval(__Series)
                for member in __Series:
                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, float ).T )
            else:
                self.__V = __Series
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
            else:
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
        else:
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        #
        if scheduledBy is not None:
            self.__T = scheduledBy

    def getO(self, withScheduler=False):
        "Return the internal object, optionally together with its scheduler."
        if withScheduler:
            return self.__V, self.__T
        elif self.__T is None:
            return self.__V
        else:
            # NOTE(review): branch reconstructed — confirm against full file.
            return self.__V

    def isvector(self):
        "Internal type check: True when holding a single vector."
        return self.__is_vector

    def isseries(self):
        "Internal type check: True when holding a series of vectors."
        return self.__is_series

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)
1636 # ==============================================================================
class Covariance(object):
    """
    Generic covariance interface class.
    """
    # NOTE(review): the "def __init__(self," header line is elided in this
    # excerpt; the lines below are its keyword parameters and body.
                 name          = "GenericCovariance",
                 asCovariance  = None,
                 asEyeByScalar = None,
                 asEyeByVector = None,
                 toBeChecked   = False,
        """
        Allows to define a covariance, through one of:
        - asCovariance : data input, as a matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar : data input as a single variance scalar, multiplying
          an implicit identity correlation matrix, so that no matrix has to
          be explicitly given
        - asEyeByVector : data input as a single variance vector, to be put
          on the diagonal of a correlation matrix, so that no matrix has to
          be explicitly given
        - asCovObject : data input as a Python object, which has the
          mandatory methods "getT", "getI", "diag", "trace", "__add__",
          "__sub__", "__neg__", "__mul__", "__rmul__" and the optional ones
          "shape", "size", "cholesky", "choleskyI", "asfullmatrix",
          "__repr__", "__str__"
        - toBeChecked : boolean stating whether the SPD character of the
          full matrix has to be checked
        """
        self.__name  = str(name)
        self.__check = bool(toBeChecked)
        #
        # Exactly one of these mutually exclusive type flags is set below.
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        #
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
            # NOTE(review): the branch condition lines (presumably testing
            # asEyeByScalar / asEyeByVector / asCovObject) are elided here,
            # as is the final "else:" before the direct-assignment line.
                __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        if __Scalar is not None:
            # Scalar variance multiplying an implicit identity matrix.
            if numpy.matrix(__Scalar).size != 1:
                raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
            # (the shape/size assignment lines for this case are elided)
        elif __Vector is not None:
            # Variance vector put on the diagonal of an implicit correlation matrix.
            self.__is_vector = True
            self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            # Full explicit covariance matrix.
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            # User object: check the mandatory covariance-like interface.
            self.__is_object = True
            # (the "self.__C = __Object"-style assignment line is elided)
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            # (the "else:" fallback for a missing shape is elided)
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
        # NOTE(review): the final fall-through branch is elided; this
        # commented-out raise documents its former content.
            # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
    def __validate(self):
        "Validation of the internal covariance representation"
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        # Square-shape checks for the full-matrix and object representations.
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        # Positivity checks for the scalar and vector representations.
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # SPD check by attempting a Cholesky factorization; the "try:" and
        # "except:" wrapper lines are elided in this excerpt, so the raise
        # below belongs to the (elided) exception handler.
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
                L = numpy.linalg.cholesky( self.__C )
                raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
                L = self.__C.cholesky()
                raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        # NOTE(review): the "def isscalar/isvector/ismatrix/isobject(self):"
        # header lines are elided in this excerpt; each pair below is the
        # docstring and return of one of these type predicates.
        "Internal type check"
        return self.__is_scalar
        "Internal type check"
        return self.__is_vector
        "Internal type check"
        return self.__is_matrix
        "Internal type check"
        return self.__is_object
        # NOTE(review): the "def getI(self):" header and its leading
        # "if self.ismatrix():" line are elided; getI returns the inverse
        # as a new Covariance object.
            return Covariance(self.__name+"I", asCovariance = self.__C.I )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject():
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
            # The return below sits under an elided "else:" line.
            return None # Indispensable
        # NOTE(review): the "def getT(self):" header and its leading
        # "if self.ismatrix():" line are elided; getT returns the transpose
        # as a new Covariance object.
            return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject():
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
        # NOTE(review): the "def cholesky(self):" header and its leading
        # "if self.ismatrix():" line are elided below.
        "Cholesky decomposition"
            return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
    def choleskyI(self):
        "Inversion of the Cholesky decomposition"
        # NOTE(review): the leading "if self.ismatrix():" line is elided.
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
    def diag(self, msize=None):
        "Diagonal of the matrix"
        # NOTE(review): the leading "if self.ismatrix():" line, the vector
        # case body, the "if msize is None:" test and its "else:" are elided
        # in this excerpt.
            return numpy.diag(self.__C)
        elif self.isvector():
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return self.__C * numpy.ones(int(msize))
        elif self.isobject():
            return self.__C.diag()
    def asfullmatrix(self, msize=None):
        # Dense matrix view of the covariance; the docstring and the
        # full-matrix branch lines are elided in this excerpt.
        elif self.isvector():
            return numpy.matrix( numpy.diag(self.__C), float )
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()
    def trace(self, msize=None):
        "Trace of the matrix"
        # NOTE(review): the leading "if self.ismatrix():" line and the
        # "if msize is None:" test lines are elided in this excerpt.
            return numpy.trace(self.__C)
        elif self.isvector():
            return float(numpy.sum(self.__C))
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
        # NOTE(review): the "def __repr__(self):" and "def __str__(self):"
        # header lines are elided below.
        "x.__repr__() <==> repr(x)"
        return repr(self.__C)
        "x.__str__() <==> str(x)"
        return str(self.__C)
1864 def __add__(self, other):
1865 "x.__add__(y) <==> x+y"
1866 if self.ismatrix() or self.isobject():
1867 return self.__C + numpy.asmatrix(other)
1868 elif self.isvector() or self.isscalar():
1869 _A = numpy.asarray(other)
1870 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1871 return numpy.asmatrix(_A)
1873 def __radd__(self, other):
1874 "x.__radd__(y) <==> y+x"
1875 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1877 def __sub__(self, other):
1878 "x.__sub__(y) <==> x-y"
1879 if self.ismatrix() or self.isobject():
1880 return self.__C - numpy.asmatrix(other)
1881 elif self.isvector() or self.isscalar():
1882 _A = numpy.asarray(other)
1883 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1884 return numpy.asmatrix(_A)
1886 def __rsub__(self, other):
1887 "x.__rsub__(y) <==> y-x"
1888 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
        # NOTE(review): the "def __neg__(self):" header and its return line
        # are elided in this excerpt; only the docstring remains.
        "x.__neg__() <==> -x"
    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        # Dispatch on the internal representation and on the operand type.
        # The "else:" lines preceding each raise are elided in this excerpt.
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            # Column-like operands are flattened to a column vector first.
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
                return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
            raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        # Same dispatch as __mul__, with the operand on the left side.
        # The "else:" lines preceding each raise are elided in this excerpt.
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix(other) * self.__C
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmul__(other)
            raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
        # NOTE(review): the "def __len__(self):" header line is elided below.
        "x.__len__() <==> len(x)"
        return self.shape[0]
1953 # ==============================================================================
class Observer2Func(object):
    """
    Creation of an observer function from its source text.
    """
    def __init__(self, corps=""):
        # Observer source text, presumably executed later by "func" —
        # the executing line is elided in this excerpt (TODO confirm).
        self.__corps = corps
    def func(self,var,info):
        "Observation function"
        # NOTE(review): the body of "func" is elided in this excerpt.
        # NOTE(review): the "def getfunc(self):" header and return lines
        # are elided below; only its docstring remains.
        "Restitution of the function pointer held by the object"
1967 # ==============================================================================
class CaseLogger(object):
    """
    Preservation of the commands used to create a case.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []   # Ordered record of registered commands
        self.__switchoff = False
        # NOTE(review): the "self.__viewers = {" / "self.__loaders = {"
        # opening and closing brace lines (and possibly some entries) are
        # elided in this excerpt; the entries below belong to those dicts.
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
        if __addViewers is not None:
            # User-supplied additional output formatters.
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            # User-supplied additional input loaders.
            self.__loaders.update(dict(__addLoaders))
    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Recording of an individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            # NOTE(review): the condition lines toggling the switch-off flag
            # (presumably "if __switchoff:" / "if not __switchoff:") are
            # elided in this excerpt.
                self.__switchoff = True
            self.__switchoff = False
    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized restitution of the commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        # (the "else:" line is elided in this excerpt)
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)
    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of the commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        # (the "else:" line is elided in this excerpt)
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
2017 # ==============================================================================
# NOTE(review): the "def MultiFonction(__xserie," header line and several
# interior lines (remaining defaults, the multiprocessing enabling test,
# the job construction for the no-extra-arguments case, and the final
# "return" line) are elided in this excerpt.
        _extraArguments = None,
        _sFunction      = lambda x: x,
    """
    For an ordered list of input vectors, returns the corresponding ordered
    list of values of the function given as argument.
    """
    # Initial checks and definitions
    # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    # Worker count: the branch assigning a default and the "else:" line
    # are elided in this excerpt.
    if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
        __mpWorkers = int(_mpWorkers)
        # Availability probe for multiprocessing (its try/except wrapper
        # lines are elided in this excerpt).
        import multiprocessing
        # Jobs construction: pack each input value with the extra arguments.
        if _extraArguments is None:
            # (the job construction for this case is elided in this excerpt)
        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                _jobs.append( [__xvalue, ] + list(_extraArguments) )
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
        # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
        import multiprocessing
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
        # logging.debug("MULTF Internal multiprocessing calculation end")
        # logging.debug("MULTF Internal monoprocessing calculation begin")
        # Sequential evaluation, dispatching on the extra-arguments type.
        if _extraArguments is None:
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue ) )
        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
        elif _extraArguments is not None and isinstance(_extraArguments, dict):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
        # logging.debug("MULTF Internal monoprocessing calculation end")
    # logging.debug("MULTF Internal multifonction calculations end")
2083 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,  # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,  # Simulation of Hm(x) already done
                   _arg = None,  # Extra arguments for Hm, as a tuple
                   # NOTE(review): four parameter lines are elided here —
                   # presumably _BI, _RI, _Y, _Xb, all used below.
                   _SIV = False, # To be phased out for 8.0
                   _SSC = [],    # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,     # nbPreviousSteps
                   _QM  = "DA",  # QualityMeasure
                   _SSV = {},    # Input and/or output : self.StoredVariables
                   _fRt = False, # Whether to return the extended output
                   _sSc = True,  # Whether to store the SSC
    # NOTE(review): _SSC and _SSV have mutable default arguments ([] and {});
    # they are shared across calls made without these arguments, and _SSV is
    # mutated below — verify callers always pass their own objects.
    """
    General cost function, useful for the static/3D algorithms : 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO,
    SA, Tabu, DFO, QuantileRegression
    """
    # Make the required storage variables usable through "append"; the
    # enclosing condition, some list entries and the "_SSV[k] = []" default
    # lines are elided in this excerpt.
        for k in ["CostFunctionJ",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    # Operator evaluation: reuse _HmX when provided, otherwise apply _Hm;
    # the "_HX = _HmX", "else:", "if _Hm is None:" and "if _arg is None:"
    # lines are elided in this excerpt.
    if _HmX is not None:
        raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    # Quality measure computation; a failed simulation yields NaN costs.
    # The "else:" line after the NaN test is elided in this excerpt.
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            # (the "if _RI is None:" test and "Jb = 0." lines are elided)
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            # (the Jb/Jo assignments for this case and the "else:" are elided)
            raise ValueError("Unknown asked quality measure!")
        #
        J   = float( Jb ) + float( Jo )
    # Storage of the cost terms; the enclosing condition line is elided.
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        # Index of the best cost since step _nPS.
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
        if "IndexOfOptimum" in _SSC:
            _SSV["IndexOfOptimum"].append( IndexMin )
        if "CurrentOptimum" in _SSC:
            _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
        if "SimulatedObservationAtCurrentOptimum" in _SSC:
            _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    # Return value selection; the surrounding "_fRt" branch and return
    # lines are elided in this excerpt.
        if _QM in ["QR"]: # For QuantileRegression
2198 # ==============================================================================
2199 if __name__ == "__main__":
2200 print('\n AUTODIAGNOSTIC\n')