1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
# NOTE(review): this chunk is a damaged extraction of the module — every line
# carries a stray leading line number, indentation is lost, and many interior
# lines are missing (e.g. the "def __init__" header above the parameter lines
# below).  The code text is kept byte-identical; only comments are added or
# translated to English.
#
# CacheManager: bounded FIFO cache of previously computed operator
# evaluations, used to avoid recomputing an already-seen point (accessed
# through the class-level Operator.CM below).
40 class CacheManager(object):
42 Classe générale de gestion d'un cache de calculs
# Constructor keyword parameters (their enclosing "def __init__(self, ..."
# header line is missing from this view).  The misspelling "lenght" is the
# project-wide public spelling and must not be "fixed" in isolation.
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 Les caractéristiques de tolérance peuvent être modifiées à la création.
# Relative tolerance for duplicate detection, and maximum cache length
# (-1 means: sized lazily on the first storeValueInX call, see below).
51 self.__tolerBP = float(toleranceInRedundancy)
52 self.__lenghtOR = int(lenghtOfRedundancy)
53 self.__initlnOR = self.__lenghtOR
# Each cache entry is a tuple (flattened point, result, point norm, operator
# name) — see the [0], [1], [2], [3] indices used in wasCalculatedIn below.
60 self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator
62 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
#
# Look a value up in the cache, scanning the most recent entries first
# (reversed range).  The "return" lines of this method are among the lines
# missing from this view.
64 def wasCalculatedIn(self, xValue, oName="" ): #, info="" ):
65 "Check the existence of a calculation matching the given value"
69 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# An entry is skipped when the operator name or the point size differ.
70 if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
71 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
# Duplicate test: distance to the stored point below the tolerance scaled by
# the stored point norm (i.e. a relative comparison).
73 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
75 __HxV = self.__listOPCV[i][1]
76 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
#
# Store one (x, Hx) pair for operator oName, sizing and trimming the cache.
80 def storeValueInX(self, xValue, HxValue, oName="" ):
81 "Store, for an operator o, a calculation Hx matching the value x"
# Lazy sizing: on the first call the cache length is derived from the point
# size (2*n+2 entries per operator name).
82 if self.__lenghtOR < 0:
83 self.__lenghtOR = 2 * xValue.size + 2
84 self.__initlnOR = self.__lenghtOR
85 self.__seenNames.append(str(oName))
86 if str(oName) not in self.__seenNames: # Extend the list if the name is new
87 self.__lenghtOR += 2 * xValue.size + 2
88 self.__initlnOR += self.__lenghtOR
89 self.__seenNames.append(str(oName))
# FIFO eviction of the oldest entries.
# NOTE(review): list.pop(0) is O(n); a collections.deque would give O(1)
# left pops — structure left unchanged here.
90 while len(self.__listOPCV) > self.__lenghtOR:
91 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
92 self.__listOPCV.pop(0)
93 self.__listOPCV.append( (
94 copy.copy(numpy.ravel(xValue)),
96 numpy.linalg.norm(xValue),
# The two fragments below belong to the cache disable()/enable() methods;
# their "def" header lines are among the lines missing from this view.
# disable(): remember the current length and switch the cache off.
102 self.__initlnOR = self.__lenghtOR
104 self.__enabled = False
# enable(): restore the remembered length and switch the cache back on.
108 self.__lenghtOR = self.__initlnOR
109 self.__enabled = True
111 # ==============================================================================
# NOTE(review): damaged extraction — stray leading line numbers, lost
# indentation and missing interior lines ("def __init__" header, "else:"
# branches, "return" statements, '"""' delimiters).  Code text is kept
# byte-identical; only comments are added or translated to English.
#
# Operator: generic single-operator interface wrapping either a Python
# (multi-)function or a matrix, with call counting and optional redundancy
# avoidance through the class-level cache Operator.CM (defined on lines not
# visible in this chunk — presumably a CacheManager instance, to confirm).
112 class Operator(object):
114 Classe générale d'interface de type opérateur simple
# Constructor keyword parameters (the "def __init__(self, ..." header and the
# "fromMethod"/"fromMatrix" parameter lines are missing from this view):
122 name = "GenericOperator",
125 avoidingRedundancy = True,
126 inputAsMultiFunction = False,
127 enableMultiProcess = False,
128 extraArguments = None,
131 On construit un objet de ce type en fournissant, à l'aide de l'un des
132 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
135 - name : nom d'opérateur
136 - fromMethod : argument de type fonction Python
137 - fromMatrix : argument adapté au constructeur numpy.matrix
138 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
139 - inputAsMultiFunction : booléen indiquant une fonction explicitement
140 définie (ou pas) en multi-fonction
141 - extraArguments : arguments supplémentaires passés à la fonction de
142 base et ses dérivées (tuple ou dictionnaire)
144 self.__name = str(name)
# Per-instance call counters: matrix calls, method calls, cache hits.
145 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
146 self.__AvoidRC = bool( avoidingRedundancy )
147 self.__inputAsMF = bool( inputAsMultiFunction )
148 self.__mpEnabled = bool( enableMultiProcess )
149 self.__extraArgs = extraArguments
# A user function already written as a multi-function is used directly; a
# mono-function is wrapped by MultiFonction to present the same interface.
150 if fromMethod is not None and self.__inputAsMF:
151 self.__Method = fromMethod # logtimer(fromMethod)
153 self.__Type = "Method"
154 elif fromMethod is not None and not self.__inputAsMF:
155 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
157 self.__Type = "Method"
158 elif fromMatrix is not None:
# NOTE(review): numpy.matrix is deprecated and the "numpy.float" alias was
# removed in NumPy 1.24 — this line needs migration on recent NumPy.
160 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
161 self.__Type = "Matrix"
#
# Turn the shared redundancy cache off for all operators.
167 def disableAvoidingRedundancy(self):
169 Operator.CM.disable()
#
# Turn the shared redundancy cache back on.
# NOTE(review): only a disable() call is visible below; the branch calling
# Operator.CM.enable() when self.__AvoidRC is true appears to be among the
# missing lines — verify against the original file.
171 def enableAvoidingRedundancy(self):
176 Operator.CM.disable()
#
# Apply the operator to one value or a series of values, reusing the
# pre-computed HValue or the shared cache when available.
182 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
184 Permet de restituer le résultat de l'application de l'opérateur à une
185 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
186 argument devant a priori être du bon type.
188 - les arguments par série sont :
189 - xValue : argument adapté pour appliquer l'opérateur
190 - HValue : valeur précalculée de l'opérateur en ce point
191 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# When HValue is given, it is used directly (and cached) instead of calling
# the operator.
198 if HValue is not None:
202 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
204 if _HValue is not None:
205 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
207 for i in range(len(_HValue)):
208 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
210 Operator.CM.storeValueInX(_xValue[i],HxValue[-1],self.__name)
# Otherwise: look each point up in the cache; cache hits are counted, misses
# go either through the matrix product or are batched for the multi-function.
215 for i, xv in enumerate(_xValue):
217 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
219 __alreadyCalculated = False
221 if __alreadyCalculated:
222 self.__addOneCacheCall()
225 if self.__Matrix is not None:
226 self.__addOneMatrixCall()
227 _hv = self.__Matrix * xv
229 self.__addOneMethodCall()
233 HxValue.append( _hv )
# Remaining (non-cached, non-matrix) points are evaluated in one batched
# multi-function call; _xserie is built on lines missing from this view.
235 if len(_xserie)>0 and self.__Matrix is None:
236 if self.__extraArgs is None:
237 _hserie = self.__Method( _xserie ) # Multi-function computation
239 _hserie = self.__Method( _xserie, self.__extraArgs ) # Multi-function computation
240 if not hasattr(_hserie, "pop"):
241 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
247 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Single input -> single result; series input -> list of results.
249 if argsAsSerie: return HxValue
250 else: return HxValue[-1]
#
# Apply the operator to (x, u) pairs; when u is None the operator is applied
# to x alone.
252 def appliedControledFormTo(self, paires, argsAsSerie = False):
254 Permet de restituer le résultat de l'application de l'opérateur à des
255 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
256 argument devant a priori être du bon type. Si la uValue est None,
257 on suppose que l'opérateur ne s'applique qu'à xValue.
259 - paires : les arguments par paire sont :
260 - xValue : argument X adapté pour appliquer l'opérateur
261 - uValue : argument U adapté pour appliquer l'opérateur
262 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
264 if argsAsSerie: _xuValue = paires
265 else: _xuValue = (paires,)
266 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix case: only X is used, U is ignored.
268 if self.__Matrix is not None:
270 for paire in _xuValue:
271 _xValue, _uValue = paire
272 self.__addOneMatrixCall()
273 HxValue.append( self.__Matrix * _xValue )
# Method case.
# NOTE(review): as shown, the loop appends to the very sequence it iterates
# (_xuValue); in context lines are missing here — the original most likely
# builds a separate argument list.  Verify against the original file.
276 for paire in _xuValue:
278 _xValue, _uValue = paire
279 if _uValue is not None:
280 _xuValue.append( paire )
282 _xuValue.append( _xValue )
283 self.__addOneMethodCall( len(_xuValue) )
284 if self.__extraArgs is None:
285 HxValue = self.__Method( _xuValue ) # Multi-function computation
287 HxValue = self.__Method( _xuValue, self.__extraArgs ) # Multi-function computation
289 if argsAsSerie: return HxValue
290 else: return HxValue[-1]
#
# Apply the operator built at xNominal to xValue; for a matrix operator the
# nominal point is irrelevant (linear case) and is ignored.
292 def appliedInXTo(self, paires, argsAsSerie = False):
294 Permet de restituer le résultat de l'application de l'opérateur à une
295 série d'arguments xValue, sachant que l'opérateur est valable en
296 xNominal. Cette méthode se contente d'appliquer, son argument devant a
297 priori être du bon type. Si l'opérateur est linéaire car c'est une
298 matrice, alors il est valable en tout point nominal et xNominal peut
299 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
300 permet d'indiquer que l'argument est multi-paires.
302 - paires : les arguments par paire sont :
303 - xNominal : série d'arguments permettant de donner le point où
304 l'opérateur est construit pour être ensuite appliqué
305 - xValue : série d'arguments adaptés pour appliquer l'opérateur
306 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
308 if argsAsSerie: _nxValue = paires
309 else: _nxValue = (paires,)
310 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
312 if self.__Matrix is not None:
314 for paire in _nxValue:
315 _xNominal, _xValue = paire
316 self.__addOneMatrixCall()
317 HxValue.append( self.__Matrix * _xValue )
# Method case (the "else:" line is missing from this view): one batched call.
319 self.__addOneMethodCall( len(_nxValue) )
320 if self.__extraArgs is None:
321 HxValue = self.__Method( _nxValue ) # Multi-function computation
323 HxValue = self.__Method( _nxValue, self.__extraArgs ) # Multi-function computation
325 if argsAsSerie: return HxValue
326 else: return HxValue[-1]
#
# Return the operator in matrix form; for a method-defined operator an
# operating point is required to evaluate it.
328 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
330 Permet de renvoyer l'opérateur sous la forme d'une matrice
332 if self.__Matrix is not None:
333 self.__addOneMatrixCall()
334 mValue = [self.__Matrix,]
335 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
338 self.__addOneMethodCall( len(ValueForMethodForm) )
339 for _vfmf in ValueForMethodForm:
340 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
# Single-point branch (its guarding line is missing from this view).
342 self.__addOneMethodCall()
343 mValue = self.__Method(((ValueForMethodForm, None),))
345 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
347 if argsAsSerie: return mValue
348 else: return mValue[-1]
#
# The fragment below belongs to a shape accessor whose "def" header line is
# missing from this view: returns the numpy shape when a matrix is available.
352 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
353 la forme d'une matrice
355 if self.__Matrix is not None:
356 return self.__Matrix.shape
358 raise ValueError("Matrix form of the operator is not available, nor the shape")
#
# Return the evaluation counters: first the per-instance counts, then the
# class-wide (all instances) counts; "which" selects one entry of the tuple.
360 def nbcalls(self, which=None):
362 Renvoie les nombres d'évaluations de l'opérateur
# The "__nbcalls = (" opening line is among the lines missing from this view.
365 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
366 self.__NbCallsAsMatrix,
367 self.__NbCallsAsMethod,
368 self.__NbCallsOfCached,
369 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
370 Operator.NbCallsAsMatrix,
371 Operator.NbCallsAsMethod,
372 Operator.NbCallsOfCached,
374 if which is None: return __nbcalls
375 else: return __nbcalls[which]
#
# Increment the matrix-product call counters.
377 def __addOneMatrixCall(self):
378 "Count one call"
379 self.__NbCallsAsMatrix += 1 # Local count
380 Operator.NbCallsAsMatrix += 1 # Global count
#
# Increment the method call counters by nb calls at once.
382 def __addOneMethodCall(self, nb = 1):
383 "Count one call"
384 self.__NbCallsAsMethod += nb # Local count
385 Operator.NbCallsAsMethod += nb # Global count
#
# Increment the cache-hit counters.
387 def __addOneCacheCall(self):
388 "Count one call"
389 self.__NbCallsOfCached += 1 # Local count
390 Operator.NbCallsOfCached += 1 # Global count
392 # ==============================================================================
# NOTE(review): damaged extraction — stray leading line numbers, lost
# indentation and missing interior lines.  Code text is kept byte-identical;
# only comments are added or translated to English.
#
# FullOperator: builds the three Operator objects (Direct, Tangent, Adjoint)
# of a complete operator from either a matrix, a single function (with
# finite-difference approximated derivatives via FDApproximation), or three
# explicit functions, and stores them in the self.__FO dictionary.
393 class FullOperator(object):
395 Classe générale d'interface de type opérateur complet
396 (Direct, Linéaire Tangent, Adjoint)
# Constructor keyword parameters (the "def __init__(self, ..." header and
# several parameter lines, e.g. asMatrix/appliedInX/avoidRC/scheduledBy,
# are missing from this view):
399 name = "GenericFullOperator",
401 asOneFunction = None, # 1 Function
402 asThreeFunctions = None, # 3 Functions in a dictionary
403 asScript = None, # 1 or 3 Function(s) by script
404 asDict = None, # Parameters
406 extraArguments = None,
408 inputAsMF = False,# Function(s) as Multi-Functions
413 self.__name = str(name)
414 self.__check = bool(toBeChecked)
415 self.__extraArgs = extraArguments
# Merge user parameters and normalize the multiprocessing switches.
420 if (asDict is not None) and isinstance(asDict, dict):
421 __Parameters.update( asDict )
422 # Priority to EnableMultiProcessingInDerivatives=True
423 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
424 __Parameters["EnableMultiProcessingInDerivatives"] = True
425 __Parameters["EnableMultiProcessingInEvaluation"] = False
426 if "EnableMultiProcessingInDerivatives" not in __Parameters:
427 __Parameters["EnableMultiProcessingInDerivatives"] = False
428 if __Parameters["EnableMultiProcessingInDerivatives"]:
429 __Parameters["EnableMultiProcessingInEvaluation"] = False
430 if "EnableMultiProcessingInEvaluation" not in __Parameters:
431 __Parameters["EnableMultiProcessingInEvaluation"] = False
432 if "withIncrement" in __Parameters: # Temporary
433 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Script-based definition: load the matrix or the function(s) from a script
# (the guarding condition lines between these branches are partly missing).
435 if asScript is not None:
436 __Matrix, __Function = None, None
438 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
440 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
441 __Function.update({"useApproximatedDerivatives":True})
442 __Function.update(__Parameters)
443 elif asThreeFunctions:
445 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
446 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
447 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
449 __Function.update(__Parameters)
# Direct in-code definition: one function (approximated derivatives) or a
# Tangent/Adjoint pair with an optional Direct.
452 if asOneFunction is not None:
453 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
454 if asOneFunction["Direct"] is not None:
455 __Function = asOneFunction
457 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
459 __Function = { "Direct":asOneFunction }
460 __Function.update({"useApproximatedDerivatives":True})
461 __Function.update(__Parameters)
462 elif asThreeFunctions is not None:
463 if isinstance(asThreeFunctions, dict) and \
464 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
465 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
466 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
467 __Function = asThreeFunctions
468 elif isinstance(asThreeFunctions, dict) and \
469 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
470 __Function = asThreeFunctions
471 __Function.update({"useApproximatedDerivatives":True})
473 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# A missing Direct falls back to the Tangent function.
474 if "Direct" not in asThreeFunctions:
475 __Function["Direct"] = asThreeFunctions["Tangent"]
476 __Function.update(__Parameters)
480 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
481 # for k in ("Direct", "Tangent", "Adjoint"):
482 # if k in __Function and hasattr(__Function[k],"__class__"):
483 # if type(__Function[k]) is type(self.__init__):
484 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the "applied in X" precomputed values into a dictionary.
486 if appliedInX is not None and isinstance(appliedInX, dict):
487 __appliedInX = appliedInX
488 elif appliedInX is not None:
489 __appliedInX = {"HXb":appliedInX}
493 if scheduledBy is not None:
494 self.__T = scheduledBy
# Case 1: single Direct function with finite-difference derivatives — fill
# the FDA defaults, then build the three Operators from the approximation.
496 if isinstance(__Function, dict) and \
497 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
498 ("Direct" in __Function) and (__Function["Direct"] is not None):
499 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
500 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
501 if "withdX" not in __Function: __Function["withdX"] = None
502 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
503 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
504 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
505 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
506 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Imported here (not at module top) to avoid a circular daCore import —
# presumably; to confirm against the full module.
507 from daCore import NumericObjects
508 FDA = NumericObjects.FDApproximation(
510 Function = __Function["Direct"],
511 centeredDF = __Function["CenteredFiniteDifference"],
512 increment = __Function["DifferentialIncrement"],
513 dX = __Function["withdX"],
514 avoidingRedundancy = __Function["withAvoidingRedundancy"],
515 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
516 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
517 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
518 mpWorkers = __Function["NumberOfProcesses"],
519 mfEnabled = __Function["withmfEnabled"],
521 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
522 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
523 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: three explicit functions.
524 elif isinstance(__Function, dict) and \
525 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
526 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
527 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
528 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
529 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: a matrix — Tangent is the matrix itself, Adjoint its transpose.
# NOTE(review): "numpy.float" was removed in NumPy 1.24; numpy.matrix is
# deprecated — this line needs migration on recent NumPy.
530 elif asMatrix is not None:
531 __matrice = numpy.matrix( __Matrix, numpy.float )
532 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
533 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
534 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
537 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Store the precomputed "applied in X" values as column matrices.
# NOTE(review): isinstance() would be the idiomatic test instead of
# "type(x) is type(...)" — left unchanged here.
539 if __appliedInX is not None:
540 self.__FO["AppliedInX"] = {}
541 for key in list(__appliedInX.keys()):
542 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
543 # Case of a true matrix
544 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
545 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
546 # Case of a vector represented as a 2-dimensional array
547 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
549 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
551 self.__FO["AppliedInX"] = None
# __repr__/__str__ bodies; their "def" header lines are among the lines
# missing from this view.
557 "x.__repr__() <==> repr(x)"
558 return repr(self.__FO)
561 "x.__str__() <==> str(x)"
562 return str(self.__FO)
564 # ==============================================================================
# NOTE(review): the Algorithm class continues past the end of this chunk and
# interior lines are missing; the header and its (quote-less, extraction-
# damaged) docstring fragment are kept byte-identical below.
#
# Algorithm: generic base class for elementary assimilation algorithms,
# providing a dictionary of initialized persistent storage variables and the
# accessors to them; subclasses must implement "run".
565 class Algorithm(object):
567 Classe générale d'interface de type algorithme
569 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
570 d'assimilation, en fournissant un container (dictionnaire) de variables
571 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
573 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
# NOTE(review): damaged extraction — stray leading line numbers, lost
# indentation and missing interior lines ('"""' delimiters, some dict-literal
# closers).  Code text is kept byte-identical; only comments are added or
# translated to English.
#
# Initialize the generic storage: one Persistence container per standard
# stored variable, plus the case-insensitive name-canonicalization maps.
575 def __init__(self, name):
577 L'initialisation présente permet de fabriquer des variables de stockage
578 disponibles de manière générique dans les algorithmes élémentaires. Ces
579 variables de stockage sont ensuite conservées dans un dictionnaire
580 interne à l'objet, mais auquel on accède par la méthode "get".
582 Les variables prévues sont :
583 - APosterioriCorrelations : matrice de corrélations de la matrice A
584 - APosterioriCovariance : matrice de covariances a posteriori : A
585 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
586 - APosterioriVariances : vecteur des variances de la matrice A
587 - Analysis : vecteur d'analyse : Xa
588 - BMA : Background moins Analysis : Xa - Xb
589 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
590 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
591 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
592 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
593 - CostFunctionJo : partie observations de la fonction-coût : Jo
594 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
595 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
596 - CurrentOptimum : état optimal courant lors d'itérations
597 - CurrentState : état courant lors d'itérations
598 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
599 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
600 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
601 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
602 - Innovation : l'innovation : d = Y - H(X)
603 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
604 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
605 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
606 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
607 - KalmanGainAtOptimum : gain de Kalman à l'optimum
608 - MahalanobisConsistency : indicateur de consistance des covariances
609 - OMA : Observation moins Analyse : Y - Xa
610 - OMB : Observation moins Background : Y - Xb
611 - ForecastState : état prédit courant lors d'itérations
612 - Residu : dans le cas des algorithmes de vérification
613 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
614 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
615 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
616 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
617 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
618 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
619 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
620 On peut rajouter des variables à stocker dans l'initialisation de
621 l'algorithme élémentaire qui va hériter de cette classe
623 logging.debug("%s Initialisation", str(name))
# System memory probe, used by _pre_run logging below.
624 self._m = PlatformInfo.SystemUsage()
626 self._name = str( name )
627 self._parameters = {"StoreSupplementaryCalculations":[]}
628 self.__required_parameters = {}
629 self.__required_inputs = {
630 "RequiredInputValues":{"mandatory":(), "optional":()},
631 "ClassificationTags":[],
633 self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
634 self.__canonical_parameter_name = {} # Mapping "lower" -> "correct"
635 self.__canonical_stored_name = {} # Mapping "lower" -> "correct"
# One Persistence container per standard stored variable.
637 self.StoredVariables = {}
638 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
639 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
640 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
641 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
642 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
643 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
644 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
645 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
646 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
647 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
648 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
649 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
650 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
651 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
652 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
653 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
654 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
655 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
656 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
657 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
658 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
659 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
660 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
661 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
662 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
663 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
664 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
665 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
666 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
667 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
668 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
669 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
670 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
671 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
672 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
673 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
674 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
675 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
676 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Build the case-insensitive lookup maps for stored and parameter names.
678 for k in self.StoredVariables:
679 self.__canonical_stored_name[k.lower()] = k
681 for k, v in self.__variable_names_not_public.items():
682 self.__canonical_parameter_name[k.lower()] = k
683 self.__canonical_parameter_name["algorithm"] = "Algorithm"
684 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
# NOTE(review): damaged extraction — stray leading line numbers, lost
# indentation and missing interior lines ("else:" branches, argument lines of
# the long logging calls).  Code text is kept byte-identical; only comments
# are added or translated to English.
#
# Shared pre-processing for an algorithm run: merge the parameters, log the
# presence/absence of each input (vectors, covariances, operators), and set
# up bounds handling and optimizer verbosity.
686 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
688 logging.debug("%s Lancement", self._name)
689 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
690 self._getTimeState(reset=True)
692 # Update of the internal parameters with the content of Parameters,
693 # taking the default values for all those not explicitly defined
694 self.__setParameters(Parameters, reset=True)
695 for k, v in self.__variable_names_not_public.items():
696 if k not in self._parameters: self.__setParameters( {k:v} )
698 # Checks and completions of the vectors
# Local validator: raise for a missing mandatory vector, otherwise only log.
699 def __test_vvalue(argument, variable, argname, symbol=None):
700 if symbol is None: symbol = variable
702 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
703 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
704 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
705 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
707 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
709 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
711 __test_vvalue( Xb, "Xb", "Background or initial state" )
712 __test_vvalue( Y, "Y", "Observation" )
713 __test_vvalue( U, "U", "Control" )
715 # Checks and completions of the covariances
# Local validator: same pattern for the error covariance matrices.
716 def __test_cvalue(argument, variable, argname, symbol=None):
717 if symbol is None: symbol = variable
719 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
720 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
721 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
722 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
724 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
726 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
728 __test_cvalue( B, "B", "Background" )
729 __test_cvalue( R, "R", "Observation" )
730 __test_cvalue( Q, "Q", "Evolution" )
732 # Checks and completions of the operators
# Local validator: same pattern for the operators (empty dict counts as
# unset).
733 def __test_ovalue(argument, variable, argname, symbol=None):
734 if symbol is None: symbol = variable
735 if argument is None or (isinstance(argument,dict) and len(argument)==0):
736 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
737 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
738 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
739 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
741 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
743 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
745 __test_ovalue( HO, "HO", "Observation", "H" )
746 __test_ovalue( EM, "EM", "Evolution", "M" )
747 __test_ovalue( CM, "CM", "Control Model", "C" )
# Bounds: kept only when given as a non-empty list/tuple, else set to None.
749 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
750 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
752 self._parameters["Bounds"] = None
# Optimizer verbosity follows the logging level (verbose below WARNING).
# NOTE(review): scipy.optimize.tnc is a private submodule and its MSG_ALL /
# MSG_NONE constants are not available in recent SciPy — verify before
# upgrading SciPy.
754 if logging.getLogger().level < logging.WARNING:
755 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
756 if PlatformInfo.has_scipy:
757 import scipy.optimize
758 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
760 self._parameters["optmessages"] = 15
762 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
763 if PlatformInfo.has_scipy:
764 import scipy.optimize
765 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
767 self._parameters["optmessages"] = 15
def _post_run(self,_oH=None):
    # Post-processing after an algorithm run: derive a posteriori
    # diagnostics from the stored covariance and log operator-call,
    # cache, memory and timing statistics.
    # NOTE(review): sampled extract — the docstring line and a probable
    # trailing "return" of the upstream version are missing here.
    if ("StoreSupplementaryCalculations" in self._parameters) and \
        "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
        for _A in self.StoredVariables["APosterioriCovariance"]:
            # Variances are the diagonal of the a posteriori covariance.
            if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
            if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
            if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                # Correlation matrix C = D^-1/2 . A . D^-1/2, D = diag(A).
                _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                self.StoredVariables["APosterioriCorrelations"].store( _C )
    if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
        # nbcalls(0): direct evaluations; nbcalls(3): cache hits.
        logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
        logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
    logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
    logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
    logging.debug("%s Terminé", self._name)
def _toStore(self, key):
    """Return True when `key` was requested among the supplementary
    calculations to store, False otherwise."""
    requested = self._parameters["StoreSupplementaryCalculations"]
    return key in requested
def get(self, key=None):
    # Accessor for stored variables: with a key, return the matching
    # Persistence object (case-insensitive lookup); without a key,
    # return the whole StoredVariables container.
    # NOTE(review): sampled extract — the docstring delimiters and the
    # "if key is not None: ... else:" lines framing the two returns are
    # missing from the visible text.
    Renvoie l'une des variables stockées identifiée par la clé, ou le
    dictionnaire de l'ensemble des variables disponibles en l'absence de
    clé. Ce sont directement les variables sous forme objet qui sont
    renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
    des classes de persistance.
    return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
    return self.StoredVariables
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # Case-insensitive membership test via the canonical-name map.
    if key is None or key.lower() not in self.__canonical_stored_name:
    # NOTE(review): sampled extract — the "return False" branch and the
    # "else:" line are missing between the two lines above/below.
    return self.__canonical_stored_name[key.lower()] in self.StoredVariables
    # NOTE(review): method header missing from this sampled extract
    # (presumably "def keys(self):").
    "D.keys() -> list of D's keys"
    if hasattr(self, "StoredVariables"):
        return self.StoredVariables.keys()
    # NOTE(review): method header missing from this sampled extract
    # (presumably "def pop(self, k, d):").
    "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
    # Case-insensitive removal of a stored variable.
    if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
        return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
    # NOTE(review): the fallback "else:"/argument-check lines are missing
    # around the raise below.
    raise TypeError("pop expected at least 1 arguments, got 0")
    "If key is not found, d is returned if given, otherwise KeyError is raised"
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
    """Abstract elementary assimilation computation.

    Subclasses must override this method and implement the elementary
    assimilation operation in its most natural mathematical form.
    """
    raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
    # Register an algorithm parameter with its default value and its
    # validation constraints (type cast, min/max bounds, allowed values).
    Permet de définir dans l'algorithme des paramètres requis et leurs
    caractéristiques par défaut.
    # NOTE(review): sampled extract — the docstring delimiters, the
    # "if name is None:" guard and most entries of the stored dict are
    # missing between the lines below.
    raise ValueError("A name is mandatory to define a required parameter.")
    self.__required_parameters[name] = {
        "typecast" : typecast,
    # Lowercased alias enabling case-insensitive parameter lookup.
    self.__canonical_parameter_name[name.lower()] = name
    logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
    # Return either the sorted list of required-parameter names
    # (noDetails=True) or the full description dictionary.
    Renvoie la liste des noms de paramètres requis ou directement le
    dictionnaire des paramètres requis.
    # NOTE(review): sampled extract — the docstring delimiters and the
    # "if noDetails:" / "else:" lines framing the two returns are missing.
    return sorted(self.__required_parameters.keys())
    return self.__required_parameters
def setParameterValue(self, name=None, value=None):
    # Validate and return the value of a required parameter, applying
    # the declared typecast and the min/max/list constraints.
    Renvoie la valeur d'un paramètre requis de manière contrôlée
    # Resolve the canonical (case-insensitive) name and its declared spec.
    __k = self.__canonical_parameter_name[name.lower()]
    default = self.__required_parameters[__k]["default"]
    typecast = self.__required_parameters[__k]["typecast"]
    minval = self.__required_parameters[__k]["minval"]
    maxval = self.__required_parameters[__k]["maxval"]
    listval = self.__required_parameters[__k]["listval"]
    if value is None and default is None:
    # NOTE(review): sampled extract — this branch's assignment and several
    # "else:"/"try:"/"except:" lines below are missing from the visible text.
    elif value is None and default is not None:
        if typecast is None: __val = default
        else: __val = typecast( default )
        if typecast is None: __val = value
            __val = typecast( value )
            raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
    # Bound checks are element-wise, so scalars and sequences both work.
    if minval is not None and (numpy.array(__val, float) < minval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
    if maxval is not None and (numpy.array(__val, float) > maxval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
    if listval is not None:
        # Sequence values are checked item by item; scalars directly.
        if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                # NOTE(review): the "for v in __val:" / "if v not in listval:"
                # lines are missing above this raise.
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
        elif __val not in listval:
            raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    # NOTE(review): the final "return __val" line is missing here.
def requireInputArguments(self, mandatory=(), optional=()):
    """Declare the calculation input arguments that are required
    (mandatory) and those that are merely accepted (optional)."""
    required = self.__required_inputs["RequiredInputValues"]
    required["mandatory"] = tuple( mandatory )
    required["optional"] = tuple( optional )
def getInputArguments(self):
    """Return the pair (mandatory, optional) of declared input-argument
    name collections."""
    required = self.__required_inputs["RequiredInputValues"]
    return required["mandatory"], required["optional"]
def setAttributes(self, tags=()):
    """Append classification tags as attributes and return the current
    full tag list in every case."""
    current = self.__required_inputs["ClassificationTags"]
    current.extend( tags )
    return current
def __setParameters(self, fromDico={}, reset=False):
    # Store the received parameters into the internal dictionary,
    # matching keys case-insensitively against the declared parameters.
    # NOTE(review): the mutable default `fromDico={}` is tolerated here
    # because the dict is only read, never mutated.
    Permet de stocker les paramètres reçus dans le dictionnaire interne.
    self._parameters.update( fromDico )
    # Map each canonical parameter name back to the user-supplied spelling.
    __inverse_fromDico_keys = {}
    for k in fromDico.keys():
        if k.lower() in self.__canonical_parameter_name:
            __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
    #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
    __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
    for k in self.__required_parameters.keys():
        if k in __canonic_fromDico_keys:
            self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
        # NOTE(review): sampled extract — the "elif reset:"/"else:" lines
        # between the two assignments are missing from the visible text.
            self._parameters[k] = self.setParameterValue(k)
        logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
def _getTimeState(self, reset=False):
    # Initialise (reset=True) or report the CPU and elapsed time counters
    # in seconds, as a (cpu_time, elapsed_time) pair.
    Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
    # NOTE(review): sampled extract — the docstring delimiters and the
    # "if reset:" framing line are missing around the assignments below.
    self.__initial_cpu_time = time.process_time()
    self.__initial_elapsed_time = time.perf_counter()
    # Deltas since the last reset.
    self.__cpu_time = time.process_time() - self.__initial_cpu_time
    self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
    return self.__cpu_time, self.__elapsed_time
966 # ==============================================================================
class AlgorithmAndParameters(object):
    # General interface class binding a calculation algorithm with its
    # parameters and input data.
    Classe générale d'interface d'action pour l'algorithme et ses paramètres
    # NOTE(review): sampled extract — the "def __init__(self," header and
    # several constructor parameters/initialisations are missing around
    # the visible lines below.
                 name = "GenericAlgorithm",
    self.__name = str(name)
    self.__algorithm = {}
    self.__algorithmFile = None
    self.__algorithmName = None
    self.updateParameters( asDict, asScript )
    # Prefer an explicitly given algorithm; otherwise read it from script.
    if asAlgorithm is None and asScript is not None:
        __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
    if __Algo is not None:
        self.__A = str(__Algo)
        self.__P.update( {"Algorithm":self.__A} )
        self.__setAlgorithm( self.__A )
    # Internal-only variable names, duplicated in Algorithm.
    self.__variable_names_not_public = {"nextStep":False} # duplicated in Algorithm
def updateParameters(self,
    # NOTE(review): sampled extract — the remaining signature parameters
    # (presumably asDict, asScript) are on missing lines.
    "Update the parameters"
    # Load the parameter dictionary from a script when not given directly.
    if asDict is None and asScript is not None:
        __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
    if __Dict is not None:
        self.__P.update( dict(__Dict) )
def executePythonScheme(self, asDictAO = None):
    "Permet de lancer le calcul d'assimilation"
    # Run the assimilation calculation in pure Python: unwrap the input
    # objects, validate their shapes, then delegate to the algorithm.
    # Clear the operator-level calculation cache before a new run.
    Operator.CM.clearCache()
    if not isinstance(asDictAO, dict):
        raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
    # Unwrap each entry: use the .getO() accessor when available,
    # otherwise take the raw object (None for an absent background).
    if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
    elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
    else: self.__Xb = None
    if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
    else: self.__Y = asDictAO["Observation"]
    if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
    else: self.__U = asDictAO["ControlInput"]
    if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
    else: self.__HO = asDictAO["ObservationOperator"]
    if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
    else: self.__EM = asDictAO["EvolutionModel"]
    if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
    else: self.__CM = asDictAO["ControlModel"]
    self.__B = asDictAO["BackgroundError"]
    self.__R = asDictAO["ObservationError"]
    self.__Q = asDictAO["EvolutionError"]
    # Check size/shape consistency between all inputs before running.
    self.__shape_validate()
    self.__algorithm.run(
    # NOTE(review): sampled extract — the keyword arguments of this call
    # (Xb, Y, U, HO, EM, CM, R, B, Q) are on missing lines.
        Parameters = self.__P,
def executeYACSScheme(self, FileName=None):
    "Permet de lancer le calcul d'assimilation"
    # Run the assimilation through a YACS XML scheme inside a SALOME
    # session: load, validate and execute the scheme file.
    if FileName is None or not os.path.exists(FileName):
        raise ValueError("a YACS file name has to be given for YACS execution.\n")
    # NOTE(review): sampled extract — an "else:" line is missing here.
    __file = os.path.abspath(FileName)
    logging.debug("The YACS file name is \"%s\"."%__file)
    # The full SALOME/YACS/ADAO environment must be available.
    if not PlatformInfo.has_salome or \
        not PlatformInfo.has_yacs or \
        not PlatformInfo.has_adao:
        raise ImportError("\n\n"+\
            "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
            "Please load the right environnement before trying to use it.\n")
    import SALOMERuntime
    # NOTE(review): the imports of `pilot` and `loader` and the "try:"
    # lines framing the loading calls below are on missing lines.
    SALOMERuntime.RuntimeSALOME_setRuntime()
    r = pilot.getRuntime()
    xmlLoader = loader.YACSLoader()
    xmlLoader.registerProcCataLoader()
    catalogAd = r.loadCatalog("proc", __file)
    r.addCatalog(catalogAd)
    p = xmlLoader.load(__file)
    except IOError as ex:
        print("The YACS XML schema file can not be loaded: %s"%(ex,))
    logger = p.getLogger("parser")
    if not logger.isEmpty():
        print("The imported YACS XML schema has errors on parsing:")
        print(logger.getStr())
    # NOTE(review): the "if not p.isValid():" guard line is missing above
    # the two prints below.
    print("The YACS XML schema is not valid and will not be executed:")
    print(p.getErrorReport())
    # Consistency check: report all link warnings/errors without stopping.
    info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
    p.checkConsistency(info)
    if info.areWarningsOrErrors():
        print("The YACS XML schema is not coherent and will not be executed:")
        print(info.getGlobalRepr())
    e = pilot.ExecutorSwig()
    # NOTE(review): the executor RunW/execution call is on a missing line.
    if p.getEffectiveState() != pilot.DONE:
        print(p.getErrorReport())
def get(self, key = None):
    "Check the existence of a variable or parameter key"
    # Return a stored algorithm variable or a parameter value by key;
    # without a key, return all public variables.
    if key in self.__algorithm:
        return self.__algorithm.get( key )
    elif key in self.__P:
        return self.__P[key]
    # NOTE(review): sampled extract — the "else:" line and the final
    # "return allvariables" are missing around the lines below.
    allvariables = self.__P
    # NOTE(review): `allvariables` aliases self.__P, so the pop() below
    # mutates the parameter dictionary itself — a copy (dict(self.__P))
    # was probably intended. TODO confirm against the upstream source.
    for k in self.__variable_names_not_public: allvariables.pop(k, None)
def pop(self, k, d):
    """Remove key `k` from the wrapped algorithm and return its value,
    or `d` when absent (required for pickling support)."""
    algorithm = self.__algorithm
    return algorithm.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the list of parameters required by the algorithm"
    # Delegates to the wrapped algorithm instance.
    return self.__algorithm.getRequiredParameters(noDetails)

def getAlgorithmInputArguments(self):
    "Return the list of inputs required by the algorithm"
    return self.__algorithm.getInputArguments()

def getAlgorithmAttributes(self):
    "Return the list of attributes of the algorithm"
    # Calling setAttributes() with no tags returns the current tag list.
    return self.__algorithm.setAttributes()
def setObserver(self, __V, __O, __I, __S):
    # Attach observer callable __O (with hook info __I, scheduler __S)
    # to the stored variable named __V.
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        raise ValueError("No observer can be build before choosing an algorithm.")
    if __V not in self.__algorithm:
        raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
    # NOTE(review): sampled extract — an "else:" line and part of the call
    # arguments below are missing from the visible text.
    self.__algorithm.StoredVariables[ __V ].setDataObserver(
        HookParameters = __I,

def removeObserver(self, __V, __O, __A = False):
    # Detach observer __O from variable __V (__A presumably means
    # "remove all observers" — TODO confirm against upstream).
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        raise ValueError("No observer can be removed before choosing an algorithm.")
    if __V not in self.__algorithm:
        raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
    # NOTE(review): "else:" and the call's argument lines are missing here.
    return self.__algorithm.StoredVariables[ __V ].removeDataObserver(

def hasObserver(self, __V):
    # Tell whether variable __V currently has a data observer attached.
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
    # NOTE(review): the "return False" branch lines are missing below the
    # two guards above.
    if __V not in self.__algorithm:
    return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
    # NOTE(review): method header missing from this sampled extract
    # (presumably "def keys(self):").
    __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
    # Hide internal-only variables from the public key list.
    for k in self.__variable_names_not_public:
        if k in __allvariables: __allvariables.remove(k)
    return __allvariables

def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # A key may live either in the algorithm's variables or in parameters.
    return key in self.__algorithm or key in self.__P

    # NOTE(review): method header missing (presumably "def __repr__(self):").
    "x.__repr__() <==> repr(x)"
    return repr(self.__A)+", "+repr(self.__P)

    # NOTE(review): method header missing (presumably "def __str__(self):").
    "x.__str__() <==> str(x)"
    return str(self.__A)+", "+str(self.__P)
def __setAlgorithm(self, choice = None ):
    # Select the algorithm: locate the module named `choice` inside a
    # "daAlgorithms" directory on sys.path, import it, and instantiate
    # its ElementaryAlgorithm class.
    Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
    d'assimilation. L'argument est un champ caractère se rapportant au nom
    d'un algorithme réalisant l'opération sur les arguments fixes.
    # NOTE(review): sampled extract — the "if choice is None:" guard line
    # is missing above the raise below.
    raise ValueError("Error: algorithm choice has to be given")
    if self.__algorithmName is not None:
        raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
    daDirectory = "daAlgorithms"
    #
    # Explicitly search for the complete file
    # ---------------------------------------
    # NOTE(review): the "module_path = None" initialisation is missing here.
    for directory in sys.path:
        if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
            module_path = os.path.abspath(os.path.join(directory, daDirectory))
    if module_path is None:
        raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
    #
    # Import the complete file as a module
    # ------------------------------------
    # NOTE(review): the "try:" line framing this import block is missing.
    # sys.path is temporarily prepended with the algorithm directory.
    sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
    self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
    if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
        raise ImportError("this module does not define a valid elementary algorithm.")
    self.__algorithmName = str(choice)
    sys.path = sys_path_tmp ; del sys_path_tmp
    except ImportError as e:
        raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
    #
    # Instantiate an object of the file's elementary type
    # ---------------------------------------------------
    self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
def __shape_validate(self):
    # Validate the mutual consistency of the sizes/shapes of all the
    # provided vectors, operators and covariance matrices.
    Validation de la correspondance correcte des tailles des variables et
    des matrices s'il y en a.
    # Each quantity gets a normalised shape tuple; a missing quantity
    # gets a zero shape so the later comparisons are skipped.
    if self.__Xb is None: __Xb_shape = (0,)
    elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
    elif hasattr(self.__Xb,"shape"):
        if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
        else: __Xb_shape = self.__Xb.shape()
    else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
    #
    if self.__Y is None: __Y_shape = (0,)
    elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
    elif hasattr(self.__Y,"shape"):
        if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
        else: __Y_shape = self.__Y.shape()
    else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
    #
    if self.__U is None: __U_shape = (0,)
    elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
    elif hasattr(self.__U,"shape"):
        if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
        else: __U_shape = self.__U.shape()
    else: raise TypeError("The control (U) has no attribute of shape: problem !")
    #
    if self.__B is None: __B_shape = (0,0)
    elif hasattr(self.__B,"shape"):
        if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
        else: __B_shape = self.__B.shape()
    else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
    #
    if self.__R is None: __R_shape = (0,0)
    elif hasattr(self.__R,"shape"):
        if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
        else: __R_shape = self.__R.shape()
    else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
    #
    if self.__Q is None: __Q_shape = (0,0)
    elif hasattr(self.__Q,"shape"):
        if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
        else: __Q_shape = self.__Q.shape()
    else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
    #
    # Operators may be empty dicts (not yet set); use their "Direct" form.
    if len(self.__HO) == 0: __HO_shape = (0,0)
    elif isinstance(self.__HO, dict): __HO_shape = (0,0)
    elif hasattr(self.__HO["Direct"],"shape"):
        if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
        else: __HO_shape = self.__HO["Direct"].shape()
    else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
    #
    if len(self.__EM) == 0: __EM_shape = (0,0)
    elif isinstance(self.__EM, dict): __EM_shape = (0,0)
    elif hasattr(self.__EM["Direct"],"shape"):
        if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
        else: __EM_shape = self.__EM["Direct"].shape()
    else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
    #
    if len(self.__CM) == 0: __CM_shape = (0,0)
    elif isinstance(self.__CM, dict): __CM_shape = (0,0)
    elif hasattr(self.__CM["Direct"],"shape"):
        if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
        else: __CM_shape = self.__CM["Direct"].shape()
    else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
    #
    # Checking the conditions
    # -----------------------
    # Xb and Y must be vectors (1D, or 2D with a singleton dimension).
    if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
        raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
    if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
        raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
    # Covariance matrices and the evolution operator must be square.
    if not( min(__B_shape) == max(__B_shape) ):
        raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
    if not( min(__R_shape) == max(__R_shape) ):
        raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
    if not( min(__Q_shape) == max(__Q_shape) ):
        raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
    if not( min(__EM_shape) == max(__EM_shape) ):
        raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
    #
    # Cross-compatibility checks between the operators and the data.
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
    #
    if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
        # EnsembleBlue accepts a background given as an ensemble: it is
        # reshaped into a persistent series of member vectors.
        if self.__algorithmName in ["EnsembleBlue",]:
            asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
            self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
            for member in asPersistentVector:
                # NOTE(review): `numpy.float` was removed in NumPy >= 1.24;
                # this line needs `float` (or numpy.float64) to keep working.
                self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
            __Xb_shape = min(__B_shape)
        # NOTE(review): sampled extract — the "else:" line is missing here.
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
    #
    if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
        raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
    #
    if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
        raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
    #
    if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
        raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
    #
    # Bounds, when given as a sequence, must supply one pair per state component.
    if ("Bounds" in self.__P) \
        and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
        and (len(self.__P["Bounds"]) != max(__Xb_shape)):
        raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
            %(len(self.__P["Bounds"]),max(__Xb_shape)))
    # NOTE(review): a final "return" line of the upstream version is
    # missing from this sampled extract.
1345 # ==============================================================================
class RegulationAndParameters(object):
    # General interface class for a regulation algorithm and its parameters.
    Classe générale d'interface d'action pour la régulation et ses paramètres
    def __init__(self,
                 name = "GenericRegulation",
    # NOTE(review): sampled extract — the remaining constructor parameters
    # and the "self.__P = {}" initialisation are on missing lines.
        self.__name = str(name)
        # Prefer an explicit algorithm; otherwise read it from the script.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): an "else:" line is missing here.
            __Algo = asAlgorithm
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Check the existence of a variable or parameter key"
        # NOTE(review): the "if key in self.__P:" framing lines around this
        # return are missing from this sampled extract.
        return self.__P[key]
1384 # ==============================================================================
class DataObserver(object):
    # General observer interface: binds a user function to one or more
    # stored variables of an algorithm.
    Classe générale d'interface de type observer
    def __init__(self,
                 name = "GenericObserver",
    # NOTE(review): sampled extract — the remaining constructor parameters
    # (onVariable, withInfo, asTemplate, asString, asScript, asObsObject,
    # withAlgo, scheduledBy, ...) are on missing lines.
        self.__name = str(name)
        # Normalise the observed variable name(s) into a tuple, with one
        # info string per variable.
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
            # NOTE(review): this branch's body and an "else:" line are
            # missing from the visible text.
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            # NOTE(review): an "else:" line is missing here.
                self.__I = (str(withInfo),)
        # NOTE(review): an "else:" line is missing here.
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        # Either take a ready-made observer object, or build the function
        # from a user script/template text.
        if asObsObject is not None:
            self.__O = asObsObject
        # NOTE(review): an "else:" line is missing here.
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            # NOTE(review): the "ename"/"einfo" assignments are on missing lines.
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            # NOTE(review): an "else:" line is missing here.
            withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    # NOTE(review): method header missing (presumably "def __repr__(self):").
    "x.__repr__() <==> repr(x)"
    return repr(self.__V)+"\n"+repr(self.__O)

    # NOTE(review): method header missing (presumably "def __str__(self):").
    "x.__str__() <==> str(x)"
    return str(self.__V)+"\n"+str(self.__O)
1447 # ==============================================================================
class UserScript(object):
    # General interface for user-provided script text: resolves the text
    # from an explicit string, a named template, or a script file.
    Classe générale d'interface de type texte de script utilisateur
    def __init__(self,
                 name = "GenericUserScript",
    # NOTE(review): sampled extract — the remaining constructor parameters
    # (asTemplate, asString, asScript) are on missing lines.
        self.__name = str(name)
        # Resolution order: explicit string, then matching template for the
        # given role name, then script file.
        if asString is not None:
        # NOTE(review): this branch's assignment line is missing.
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        # NOTE(review): the final "else:" fallback lines are missing here.

    # NOTE(review): method header missing (presumably "def __repr__(self):").
    "x.__repr__() <==> repr(x)"
    return repr(self.__F)

    # NOTE(review): method header missing (presumably "def __str__(self):").
    "x.__str__() <==> str(x)"
    return str(self.__F)
1481 # ==============================================================================
class ExternalParameters(object):
    # General dict-like interface for externally supplied parameters.
    Classe générale d'interface de type texte de script utilisateur
    def __init__(self,
                 name = "GenericExternalParameters",
    # NOTE(review): sampled extract — the remaining constructor parameters
    # (asDict, asScript) and the "self.__P = {}" initialisation are on
    # missing lines.
        self.__name = str(name)
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
    # NOTE(review): the signature's remaining parameters are on missing lines.
        "Update the parameters"
        # Load the parameter dictionary from a script when not given directly.
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        # NOTE(review): the "if key in self.__P:" / "else:" framing lines
        # around the two returns are missing from this sampled extract.
        return self.__P[key]
        return list(self.__P.keys())

    # NOTE(review): method header missing (presumably "def keys(self):").
        return list(self.__P.keys())

    def pop(self, k, d):
        # Remove key k and return its value, or d when absent.
        return self.__P.pop(k, d)

    # NOTE(review): method header missing (presumably "def items(self):").
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1530 # ==============================================================================
class State(object):
    # General state-vector interface: wraps a single vector or a series
    # of vectors, read from direct values, a script, or a data file.
    Classe générale d'interface de type état
    def __init__(self,
                 name = "GenericVector",
                 asPersistentVector = None,
                 toBeChecked = False,
    # NOTE(review): sampled extract — the other constructor parameters
    # (asVector, asScript, asDataFile, colNames, colMajor, scheduledBy)
    # are on missing lines, as are the docstring delimiters below.
    Permet de définir un vecteur :
    - asVector : entrée des données, comme un vecteur compatible avec le
    constructeur de numpy.matrix, ou "True" si entrée par script.
    - asPersistentVector : entrée des données, comme une série de vecteurs
    compatible avec le constructeur de numpy.matrix, ou comme un objet de
    type Persistence, ou "True" si entrée par script.
    - asScript : si un script valide est donné contenant une variable
    nommée "name", la variable est de type "asVector" (par défaut) ou
    "asPersistentVector" selon que l'une de ces variables est placée à
    - asDataFile : si un ou plusieurs fichiers valides sont donnés
    contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
    (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
    nommée "name"), on récupère les colonnes et on les range ligne après
    ligne (colMajor=False, par défaut) ou colonne après colonne
    (colMajor=True). La variable résultante est de type "asVector" (par
    défaut) ou "asPersistentVector" selon que l'une de ces variables est
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__is_vector = False
        self.__is_series = False
        #
        # Source resolution: script, then data file, then direct values.
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): an "else:" line is missing here.
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                # NOTE(review): an "else:" line is missing here.
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz files are already stored column-wise, hence the
                # asymmetric transpose conditions.
                if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
            # NOTE(review): an "else:" line is missing here.
                if colNames is not None:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                # NOTE(review): an "else:" line is missing here.
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # Flatten in Fortran (column-major) or C (row-major) order.
                # NOTE(review): the "if bool(colMajor):"/"else:" lines are missing.
                    __Vector = numpy.ravel(__Vector, order = "F")
                    __Vector = numpy.ravel(__Vector, order = "C")
        # NOTE(review): the final "else:" for direct input is missing here.
            __Vector, __Series = asVector, asPersistentVector
        #
        if __Vector is not None:
            self.__is_vector = True
            # NOTE(review): `numpy.float` was removed in NumPy >= 1.24; this
            # line needs `float` (or numpy.float64) to keep working.
            self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # SECURITY NOTE(review): eval() on a string series executes
                # arbitrary code — only safe for trusted input.
                if isinstance(__Series, str): __Series = eval(__Series)
                for member in __Series:
                    # NOTE(review): `numpy.float` removed in NumPy >= 1.24 (see above).
                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
            # NOTE(review): the "else:" branch storing __Series directly is missing.
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
            # NOTE(review): an "else:" line is missing here.
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
        # NOTE(review): an "else:" line is missing here.
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        #
        if scheduledBy is not None:
            self.__T = scheduledBy

    def getO(self, withScheduler=False):
        # Return the wrapped value, optionally with its scheduler.
        # NOTE(review): the "if withScheduler:" framing lines are missing
        # from this sampled extract.
        return self.__V, self.__T
        elif self.__T is None:

    # NOTE(review): method header missing (presumably "def isvector(self):").
    "Internal type check"
    return self.__is_vector

    # NOTE(review): method header missing (presumably "def isseries(self):").
    "Internal type check"
    return self.__is_series

    # NOTE(review): method header missing (presumably "def __repr__(self):").
    "x.__repr__() <==> repr(x)"
    return repr(self.__V)

    # NOTE(review): method header missing (presumably "def __str__(self):").
    "x.__str__() <==> str(x)"
    return str(self.__V)
1654 # ==============================================================================
1655 class Covariance(object):
1657 Classe générale d'interface de type covariance
1660 name = "GenericCovariance",
1661 asCovariance = None,
1662 asEyeByScalar = None,
1663 asEyeByVector = None,
1666 toBeChecked = False,
1669 Permet de définir une covariance :
1670 - asCovariance : entrée des données, comme une matrice compatible avec
1671 le constructeur de numpy.matrix
1672 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1673 multiplicatif d'une matrice de corrélation identité, aucune matrice
1674 n'étant donc explicitement à donner
1675 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1676 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1677 n'étant donc explicitement à donner
1678 - asCovObject : entrée des données comme un objet python, qui a les
1679 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1680 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1681 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1682 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1683 pleine doit être vérifié
1685 self.__name = str(name)
1686 self.__check = bool(toBeChecked)
1689 self.__is_scalar = False
1690 self.__is_vector = False
1691 self.__is_matrix = False
1692 self.__is_object = False
1694 if asScript is not None:
1695 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1697 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1699 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1701 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1703 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1705 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1707 if __Scalar is not None:
1708 if numpy.matrix(__Scalar).size != 1:
1709 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1710 self.__is_scalar = True
1711 self.__C = numpy.abs( float(__Scalar) )
1714 elif __Vector is not None:
1715 self.__is_vector = True
1716 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1717 self.shape = (self.__C.size,self.__C.size)
1718 self.size = self.__C.size**2
1719 elif __Matrix is not None:
1720 self.__is_matrix = True
1721 self.__C = numpy.matrix( __Matrix, float )
1722 self.shape = self.__C.shape
1723 self.size = self.__C.size
1724 elif __Object is not None:
1725 self.__is_object = True
1727 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1728 if not hasattr(self.__C,at):
1729 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1730 if hasattr(self.__C,"shape"):
1731 self.shape = self.__C.shape
1734 if hasattr(self.__C,"size"):
1735 self.size = self.__C.size
1740 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1744 def __validate(self):
1746 if self.__C is None:
1747 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1748 if self.ismatrix() and min(self.shape) != max(self.shape):
1749 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1750 if self.isobject() and min(self.shape) != max(self.shape):
1751 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1752 if self.isscalar() and self.__C <= 0:
1753 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1754 if self.isvector() and (self.__C <= 0).any():
1755 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1756 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1758 L = numpy.linalg.cholesky( self.__C )
1760 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1761 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1763 L = self.__C.cholesky()
1765 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1768 "Vérification du type interne"
1769 return self.__is_scalar
1772 "Vérification du type interne"
1773 return self.__is_vector
1776 "Vérification du type interne"
1777 return self.__is_matrix
1780 "Vérification du type interne"
1781 return self.__is_object
1786 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1787 elif self.isvector():
1788 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1789 elif self.isscalar():
1790 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1791 elif self.isobject():
1792 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1794 return None # Indispensable
1799 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1800 elif self.isvector():
1801 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1802 elif self.isscalar():
1803 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1804 elif self.isobject():
1805 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1808 "Décomposition de Cholesky"
1810 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1811 elif self.isvector():
1812 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1813 elif self.isscalar():
1814 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1815 elif self.isobject() and hasattr(self.__C,"cholesky"):
1816 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1818 def choleskyI(self):
1819 "Inversion de la décomposition de Cholesky"
1821 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1822 elif self.isvector():
1823 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1824 elif self.isscalar():
1825 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1826 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1827 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1830 "Racine carrée matricielle"
1833 return Covariance(self.__name+"C", asCovariance = scipy.linalg.sqrtm(self.__C) )
1834 elif self.isvector():
1835 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1836 elif self.isscalar():
1837 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1838 elif self.isobject() and hasattr(self.__C,"sqrt"):
1839 return Covariance(self.__name+"C", asCovObject = self.__C.sqrt() )
1842 "Inversion de la racine carrée matricielle"
1845 return Covariance(self.__name+"H", asCovariance = scipy.linalg.sqrtm(self.__C).I )
1846 elif self.isvector():
1847 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1848 elif self.isscalar():
1849 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1850 elif self.isobject() and hasattr(self.__C,"sqrtI"):
1851 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtI() )
1853 def diag(self, msize=None):
1854 "Diagonale de la matrice"
1856 return numpy.diag(self.__C)
1857 elif self.isvector():
1859 elif self.isscalar():
1861 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1863 return self.__C * numpy.ones(int(msize))
1864 elif self.isobject():
1865 return self.__C.diag()
1867 def asfullmatrix(self, msize=None):
1871 elif self.isvector():
1872 return numpy.matrix( numpy.diag(self.__C), float )
1873 elif self.isscalar():
1875 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1877 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1878 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1879 return self.__C.asfullmatrix()
1881 def trace(self, msize=None):
1882 "Trace de la matrice"
1884 return numpy.trace(self.__C)
1885 elif self.isvector():
1886 return float(numpy.sum(self.__C))
1887 elif self.isscalar():
1889 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1891 return self.__C * int(msize)
1892 elif self.isobject():
1893 return self.__C.trace()
1899 "x.__repr__() <==> repr(x)"
1900 return repr(self.__C)
1903 "x.__str__() <==> str(x)"
1904 return str(self.__C)
1906 def __add__(self, other):
1907 "x.__add__(y) <==> x+y"
1908 if self.ismatrix() or self.isobject():
1909 return self.__C + numpy.asmatrix(other)
1910 elif self.isvector() or self.isscalar():
1911 _A = numpy.asarray(other)
1912 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1913 return numpy.asmatrix(_A)
1915 def __radd__(self, other):
1916 "x.__radd__(y) <==> y+x"
1917 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1919 def __sub__(self, other):
1920 "x.__sub__(y) <==> x-y"
1921 if self.ismatrix() or self.isobject():
1922 return self.__C - numpy.asmatrix(other)
1923 elif self.isvector() or self.isscalar():
1924 _A = numpy.asarray(other)
1925 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1926 return numpy.asmatrix(_A)
1928 def __rsub__(self, other):
1929 "x.__rsub__(y) <==> y-x"
1930 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1933 "x.__neg__() <==> -x"
1936 def __mul__(self, other):
1937 "x.__mul__(y) <==> x*y"
1938 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1939 return self.__C * other
1940 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1941 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1942 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1943 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1944 return self.__C * numpy.asmatrix(other)
1946 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1947 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1948 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1949 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1950 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1951 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1953 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1954 elif self.isscalar() and isinstance(other,numpy.matrix):
1955 return self.__C * other
1956 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1957 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1958 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1960 return self.__C * numpy.asmatrix(other)
1961 elif self.isobject():
1962 return self.__C.__mul__(other)
1964 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1966 def __rmul__(self, other):
1967 "x.__rmul__(y) <==> y*x"
1968 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1969 return other * self.__C
1970 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1971 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1972 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1973 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1974 return numpy.asmatrix(other) * self.__C
1976 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1977 elif self.isvector() and isinstance(other,numpy.matrix):
1978 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1979 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1980 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1981 return numpy.asmatrix(numpy.array(other) * self.__C)
1983 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1984 elif self.isscalar() and isinstance(other,numpy.matrix):
1985 return other * self.__C
1986 elif self.isobject():
1987 return self.__C.__rmul__(other)
1989 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1992 "x.__len__() <==> len(x)"
1993 return self.shape[0]
1995 # ==============================================================================
1996 class Observer2Func(object):
1998 Creation d'une fonction d'observateur a partir de son texte
2000 def __init__(self, corps=""):
2001 self.__corps = corps
2002 def func(self,var,info):
2003 "Fonction d'observation"
2006 "Restitution du pointeur de fonction dans l'objet"
2009 # ==============================================================================
2010 class CaseLogger(object):
2012 Conservation des commandes de creation d'un cas
2014 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2015 self.__name = str(__name)
2016 self.__objname = str(__objname)
2017 self.__logSerie = []
2018 self.__switchoff = False
2020 "TUI" :Interfaces._TUIViewer,
2021 "SCD" :Interfaces._SCDViewer,
2022 "YACS":Interfaces._YACSViewer,
2025 "TUI" :Interfaces._TUIViewer,
2026 "COM" :Interfaces._COMViewer,
2028 if __addViewers is not None:
2029 self.__viewers.update(dict(__addViewers))
2030 if __addLoaders is not None:
2031 self.__loaders.update(dict(__addLoaders))
2033 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2034 "Enregistrement d'une commande individuelle"
2035 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2036 if "self" in __keys: __keys.remove("self")
2037 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2039 self.__switchoff = True
2041 self.__switchoff = False
2043 def dump(self, __filename=None, __format="TUI", __upa=""):
2044 "Restitution normalisée des commandes"
2045 if __format in self.__viewers:
2046 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2048 raise ValueError("Dumping as \"%s\" is not available"%__format)
2049 return __formater.dump(__filename, __upa)
2051 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2052 "Chargement normalisé des commandes"
2053 if __format in self.__loaders:
2054 __formater = self.__loaders[__format]()
2056 raise ValueError("Loading as \"%s\" is not available"%__format)
2057 return __formater.load(__filename, __content, __object)
2059 # ==============================================================================
2062 _extraArguments = None,
2063 _sFunction = lambda x: x,
2068 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2069 correspondante de valeurs de la fonction en argument
2071 # Vérifications et définitions initiales
2072 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2073 if not PlatformInfo.isIterable( __xserie ):
2074 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
2076 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2079 __mpWorkers = int(_mpWorkers)
2081 import multiprocessing
2092 if _extraArguments is None:
2094 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2095 for __xvalue in __xserie:
2096 _jobs.append( [__xvalue, ] + list(_extraArguments) )
2098 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2099 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2100 import multiprocessing
2101 with multiprocessing.Pool(__mpWorkers) as pool:
2102 __multiHX = pool.map( _sFunction, _jobs )
2105 # logging.debug("MULTF Internal multiprocessing calculation end")
2107 # logging.debug("MULTF Internal monoprocessing calculation begin")
2109 if _extraArguments is None:
2110 for __xvalue in __xserie:
2111 __multiHX.append( _sFunction( __xvalue ) )
2112 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2113 for __xvalue in __xserie:
2114 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2115 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2116 for __xvalue in __xserie:
2117 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2119 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2120 # logging.debug("MULTF Internal monoprocessing calculation end")
2122 # logging.debug("MULTF Internal multifonction calculations end")
2125 # ==============================================================================
2126 def CostFunction3D(_x,
2127 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2128 _HmX = None, # Simulation déjà faite de Hm(x)
2129 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2134 _SIV = False, # A résorber pour la 8.0
2135 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2136 _nPS = 0, # nbPreviousSteps
2137 _QM = "DA", # QualityMeasure
2138 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2139 _fRt = False, # Restitue ou pas la sortie étendue
2140 _sSc = True, # Stocke ou pas les SSC
2143 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2144 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2145 DFO, QuantileRegression
2151 for k in ["CostFunctionJ",
2157 "SimulatedObservationAtCurrentOptimum",
2158 "SimulatedObservationAtCurrentState",
2162 if hasattr(_SSV[k],"store"):
2163 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
2165 _X = numpy.asmatrix(numpy.ravel( _x )).T
2166 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2167 _SSV["CurrentState"].append( _X )
2169 if _HmX is not None:
2173 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2177 _HX = _Hm( _X, *_arg )
2178 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2180 if "SimulatedObservationAtCurrentState" in _SSC or \
2181 "SimulatedObservationAtCurrentOptimum" in _SSC:
2182 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
2184 if numpy.any(numpy.isnan(_HX)):
2185 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2187 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2188 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2189 if _BI is None or _RI is None:
2190 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2191 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2192 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2193 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2194 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2196 raise ValueError("Observation error covariance matrix has to be properly defined!")
2198 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2199 elif _QM in ["LeastSquares", "LS", "L2"]:
2201 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2202 elif _QM in ["AbsoluteValue", "L1"]:
2204 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2205 elif _QM in ["MaximumError", "ME"]:
2207 Jo = numpy.max( numpy.abs(_Y - _HX) )
2208 elif _QM in ["QR", "Null"]:
2212 raise ValueError("Unknown asked quality measure!")
2214 J = float( Jb ) + float( Jo )
2217 _SSV["CostFunctionJb"].append( Jb )
2218 _SSV["CostFunctionJo"].append( Jo )
2219 _SSV["CostFunctionJ" ].append( J )
2221 if "IndexOfOptimum" in _SSC or \
2222 "CurrentOptimum" in _SSC or \
2223 "SimulatedObservationAtCurrentOptimum" in _SSC:
2224 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2225 if "IndexOfOptimum" in _SSC:
2226 _SSV["IndexOfOptimum"].append( IndexMin )
2227 if "CurrentOptimum" in _SSC:
2228 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2229 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2230 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2235 if _QM in ["QR"]: # Pour le QuantileRegression
2240 # ==============================================================================
# Simple self-identification banner when the module is executed directly
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')