1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
# NOTE(review): this chunk is an extraction artifact — each line still carries
# its original file line number, indentation is collapsed, and gaps in that
# numbering show many lines are missing (def headers, docstring quotes,
# else/return branches). Code is kept byte-identical; only comments are added.
#
# CacheManager: bounded FIFO cache of operator evaluations, used to avoid
# recomputing H(x) for an input vector x already seen (within a tolerance).
40 class CacheManager(object):
42 Classe générale de gestion d'un cache de calculs
# --- __init__ fragment (the "def __init__(self, ...)" header line is missing).
# Constructor keyword defaults; the "lenght" misspelling is part of the public
# keyword names used by callers and must not be changed here.
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 Les caractéristiques de tolérance peuvent être modifiées à la création.
# Relative tolerance under which two input vectors are declared identical.
51 self.__tolerBP = float(toleranceInRedundancy)
# Maximum number of cached entries; -1 means "size lazily on first store".
52 self.__lenghtOR = int(lenghtOfRedundancy)
# Remember the initial length so the cache can be reset later (see enable()).
53 self.__initlnOR = self.__lenghtOR
# Each entry is a tuple: (input vector, result, input norm, operator name) —
# see the indices [0], [1], [2], [3] used in wasCalculatedIn below.
60 self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator
62 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
# --- wasCalculatedIn: look up a previously stored evaluation of operator
# oName at the point xValue (missing lines presumably initialize the flag and
# return (found, cached value) — TODO confirm against the full source).
64 def wasCalculatedIn(self, xValue, oName="" ): #, info="" ):
65 "Check whether a computation matching the value already exists"
# Scan the cache backwards, most recent entry first.
69 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# Skip entries belonging to a different operator or of a different size.
70 if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
71 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
# Hit when the distance to the cached point is below the tolerance scaled by
# the cached point's norm (relative comparison).
73 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
75 __HxV = self.__listOPCV[i][1]
76 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
# --- storeValueInX: store the pair (x, Hx) for operator oName in the cache.
80 def storeValueInX(self, xValue, HxValue, oName="" ):
81 "Store, for an operator o, a computation Hx matching the value x"
# Lazy sizing: the first store fixes the cache capacity from the state size.
82 if self.__lenghtOR < 0:
83 self.__lenghtOR = 2 * xValue.size + 2
84 self.__initlnOR = self.__lenghtOR
85 self.__seenNames.append(str(oName))
# A previously unseen operator name extends the allowed cache capacity.
86 if str(oName) not in self.__seenNames: # Extend the list if the name is new
87 self.__lenghtOR += 2 * xValue.size + 2
88 self.__initlnOR += self.__lenghtOR
89 self.__seenNames.append(str(oName))
# Evict the oldest entries (FIFO) once over capacity.
90 while len(self.__listOPCV) > self.__lenghtOR:
91 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
92 self.__listOPCV.pop(0)
# Append the new entry; the tuple continues on lines missing from this view
# (presumably the HxValue element and the closing parentheses).
93 self.__listOPCV.append( (
94 copy.copy(numpy.ravel(xValue)),
96 numpy.linalg.norm(xValue),
# --- fragments of the cache reset / disable / enable methods (their "def"
# header lines are missing from this view): restore the initial capacity and
# toggle the enabled flag.
102 self.__initlnOR = self.__lenghtOR
104 self.__enabled = False
108 self.__lenghtOR = self.__initlnOR
109 self.__enabled = True
111 # ==============================================================================
# NOTE(review): extraction artifact — lines carry their original numbers and
# many interior lines are missing (see numbering gaps). Code byte-identical;
# only comments added / short French comments translated.
#
# Operator: wraps either a user function (mono- or multi-function) or a matrix
# as a single evaluation interface, with call counting and optional caching
# through the class-level CacheManager Operator.CM (defined on missing lines).
112 class Operator(object):
114 Classe générale d'interface de type opérateur simple
# --- __init__ fragment (the "def __init__(self, ...)" header is missing).
# Keyword defaults of the constructor:
122 name = "GenericOperator",
125 avoidingRedundancy = True,
126 inputAsMultiFunction = False,
127 enableMultiProcess = False,
128 extraArguments = None,
131 On construit un objet de ce type en fournissant, à l'aide de l'un des
132 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
135 - name : nom d'opérateur
136 - fromMethod : argument de type fonction Python
137 - fromMatrix : argument adapté au constructeur numpy.matrix
138 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
139 - inputAsMultiFunction : booléen indiquant une fonction explicitement
140 définie (ou pas) en multi-fonction
141 - extraArguments : arguments supplémentaires passés à la fonction de
142 base et ses dérivées (tuple ou dictionnaire)
144 self.__name = str(name)
# Per-instance call counters (class-level global counters are on missing lines).
145 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
146 self.__AvoidRC = bool( avoidingRedundancy )
147 self.__inputAsMF = bool( inputAsMultiFunction )
148 self.__mpEnabled = bool( enableMultiProcess )
149 self.__extraArgs = extraArguments
# Three construction modes: native multi-function, mono-function wrapped into
# a multi-function via functools.partial, or an explicit matrix.
150 if fromMethod is not None and self.__inputAsMF:
151 self.__Method = fromMethod # logtimer(fromMethod)
153 self.__Type = "Method"
154 elif fromMethod is not None and not self.__inputAsMF:
# Wrap the mono-function so it accepts a series of inputs (MultiFonction is
# defined elsewhere in this file, outside this view).
155 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
157 self.__Type = "Method"
158 elif fromMatrix is not None:
# NOTE(review): numpy.float was removed in NumPy 1.24 and numpy.matrix is
# deprecated — this line breaks under modern NumPy; TODO confirm target version.
160 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
161 self.__Type = "Matrix"
# --- redundancy-cache switches (docstring/branch lines missing from view).
167 def disableAvoidingRedundancy(self):
169 Operator.CM.disable()
# enableAvoidingRedundancy: only the else-branch (disable) is visible here;
# the enable path for self.__AvoidRC is on missing lines — TODO confirm.
171 def enableAvoidingRedundancy(self):
176 Operator.CM.disable()
# --- appliedTo: evaluate the operator on one value or a series of values,
# optionally reusing precalculated HValue and the redundancy cache.
182 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
184 Permet de restituer le résultat de l'application de l'opérateur à une
185 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
186 argument devant a priori être du bon type.
188 - les arguments par série sont :
189 - xValue : argument adapté pour appliquer l'opérateur
190 - HValue : valeur précalculée de l'opérateur en ce point
191 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# Branch 1: precalculated values supplied — store them instead of computing.
198 if HValue is not None:
202 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
204 if _HValue is not None:
205 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
207 for i in range(len(_HValue)):
208 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
210 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Branch 2: compute each value, consulting the cache first when enabled.
215 for i, xv in enumerate(_xValue):
217 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
219 __alreadyCalculated = False
221 if __alreadyCalculated:
222 self.__addOneCacheCall()
225 if self.__Matrix is not None:
226 self.__addOneMatrixCall()
227 _xv = numpy.matrix(numpy.ravel(xv)).T
228 _hv = self.__Matrix * _xv
230 self.__addOneMethodCall()
234 _HxValue.append( _hv )
# Cache misses with a function operator are batched into one MF call.
236 if len(_xserie)>0 and self.__Matrix is None:
237 if self.__extraArgs is None:
238 _hserie = self.__Method( _xserie ) # MF evaluation
240 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF evaluation
241 if not hasattr(_hserie, "pop"):
242 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
248 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally pack the series as a 2D array, one column per result.
250 if returnSerieAsArrayMatrix:
251 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
253 if argsAsSerie: return _HxValue
254 else: return _HxValue[-1]
# --- appliedControledFormTo: evaluate on (x, u) control pairs; a None u means
# the operator applies to x alone.
256 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
258 Permet de restituer le résultat de l'application de l'opérateur à des
259 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
260 argument devant a priori être du bon type. Si la uValue est None,
261 on suppose que l'opérateur ne s'applique qu'à xValue.
263 - paires : les arguments par paire sont :
264 - xValue : argument X adapté pour appliquer l'opérateur
265 - uValue : argument U adapté pour appliquer l'opérateur
266 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
268 if argsAsSerie: _xuValue = paires
269 else: _xuValue = (paires,)
270 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix operator: u is ignored, only H·x is computed.
272 if self.__Matrix is not None:
274 for paire in _xuValue:
275 _xValue, _uValue = paire
276 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
277 self.__addOneMatrixCall()
278 _HxValue.append( self.__Matrix * _xValue )
# Function operator: pass (x,u) when u is given, else x alone.
281 for paire in _xuValue:
282 _xValue, _uValue = paire
283 if _uValue is not None:
284 _xuArgs.append( paire )
286 _xuArgs.append( _xValue )
287 self.__addOneMethodCall( len(_xuArgs) )
288 if self.__extraArgs is None:
289 _HxValue = self.__Method( _xuArgs ) # MF evaluation
291 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF evaluation
293 if returnSerieAsArrayMatrix:
294 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
296 if argsAsSerie: return _HxValue
297 else: return _HxValue[-1]
# --- appliedInXTo: evaluate the operator built at xNominal on xValue pairs;
# a matrix operator is valid at any nominal point.
299 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
301 Permet de restituer le résultat de l'application de l'opérateur à une
302 série d'arguments xValue, sachant que l'opérateur est valable en
303 xNominal. Cette méthode se contente d'appliquer, son argument devant a
304 priori être du bon type. Si l'opérateur est linéaire car c'est une
305 matrice, alors il est valable en tout point nominal et xNominal peut
306 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
307 permet d'indiquer que l'argument est multi-paires.
309 - paires : les arguments par paire sont :
310 - xNominal : série d'arguments permettant de donner le point où
311 l'opérateur est construit pour être ensuite appliqué
312 - xValue : série d'arguments adaptés pour appliquer l'opérateur
313 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
315 if argsAsSerie: _nxValue = paires
316 else: _nxValue = (paires,)
317 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
319 if self.__Matrix is not None:
321 for paire in _nxValue:
322 _xNominal, _xValue = paire
323 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
324 self.__addOneMatrixCall()
325 _HxValue.append( self.__Matrix * _xValue )
327 self.__addOneMethodCall( len(_nxValue) )
328 if self.__extraArgs is None:
329 _HxValue = self.__Method( _nxValue ) # MF evaluation
331 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF evaluation
333 if returnSerieAsArrayMatrix:
334 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
336 if argsAsSerie: return _HxValue
337 else: return _HxValue[-1]
# --- asMatrix: return the operator as a matrix; a function operator needs an
# operating point, passed via ValueForMethodForm (the string default is a
# sentinel distinguishing "not given" from a legitimate None value).
339 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
341 Permet de renvoyer l'opérateur sous la forme d'une matrice
343 if self.__Matrix is not None:
344 self.__addOneMatrixCall()
345 mValue = [self.__Matrix,]
346 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None" as the sentinel
349 self.__addOneMethodCall( len(ValueForMethodForm) )
350 for _vfmf in ValueForMethodForm:
351 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
353 self.__addOneMethodCall()
354 mValue = self.__Method(((ValueForMethodForm, None),))
356 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
358 if argsAsSerie: return mValue
359 else: return mValue[-1]
# --- shape accessor fragment (its "def" header line is missing from view):
# returns the numpy shape only when the matrix form is available.
363 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
364 la forme d'une matrice
366 if self.__Matrix is not None:
367 return self.__Matrix.shape
369 raise ValueError("Matrix form of the operator is not available, nor the shape")
# --- nbcalls: report call counters; indices 0-3 are per-instance, 4-7 are
# class-wide totals (the tuple-opening line is missing from this view).
371 def nbcalls(self, which=None):
373 Renvoie les nombres d'évaluations de l'opérateur
376 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
377 self.__NbCallsAsMatrix,
378 self.__NbCallsAsMethod,
379 self.__NbCallsOfCached,
380 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
381 Operator.NbCallsAsMatrix,
382 Operator.NbCallsAsMethod,
383 Operator.NbCallsOfCached,
385 if which is None: return __nbcalls
386 else: return __nbcalls[which]
# --- private counters: each bumps both the per-instance and the class-wide
# (global) counter of the matching kind.
388 def __addOneMatrixCall(self):
389 "Count one call"
390 self.__NbCallsAsMatrix += 1 # Local count
391 Operator.NbCallsAsMatrix += 1 # Global count
393 def __addOneMethodCall(self, nb = 1):
394 "Count one or several calls"
395 self.__NbCallsAsMethod += nb # Local count
396 Operator.NbCallsAsMethod += nb # Global count
398 def __addOneCacheCall(self):
399 "Count one call"
400 self.__NbCallsOfCached += 1 # Local count
401 Operator.NbCallsOfCached += 1 # Global count
403 # ==============================================================================
# NOTE(review): extraction artifact — lines carry their original numbers and
# many interior lines are missing (see numbering gaps). Code byte-identical;
# only comments added / short French comments translated.
#
# FullOperator: builds the (Direct, Tangent, Adjoint) Operator triple from one
# of several user-supplied forms: a matrix, one function (with approximated
# derivatives via finite differences), three functions, or a script.
404 class FullOperator(object):
406 Classe générale d'interface de type opérateur complet
407 (Direct, Linéaire Tangent, Adjoint)
# --- __init__ fragment (the "def __init__(self, ...)" header is missing).
# Keyword defaults of the constructor:
410 name = "GenericFullOperator",
412 asOneFunction = None, # 1 Function
413 asThreeFunctions = None, # 3 Functions in a dictionary
414 asScript = None, # 1 or 3 Function(s) by script
415 asDict = None, # Parameters
417 extraArguments = None,
419 inputAsMF = False,# Function(s) as Multi-Functions
424 self.__name = str(name)
425 self.__check = bool(toBeChecked)
426 self.__extraArgs = extraArguments
# Merge user parameters and normalize the multiprocessing switches.
431 if (asDict is not None) and isinstance(asDict, dict):
432 __Parameters.update( asDict )
433 # Priority given to EnableMultiProcessingInDerivatives=True
434 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
435 __Parameters["EnableMultiProcessingInDerivatives"] = True
436 __Parameters["EnableMultiProcessingInEvaluation"] = False
437 if "EnableMultiProcessingInDerivatives" not in __Parameters:
438 __Parameters["EnableMultiProcessingInDerivatives"] = False
439 if __Parameters["EnableMultiProcessingInDerivatives"]:
440 __Parameters["EnableMultiProcessingInEvaluation"] = False
441 if "EnableMultiProcessingInEvaluation" not in __Parameters:
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "withIncrement" in __Parameters: # Temporary backward-compat alias
444 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# --- Input form 1: operator(s) defined in an external script (branch lines
# selecting matrix vs function import are partly missing from this view).
446 if asScript is not None:
447 __Matrix, __Function = None, None
449 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
451 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
452 __Function.update({"useApproximatedDerivatives":True})
453 __Function.update(__Parameters)
454 elif asThreeFunctions:
456 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
457 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
458 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
460 __Function.update(__Parameters)
# --- Input form 2: one direct function, derivatives approximated.
463 if asOneFunction is not None:
464 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
465 if asOneFunction["Direct"] is not None:
466 __Function = asOneFunction
468 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
470 __Function = { "Direct":asOneFunction }
471 __Function.update({"useApproximatedDerivatives":True})
472 __Function.update(__Parameters)
# --- Input form 3: explicit Direct/Tangent/Adjoint functions.
473 elif asThreeFunctions is not None:
474 if isinstance(asThreeFunctions, dict) and \
475 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
476 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
477 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
478 __Function = asThreeFunctions
479 elif isinstance(asThreeFunctions, dict) and \
480 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
481 __Function = asThreeFunctions
482 __Function.update({"useApproximatedDerivatives":True})
484 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Without an explicit Direct, the Tangent function stands in for it.
485 if "Direct" not in asThreeFunctions:
486 __Function["Direct"] = asThreeFunctions["Tangent"]
487 __Function.update(__Parameters)
491 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
492 # for k in ("Direct", "Tangent", "Adjoint"):
493 # if k in __Function and hasattr(__Function[k],"__class__"):
494 # if type(__Function[k]) is type(self.__init__):
495 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize appliedInX: a bare value is wrapped under the "HXb" key.
497 if appliedInX is not None and isinstance(appliedInX, dict):
498 __appliedInX = appliedInX
499 elif appliedInX is not None:
500 __appliedInX = {"HXb":appliedInX}
504 if scheduledBy is not None:
505 self.__T = scheduledBy
# --- Build the Operator triple. Case A: approximated derivatives by finite
# differences (FDApproximation); fill in the FD defaults first.
507 if isinstance(__Function, dict) and \
508 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
509 ("Direct" in __Function) and (__Function["Direct"] is not None):
510 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
511 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
512 if "withdX" not in __Function: __Function["withdX"] = None
513 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
514 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
515 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
516 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
517 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Local import to avoid a circular dependency at module load time —
# presumably; TODO confirm against daCore.NumericObjects.
518 from daCore import NumericObjects
519 FDA = NumericObjects.FDApproximation(
521 Function = __Function["Direct"],
522 centeredDF = __Function["CenteredFiniteDifference"],
523 increment = __Function["DifferentialIncrement"],
524 dX = __Function["withdX"],
525 avoidingRedundancy = __Function["withAvoidingRedundancy"],
526 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
527 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
528 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
529 mpWorkers = __Function["NumberOfProcesses"],
530 mfEnabled = __Function["withmfEnabled"],
532 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
533 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
534 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case B: the three functions are supplied explicitly.
535 elif isinstance(__Function, dict) and \
536 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
537 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
538 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
539 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
540 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case C: a matrix; the adjoint is its transpose.
541 elif asMatrix is not None:
# NOTE(review): numpy.float was removed in NumPy 1.24 and numpy.matrix is
# deprecated — needs numpy.float64/float under modern NumPy; TODO confirm.
542 __matrice = numpy.matrix( __Matrix, numpy.float )
543 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
544 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
545 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
548 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Normalize the stored appliedInX values to column matrices.
550 if __appliedInX is not None:
551 self.__FO["AppliedInX"] = {}
552 for key in list(__appliedInX.keys()):
553 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
554 # Case of a true matrix
555 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
556 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
557 # Case of a vector stored as a 2-dimensional array
558 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
560 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
562 self.__FO["AppliedInX"] = None
# --- __repr__ / __str__ fragments (their "def" header lines are missing):
# both delegate to the internal __FO dictionary.
568 "x.__repr__() <==> repr(x)"
569 return repr(self.__FO)
572 "x.__str__() <==> str(x)"
573 return str(self.__FO)
575 # ==============================================================================
576 class Algorithm(object):
578 Classe générale d'interface de type algorithme
580 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
581 d'assimilation, en fournissant un container (dictionnaire) de variables
582 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
584 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
586 def __init__(self, name):
588 L'initialisation présente permet de fabriquer des variables de stockage
589 disponibles de manière générique dans les algorithmes élémentaires. Ces
590 variables de stockage sont ensuite conservées dans un dictionnaire
591 interne à l'objet, mais auquel on accède par la méthode "get".
593 Les variables prévues sont :
594 - APosterioriCorrelations : matrice de corrélations de la matrice A
595 - APosterioriCovariance : matrice de covariances a posteriori : A
596 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
597 - APosterioriVariances : vecteur des variances de la matrice A
598 - Analysis : vecteur d'analyse : Xa
599 - BMA : Background moins Analysis : Xa - Xb
600 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
601 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
602 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
603 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
604 - CostFunctionJo : partie observations de la fonction-coût : Jo
605 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
606 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
607 - CurrentOptimum : état optimal courant lors d'itérations
608 - CurrentState : état courant lors d'itérations
609 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
610 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
611 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
612 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
613 - Innovation : l'innovation : d = Y - H(X)
614 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
615 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
616 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
617 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
618 - KalmanGainAtOptimum : gain de Kalman à l'optimum
619 - MahalanobisConsistency : indicateur de consistance des covariances
620 - OMA : Observation moins Analyse : Y - Xa
621 - OMB : Observation moins Background : Y - Xb
622 - ForecastState : état prédit courant lors d'itérations
623 - Residu : dans le cas des algorithmes de vérification
624 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
625 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
626 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
627 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
628 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
629 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
630 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
631 On peut rajouter des variables à stocker dans l'initialisation de
632 l'algorithme élémentaire qui va hériter de cette classe
634 logging.debug("%s Initialisation", str(name))
635 self._m = PlatformInfo.SystemUsage()
637 self._name = str( name )
638 self._parameters = {"StoreSupplementaryCalculations":[]}
639 self.__required_parameters = {}
640 self.__required_inputs = {
641 "RequiredInputValues":{"mandatory":(), "optional":()},
642 "ClassificationTags":[],
644 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
645 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
646 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
648 self.StoredVariables = {}
649 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
650 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
651 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
652 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
653 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
654 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
655 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
656 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
657 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
658 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
659 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
660 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
661 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
662 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
663 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
664 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
665 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
666 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
667 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
668 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
669 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
670 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
671 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
672 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
673 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
674 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
675 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
676 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
677 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
678 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
679 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
680 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
681 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
682 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
683 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
684 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
685 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
686 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
687 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
689 for k in self.StoredVariables:
690 self.__canonical_stored_name[k.lower()] = k
692 for k, v in self.__variable_names_not_public.items():
693 self.__canonical_parameter_name[k.lower()] = k
694 self.__canonical_parameter_name["algorithm"] = "Algorithm"
695 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
697 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
699 logging.debug("%s Lancement", self._name)
700 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
701 self._getTimeState(reset=True)
703 # Mise a jour des paramètres internes avec le contenu de Parameters, en
704 # reprenant les valeurs par défauts pour toutes celles non définies
705 self.__setParameters(Parameters, reset=True)
706 for k, v in self.__variable_names_not_public.items():
707 if k not in self._parameters: self.__setParameters( {k:v} )
709 # Corrections et compléments des vecteurs
710 def __test_vvalue(argument, variable, argname, symbol=None):
711 if symbol is None: symbol = variable
713 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
714 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
715 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
716 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
718 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
720 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
722 __test_vvalue( Xb, "Xb", "Background or initial state" )
723 __test_vvalue( Y, "Y", "Observation" )
724 __test_vvalue( U, "U", "Control" )
726 # Corrections et compléments des covariances
727 def __test_cvalue(argument, variable, argname, symbol=None):
728 if symbol is None: symbol = variable
730 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
731 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
732 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
733 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
735 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
737 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
739 __test_cvalue( B, "B", "Background" )
740 __test_cvalue( R, "R", "Observation" )
741 __test_cvalue( Q, "Q", "Evolution" )
743 # Corrections et compléments des opérateurs
744 def __test_ovalue(argument, variable, argname, symbol=None):
745 if symbol is None: symbol = variable
746 if argument is None or (isinstance(argument,dict) and len(argument)==0):
747 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
748 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
749 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
750 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
752 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
754 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
756 __test_ovalue( HO, "HO", "Observation", "H" )
757 __test_ovalue( EM, "EM", "Evolution", "M" )
758 __test_ovalue( CM, "CM", "Control Model", "C" )
760 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
761 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
763 self._parameters["Bounds"] = None
765 if logging.getLogger().level < logging.WARNING:
766 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
767 if PlatformInfo.has_scipy:
768 import scipy.optimize
769 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
771 self._parameters["optmessages"] = 15
773 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
774 if PlatformInfo.has_scipy:
775 import scipy.optimize
776 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
778 self._parameters["optmessages"] = 15
    def _post_run(self,_oH=None):
        """
        Post-run hook: derive a-posteriori diagnostics (variances, standard
        deviations, correlations) from the stored a-posteriori covariances,
        then log operator call counters and resource usage.

        _oH : optional dict of observation operators with "Direct", "Tangent"
        and "Adjoint" entries whose call and cache counters are logged.
        """
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                # Variances: diagonal of the a-posteriori covariance matrix A
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                # Standard deviations: element-wise square root of the variances
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                # Correlations: C = D^(-1/2) . A . D^(-1/2) with D = diag(A)
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
        # NOTE(review): the original method likely ends with a return value
        # elided in this extract — confirm against upstream source.
803 def _toStore(self, key):
804 "True if in StoreSupplementaryCalculations, else False"
805 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Return the stored variable identified by ``key`` (matched
        case-insensitively through the canonical-name table), or the whole
        dictionary of available variables when no key is given.  The
        variables are returned directly as objects, so individual access
        methods are those of the persistence classes.
        """
        # NOTE(review): the guard lines ("if key is not None:" / "else:")
        # around the two returns below are elided in this extract.
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        # Keys are resolved case-insensitively through the canonical-name table.
        if key is None or key.lower() not in self.__canonical_stored_name:
            # NOTE(review): the "return False" branch body is elided in this extract.
        return self.__canonical_stored_name[key.lower()] in self.StoredVariables
    # NOTE(review): the "def keys(self):" header line and its fallback
    # branch are elided in this extract.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
    # NOTE(review): the "def pop(self, k, d):" header line and its
    # dict-like fallback branch are elided in this extract.
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        # Canonical (case-insensitive) key resolution before removal
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
            # Mimic dict.pop's error when called without any key
            raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"
849 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
851 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
852 sa forme mathématique la plus naturelle possible.
854 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
        """
        Declare a required algorithm parameter and its default
        characteristics (default value, typecast, bounds, allowed values...).
        """
        # A name is mandatory to register the parameter.
        # NOTE(review): the "if name is None:" guard line is elided here.
            raise ValueError("A name is mandatory to define a required parameter.")
        # NOTE(review): most entries of the characteristics dict ("default",
        # "message", "minval", "maxval", "listval", "listadv" and the closing
        # brace) are elided in this extract.
        self.__required_parameters[name] = {
            "typecast" : typecast,
        # Map the lower-cased name to its canonical spelling for
        # case-insensitive lookups.
        self.__canonical_parameter_name[name.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of required-parameter names when
        ``noDetails`` is True, or the full dictionary of required
        parameters otherwise.
        """
        # NOTE(review): the "if noDetails:" / "else:" guard lines around the
        # two returns below are elided in this extract.
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Set the value of a required parameter in a controlled way: typecast
        the given value (or fall back to the declared default), then check
        bounds and allowed values.
        """
        # Resolve the canonical parameter name (case-insensitive lookup).
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        # NOTE(review): several branch lines of the value/default selection
        # below ("__val = None", "else:", "try:", "except:") are elided in
        # this extract; the structure is value -> typecast(value), falling
        # back to default -> typecast(default).
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
                __val = typecast( value )
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Bounds control (element-wise for array-like values)
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        # Allowed-values control: per-element for list/tuple values,
        # otherwise on the whole value.
        # NOTE(review): the "for v in __val:" loop header, an "else:" line
        # and the final "return __val" are elided in this extract.
        if listval is not None or listadv is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                if listval is not None and v in listval: continue
                elif listadv is not None and v in listadv: continue
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
927 def requireInputArguments(self, mandatory=(), optional=()):
929 Permet d'imposer des arguments de calcul requis en entrée.
931 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
932 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
934 def getInputArguments(self):
936 Permet d'obtenir les listes des arguments de calcul requis en entrée.
938 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
940 def setAttributes(self, tags=()):
942 Permet d'adjoindre des attributs comme les tags de classification.
943 Renvoie la liste actuelle dans tous les cas.
945 self.__required_inputs["ClassificationTags"].extend( tags )
946 return self.__required_inputs["ClassificationTags"]
    def __setParameters(self, fromDico={}, reset=False):
        """
        Store the received parameters in the internal dictionary, resolving
        canonical names and (re)applying declared defaults when ``reset``
        is True.  NB: the mutable default ``fromDico={}`` is only read here,
        never mutated.
        """
        self._parameters.update( fromDico )
        # Map canonical parameter names back to the keys actually given by
        # the caller (caller keys may differ in case).
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
            # NOTE(review): an "elif reset:" guard line is elided here.
                self._parameters[k] = self.setParameterValue(k)
            # NOTE(review): one or two lines are elided here, before the
            # per-parameter debug logging below.
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
    def _getTimeState(self, reset=False):
        """
        Initialise (reset=True) or return the computation times
        (cpu, elapsed), in seconds, since the last reset.
        """
        # NOTE(review): the "if reset:" / "else:" guard lines around the two
        # branches below are elided in this extract.
            self.__initial_cpu_time = time.process_time()
            self.__initial_elapsed_time = time.perf_counter()
            self.__cpu_time = time.process_time() - self.__initial_cpu_time
            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
            return self.__cpu_time, self.__elapsed_time
    def _StopOnTimeLimit(self, X=None, withReason=False):
        "Stop criteria on time limit: True/False [+ Reason]"
        # Current cumulated cpu (c) and elapsed (e) times
        c, e = self._getTimeState()
        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
        # NOTE(review): the final "else:" line and the return statement(s)
        # (returning __SC, plus __SR when withReason is True) are elided in
        # this extract.
            __SC, __SR = False, ""
995 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action-interface class binding an assimilation algorithm and
    its parameters.
    """
    # NOTE(review): the "def __init__(self, ...)" header and several of its
    # parameter and attribute-initialisation lines (notably self.__A and
    # self.__P) are elided in this extract.
                 name = "GenericAlgorithm",
        self.__name = str(name)
        # Algorithm storage: instance dict, module file and resolved name
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        # Merge user parameters (dict or script) into the internal dictionary
        self.updateParameters( asDict, asScript )
        # The algorithm may be given directly, or read from a script variable
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): the "else:" line of this selection is elided here.
            __Algo = asAlgorithm
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            # Locate, import and instantiate the chosen elementary algorithm
            self.__setAlgorithm( self.__A )
        # Duplicated from Algorithm: variable names kept out of the public namespace
        self.__variable_names_not_public = {"nextStep":False}
    def updateParameters(self,
        # NOTE(review): the parameter lines of this signature (asDict,
        # asScript) and its closing parenthesis are elided in this extract.
        "Parameters update"
        # Parameters may be given directly as a dict, or read from a script
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # NOTE(review): the "else:" branch assigning __Dict from asDict is
        # elided here.
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Run the assimilation computation from a dictionary of case objects"
        # Start from a clean operator evaluation cache
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each case object through its getO() accessor when available
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        # Error covariances are taken as-is
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # Check size/shape consistency of all inputs before running
        self.__shape_validate()
        # NOTE(review): the keyword-argument lines of this run(...) call
        # (Xb, Y, U, HO, EM, CM, R, B, Q) and its closing parenthesis are
        # elided in this extract.
        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Run the assimilation computation through a YACS XML scheme"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        # NOTE(review): the "else:" line wrapping the two lines below is
        # elided in this extract.
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        # The full SALOME/YACS/ADAO environment is mandatory for execution
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        # NOTE(review): a "try:" line and the sibling imports (pilot,
        # loader) are elided around the SALOME runtime setup below.
        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        # Register the scheme file itself as a proc catalog
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
        # NOTE(review): a "try:" line is elided before the load below.
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        # Report parser diagnostics, if any
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
        # NOTE(review): a validity check line (presumably "if not
        # p.isValid():") is elided before the two prints below.
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        # Consistency (link) check before execution
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        # Execute, then report a final error state if any
        e = pilot.ExecutorSwig()
        # NOTE(review): the execution call line is elided here.
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Check the existence of a variable or parameter key and return it"
        # Priority: algorithm stored variable, then parameter, then the
        # whole public parameter dictionary.
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
        # NOTE(review): the final "else:" line and the closing
        # "return allvariables" are elided in this extract.
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
1149 def pop(self, k, d):
1150 "Necessaire pour le pickling"
1151 return self.__algorithm.pop(k, d)
1153 def getAlgorithmRequiredParameters(self, noDetails=True):
1154 "Renvoie la liste des paramètres requis selon l'algorithme"
1155 return self.__algorithm.getRequiredParameters(noDetails)
1157 def getAlgorithmInputArguments(self):
1158 "Renvoie la liste des entrées requises selon l'algorithme"
1159 return self.__algorithm.getInputArguments()
1161 def getAlgorithmAttributes(self):
1162 "Renvoie la liste des attributs selon l'algorithme"
1163 return self.__algorithm.setAttributes()
    def setObserver(self, __V, __O, __I, __S):
        "Attach observer __O (with info __I, scheduler __S) to variable __V"
        # An algorithm exposing StoredVariables must already be selected
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        # NOTE(review): an "else:" line and some keyword-argument lines of
        # this call (hook function, scheduler, closing parenthesis) are
        # elided in this extract.
            self.__algorithm.StoredVariables[ __V ].setDataObserver(
                HookParameters = __I,

    def removeObserver(self, __V, __O, __A = False):
        "Detach observer __O from variable __V (__A: remove all observers)"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        # NOTE(review): an "else:" line and the argument lines of this call
        # are elided in this extract.
            return self.__algorithm.StoredVariables[ __V ].removeDataObserver(

    def hasObserver(self, __V):
        "True when variable __V currently has a data observer attached"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            # NOTE(review): a "return False" line is elided here.
        if __V not in self.__algorithm:
            # NOTE(review): a "return False" line is elided here.
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
    # NOTE(review): the "def keys(self):" header line is elided in this extract.
        # Public keys: algorithm variables plus parameters, minus the
        # non-public variable names.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1207 def __contains__(self, key=None):
1208 "D.__contains__(k) -> True if D has a key k, else False"
1209 return key in self.__algorithm or key in self.__P
    # NOTE(review): the "def __repr__(self):" header line is elided here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)

    # NOTE(review): the "def __str__(self):" header line is elided here.
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm used to carry out the assimilation study.  The
        argument is a character string referring to the name of an algorithm
        implementing the operation on the fixed arguments.
        """
        # NOTE(review): the "if choice is None:" guard line is elided here.
            raise ValueError("Error: algorithm choice has to be given")
        # The algorithm can only be chosen once per instance
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete algorithm file on sys.path
        # ------------------------------------------
        # NOTE(review): a "module_path = None" initialisation line is
        # elided here.
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------------
        # NOTE(review): a "try:" line is elided before the import below.
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            # Restore the original search path
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the file's elementary algorithm type
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1257 def __shape_validate(self):
1259 Validation de la correspondance correcte des tailles des variables et
1260 des matrices s'il y en a.
1262 if self.__Xb is None: __Xb_shape = (0,)
1263 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1264 elif hasattr(self.__Xb,"shape"):
1265 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1266 else: __Xb_shape = self.__Xb.shape()
1267 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1269 if self.__Y is None: __Y_shape = (0,)
1270 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1271 elif hasattr(self.__Y,"shape"):
1272 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1273 else: __Y_shape = self.__Y.shape()
1274 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1276 if self.__U is None: __U_shape = (0,)
1277 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1278 elif hasattr(self.__U,"shape"):
1279 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1280 else: __U_shape = self.__U.shape()
1281 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1283 if self.__B is None: __B_shape = (0,0)
1284 elif hasattr(self.__B,"shape"):
1285 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1286 else: __B_shape = self.__B.shape()
1287 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1289 if self.__R is None: __R_shape = (0,0)
1290 elif hasattr(self.__R,"shape"):
1291 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1292 else: __R_shape = self.__R.shape()
1293 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1295 if self.__Q is None: __Q_shape = (0,0)
1296 elif hasattr(self.__Q,"shape"):
1297 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1298 else: __Q_shape = self.__Q.shape()
1299 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1301 if len(self.__HO) == 0: __HO_shape = (0,0)
1302 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1303 elif hasattr(self.__HO["Direct"],"shape"):
1304 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1305 else: __HO_shape = self.__HO["Direct"].shape()
1306 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1308 if len(self.__EM) == 0: __EM_shape = (0,0)
1309 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1310 elif hasattr(self.__EM["Direct"],"shape"):
1311 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1312 else: __EM_shape = self.__EM["Direct"].shape()
1313 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1315 if len(self.__CM) == 0: __CM_shape = (0,0)
1316 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1317 elif hasattr(self.__CM["Direct"],"shape"):
1318 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1319 else: __CM_shape = self.__CM["Direct"].shape()
1320 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1322 # Vérification des conditions
1323 # ---------------------------
1324 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1325 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1326 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1327 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1329 if not( min(__B_shape) == max(__B_shape) ):
1330 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1331 if not( min(__R_shape) == max(__R_shape) ):
1332 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1333 if not( min(__Q_shape) == max(__Q_shape) ):
1334 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1335 if not( min(__EM_shape) == max(__EM_shape) ):
1336 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1338 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1339 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1340 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1341 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1342 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1343 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1344 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1345 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1347 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1348 if self.__algorithmName in ["EnsembleBlue",]:
1349 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1350 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1351 for member in asPersistentVector:
1352 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1353 __Xb_shape = min(__B_shape)
1355 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1357 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1358 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1360 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1361 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1363 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1364 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1366 if ("Bounds" in self.__P) \
1367 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1368 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1369 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1370 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1374 # ==============================================================================
class RegulationAndParameters(object):
    """
    General action-interface class for regulation and its parameters.
    """
    # NOTE(review): the "def __init__(self, ...)" header and several of its
    # parameter and initialisation lines (notably self.__P) are elided in
    # this extract.
                 name = "GenericRegulation",
        self.__name = str(name)
        # The algorithm may be given directly, or read from a script variable
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): the "else:" line of this selection is elided here.
            __Algo = asAlgorithm
        # Parameters may be given directly as a dict, or read from a script
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Check the existence of a variable or parameter key and return it"
        # NOTE(review): the guard lines around this return (key lookup
        # versus returning the whole dictionary) are elided in this extract.
            return self.__P[key]
1413 # ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class: binds a user observer function
    to one or several algorithm variables.
    """
    # NOTE(review): the "def __init__(self, ...)" header and several of its
    # parameter and initialisation lines are elided in this extract.
                 name = "GenericObserver",
        self.__name = str(name)
        # Normalise the observed variable name(s) into the tuple self.__V,
        # with matching info strings in self.__I.
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
            # NOTE(review): the branch body and "else:" line are elided here.
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            # NOTE(review): an "else:" line is elided here.
                self.__I = (str(withInfo),)
        # NOTE(review): an "else:" line is elided here.
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        # The observer is either given as an object, or built from user
        # text / template / script through UserScript and Observer2Func.
        if asObsObject is not None:
            self.__O = asObsObject
        # NOTE(review): an "else:" line is elided here.
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        # Register the observer on each named variable of the hosting algorithm
        for k in range(len(self.__V)):
            # NOTE(review): the lines unpacking ename/einfo for this
            # iteration are elided here.
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            # NOTE(review): an "else:" line is elided here.
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    # NOTE(review): the "def __repr__(self):" header line is elided here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    # NOTE(review): the "def __str__(self):" header line is elided here.
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1476 # ==============================================================================
class UserScript(object):
    """
    General interface class for user-supplied script text: the function
    text self.__F comes from a direct string, a named template, or a
    script file.
    """
    # NOTE(review): the "def __init__(self, ...)" header and several of its
    # parameter/branch lines are elided in this extract.
                 name = "GenericUserScript",
        self.__name = str(name)
        # Source selection, by decreasing priority: direct string, known
        # template (per interface name), script file, then a fallback
        # (elided below).
        if asString is not None:
        # NOTE(review): the assignment branch body is elided here.
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        # NOTE(review): the final "else:" fallback branch is elided here.

    # NOTE(review): the "def __repr__(self):" header line is elided here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    # NOTE(review): the "def __str__(self):" header line is elided here.
        "x.__str__() <==> str(x)"
        return str(self.__F)
1510 # ==============================================================================
class ExternalParameters(object):
    """
    General interface class for user-defined external parameters, kept in
    the internal dictionary self.__P with dict-like access methods.
    """
    # NOTE(review): the "def __init__(self, ...)" header and several of its
    # parameter and initialisation lines (notably self.__P) are elided in
    # this extract.
                 name = "GenericExternalParameters",
        self.__name = str(name)
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
        # NOTE(review): the parameter lines of this signature (asDict,
        # asScript) and its closing parenthesis are elided in this extract.
        "Parameters update"
        # Parameters may be given directly as a dict, or read from a script
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        # NOTE(review): the "else:" branch assigning __Dict from asDict is
        # elided here.
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        # NOTE(review): the guard lines around the two returns below
        # (key found versus key listing) are elided in this extract.
            return self.__P[key]
            return list(self.__P.keys())

    # NOTE(review): the "def keys(self):" header line is elided here.
        return list(self.__P.keys())

    def pop(self, k, d):
        # Remove key k, returning its value, or default d when absent.
        return self.__P.pop(k, d)

    # NOTE(review): the "def items(self):" header line is elided here.
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1559 # ==============================================================================
# NOTE(review): corrupted extract — residual line numbers fused at column 0
# and missing physical lines (e.g. the "def __init__" line, docstring
# delimiters, several else/try branches). Code kept byte-identical below.
# State: generic state-vector interface. Wraps either a single vector
# (numpy.matrix column) or a series of vectors (Persistence.OneVector),
# loadable from direct values, a script, or data files.
1560 class State(object):
1562 Classe générale d'interface de type état
# Parameters of the (missing) __init__ line: name, asVector,
# asPersistentVector, asScript, asDataFile, colNames, colMajor,
# scheduledBy, toBeChecked — TODO confirm against the full file.
1565 name = "GenericVector",
1567 asPersistentVector = None,
1573 toBeChecked = False,
1576 Permet de définir un vecteur :
1577 - asVector : entrée des données, comme un vecteur compatible avec le
1578 constructeur de numpy.matrix, ou "True" si entrée par script.
1579 - asPersistentVector : entrée des données, comme une série de vecteurs
1580 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1581 type Persistence, ou "True" si entrée par script.
1582 - asScript : si un script valide est donné contenant une variable
1583 nommée "name", la variable est de type "asVector" (par défaut) ou
1584 "asPersistentVector" selon que l'une de ces variables est placée à
1586 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1587 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1588 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1589 nommée "name"), on récupère les colonnes et on les range ligne après
1590 ligne (colMajor=False, par défaut) ou colonne après colonne
1591 (colMajor=True). La variable résultante est de type "asVector" (par
1592 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1595 self.__name = str(name)
1596 self.__check = bool(toBeChecked)
1600 self.__is_vector = False
1601 self.__is_series = False
# Input resolution: script takes precedence, then data file(s), then the
# directly supplied values (line 1630 below).
1603 if asScript is not None:
1604 __Vector, __Series = None, None
1605 if asPersistentVector:
1606 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1608 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1609 elif asDataFile is not None:
1610 __Vector, __Series = None, None
1611 if asPersistentVector:
1612 if colNames is not None:
1613 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1615 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are stored column-major already, hence the transpose is
# applied in the opposite colMajor case for that format.
1616 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1617 __Series = numpy.transpose(__Series)
1618 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1619 __Series = numpy.transpose(__Series)
1621 if colNames is not None:
1622 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1624 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1626 __Vector = numpy.ravel(__Vector, order = "F")
1628 __Vector = numpy.ravel(__Vector, order = "C")
1630 __Vector, __Series = asVector, asPersistentVector
# Store as a column matrix; NOTE(review): numpy.float was removed in
# NumPy >= 1.24 and numpy.matrix is deprecated — these lines will fail on
# modern NumPy, but cannot be fixed safely in this truncated extract.
1632 if __Vector is not None:
1633 self.__is_vector = True
1634 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1635 self.shape = self.__V.shape
1636 self.size = self.__V.size
1637 elif __Series is not None:
1638 self.__is_series = True
1639 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1640 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string input — unsafe on untrusted data.
1641 if isinstance(__Series, str): __Series = eval(__Series)
1642 for member in __Series:
1643 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
# shape may be an attribute (tuple/list) or a callable on Persistence
# objects, hence the two access forms.
1646 if isinstance(self.__V.shape, (tuple, list)):
1647 self.shape = self.__V.shape
1649 self.shape = self.__V.shape()
1650 if len(self.shape) == 1:
1651 self.shape = (self.shape[0],1)
1652 self.size = self.shape[0] * self.shape[1]
1654 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1656 if scheduledBy is not None:
1657 self.__T = scheduledBy
# Return the stored object, optionally paired with its scheduler.
1659 def getO(self, withScheduler=False):
1661 return self.__V, self.__T
1662 elif self.__T is None:
# Internal-type predicates (their "def isvector/isseries" lines are
# missing from this extract — TODO confirm).
1668 "Vérification du type interne"
1669 return self.__is_vector
1672 "Vérification du type interne"
1673 return self.__is_series
1676 "x.__repr__() <==> repr(x)"
1677 return repr(self.__V)
1680 "x.__str__() <==> str(x)"
1681 return str(self.__V)
1683 # ==============================================================================
# NOTE(review): corrupted extract — residual line numbers fused at column 0
# and missing physical lines (def lines, else/try/except branches). Code
# kept byte-identical; English review comments only added.
# Covariance: generic covariance-matrix interface with four internal
# representations — scalar * identity, diagonal vector, full matrix, or a
# duck-typed user object — each branch of every method dispatching on the
# is*() predicates below.
1684 class Covariance(object):
1686 Classe générale d'interface de type covariance
1689 name = "GenericCovariance",
1690 asCovariance = None,
1691 asEyeByScalar = None,
1692 asEyeByVector = None,
1695 toBeChecked = False,
1698 Permet de définir une covariance :
1699 - asCovariance : entrée des données, comme une matrice compatible avec
1700 le constructeur de numpy.matrix
1701 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1702 multiplicatif d'une matrice de corrélation identité, aucune matrice
1703 n'étant donc explicitement à donner
1704 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1705 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1706 n'étant donc explicitement à donner
1707 - asCovObject : entrée des données comme un objet python, qui a les
1708 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1709 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1710 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1711 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1712 pleine doit être vérifié
1714 self.__name = str(name)
1715 self.__check = bool(toBeChecked)
1718 self.__is_scalar = False
1719 self.__is_vector = False
1720 self.__is_matrix = False
1721 self.__is_object = False
# Script input takes precedence over directly supplied values (1734).
1723 if asScript is not None:
1724 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1726 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1728 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1730 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1732 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1734 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Representation selection: scalar, then vector, then matrix, then object.
# numpy.abs() forces non-negative variances for scalar/vector forms.
1736 if __Scalar is not None:
1737 if numpy.matrix(__Scalar).size != 1:
1738 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1739 self.__is_scalar = True
1740 self.__C = numpy.abs( float(__Scalar) )
1743 elif __Vector is not None:
1744 self.__is_vector = True
1745 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1746 self.shape = (self.__C.size,self.__C.size)
1747 self.size = self.__C.size**2
1748 elif __Matrix is not None:
1749 self.__is_matrix = True
1750 self.__C = numpy.matrix( __Matrix, float )
1751 self.shape = self.__C.shape
1752 self.size = self.__C.size
1753 elif __Object is not None:
1754 self.__is_object = True
# Duck-typing check: the user object must provide the mandatory API.
1756 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1757 if not hasattr(self.__C,at):
1758 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1759 if hasattr(self.__C,"shape"):
1760 self.shape = self.__C.shape
1763 if hasattr(self.__C,"size"):
1764 self.size = self.__C.size
1769 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Sanity checks: squareness and (symmetric) positive-definiteness; the
# Cholesky-based SPD checks run only when requested or when logging is
# verbose (level < WARNING).
1773 def __validate(self):
1775 if self.__C is None:
1776 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1777 if self.ismatrix() and min(self.shape) != max(self.shape):
1778 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1779 if self.isobject() and min(self.shape) != max(self.shape):
1780 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1781 if self.isscalar() and self.__C <= 0:
1782 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1783 if self.isvector() and (self.__C <= 0).any():
1784 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1785 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1787 L = numpy.linalg.cholesky( self.__C )
1789 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1790 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1792 L = self.__C.cholesky()
1794 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Internal-type predicates (their "def isscalar/isvector/ismatrix/
# isobject" lines are missing from this extract — TODO confirm).
1797 "Vérification du type interne"
1798 return self.__is_scalar
1801 "Vérification du type interne"
1802 return self.__is_vector
1805 "Vérification du type interne"
1806 return self.__is_matrix
1809 "Vérification du type interne"
1810 return self.__is_object
# getI(): inverse, returned as a new Covariance in the same representation.
1815 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1816 elif self.isvector():
1817 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1818 elif self.isscalar():
1819 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1820 elif self.isobject():
1821 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1823 return None # Indispensable
# getT(): transpose; a no-op for scalar/vector (diagonal) representations.
1828 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1829 elif self.isvector():
1830 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1831 elif self.isscalar():
1832 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1833 elif self.isobject():
1834 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
# cholesky(): lower-triangular factor; sqrt of the diagonal suffices for
# scalar/vector representations.
1837 "Décomposition de Cholesky"
1839 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1840 elif self.isvector():
1841 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1842 elif self.isscalar():
1843 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1844 elif self.isobject() and hasattr(self.__C,"cholesky"):
1845 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
# choleskyI(): inverse of the Cholesky factor.
1847 def choleskyI(self):
1848 "Inversion de la décomposition de Cholesky"
1850 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1851 elif self.isvector():
1852 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1853 elif self.isscalar():
1854 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1855 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1856 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
# sqrt(): matrix square root (scipy.linalg.sqrtm for full matrices).
1859 "Racine carrée matricielle"
1862 return Covariance(self.__name+"C", asCovariance = scipy.linalg.sqrtm(self.__C) )
1863 elif self.isvector():
1864 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1865 elif self.isscalar():
1866 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1867 elif self.isobject() and hasattr(self.__C,"sqrt"):
1868 return Covariance(self.__name+"C", asCovObject = self.__C.sqrt() )
# sqrtI(): inverse of the matrix square root.
1871 "Inversion de la racine carrée matricielle"
1874 return Covariance(self.__name+"H", asCovariance = scipy.linalg.sqrtm(self.__C).I )
1875 elif self.isvector():
1876 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1877 elif self.isscalar():
1878 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1879 elif self.isobject() and hasattr(self.__C,"sqrtI"):
1880 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtI() )
# diag(): diagonal as a plain array; scalar form needs msize to expand.
1882 def diag(self, msize=None):
1883 "Diagonale de la matrice"
1885 return numpy.diag(self.__C)
1886 elif self.isvector():
1888 elif self.isscalar():
1890 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1892 return self.__C * numpy.ones(int(msize))
1893 elif self.isobject():
1894 return self.__C.diag()
# asfullmatrix(): densify to an explicit numpy.matrix.
1896 def asfullmatrix(self, msize=None):
1900 elif self.isvector():
1901 return numpy.matrix( numpy.diag(self.__C), float )
1902 elif self.isscalar():
1904 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1906 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1907 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1908 return self.__C.asfullmatrix()
# trace(): sum of diagonal terms; scalar form needs msize.
1910 def trace(self, msize=None):
1911 "Trace de la matrice"
1913 return numpy.trace(self.__C)
1914 elif self.isvector():
1915 return float(numpy.sum(self.__C))
1916 elif self.isscalar():
1918 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1920 return self.__C * int(msize)
1921 elif self.isobject():
1922 return self.__C.trace()
1928 "x.__repr__() <==> repr(x)"
1929 return repr(self.__C)
1932 "x.__str__() <==> str(x)"
1933 return str(self.__C)
# Arithmetic protocol: for scalar/vector representations the diagonal is
# added/subtracted in place on a copy of the other operand via a strided
# view of its flattened form ([::shape[1]+1] walks the main diagonal).
1935 def __add__(self, other):
1936 "x.__add__(y) <==> x+y"
1937 if self.ismatrix() or self.isobject():
1938 return self.__C + numpy.asmatrix(other)
1939 elif self.isvector() or self.isscalar():
1940 _A = numpy.asarray(other)
1941 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1942 return numpy.asmatrix(_A)
1944 def __radd__(self, other):
1945 "x.__radd__(y) <==> y+x"
1946 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1948 def __sub__(self, other):
1949 "x.__sub__(y) <==> x-y"
1950 if self.ismatrix() or self.isobject():
1951 return self.__C - numpy.asmatrix(other)
1952 elif self.isvector() or self.isscalar():
1953 _A = numpy.asarray(other)
1954 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1955 return numpy.asmatrix(_A)
1957 def __rsub__(self, other):
1958 "x.__rsub__(y) <==> y-x"
1959 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1962 "x.__neg__() <==> -x"
# __mul__/__rmul__ dispatch on both the representation and the operand
# type, distinguishing vector operands (ravel-size match) from matrix
# operands (leading-dimension match).
1965 def __mul__(self, other):
1966 "x.__mul__(y) <==> x*y"
1967 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1968 return self.__C * other
1969 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1970 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1971 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1972 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1973 return self.__C * numpy.asmatrix(other)
1975 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1976 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1977 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1978 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1979 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1980 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1982 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1983 elif self.isscalar() and isinstance(other,numpy.matrix):
1984 return self.__C * other
1985 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1986 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1987 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1989 return self.__C * numpy.asmatrix(other)
1990 elif self.isobject():
1991 return self.__C.__mul__(other)
1993 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1995 def __rmul__(self, other):
1996 "x.__rmul__(y) <==> y*x"
1997 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1998 return other * self.__C
1999 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2000 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2001 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2002 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2003 return numpy.asmatrix(other) * self.__C
2005 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2006 elif self.isvector() and isinstance(other,numpy.matrix):
2007 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2008 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2009 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2010 return numpy.asmatrix(numpy.array(other) * self.__C)
2012 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2013 elif self.isscalar() and isinstance(other,numpy.matrix):
2014 return other * self.__C
2015 elif self.isobject():
2016 return self.__C.__rmul__(other)
2018 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2021 "x.__len__() <==> len(x)"
2022 return self.shape[0]
2024 # ==============================================================================
# NOTE(review): corrupted extract — residual line numbers fused at column 0;
# the body of func() (presumably an exec of self.__corps) and the
# "def getfunc" line are missing — TODO confirm against the full file.
# Observer2Func: builds an observer callback from its source text.
2025 class Observer2Func(object):
2027 Creation d'une fonction d'observateur a partir de son texte
2029 def __init__(self, corps=""):
2030 self.__corps = corps
2031 def func(self,var,info):
2032 "Fonction d'observation"
2035 "Restitution du pointeur de fonction dans l'objet"
2038 # ==============================================================================
# NOTE(review): corrupted extract — residual line numbers fused at column 0;
# the "self.__viewers = {"/"self.__loaders = {" header lines and the
# else-branches of dump()/load() are missing from this view.
# CaseLogger: records the sequence of case-creation commands and replays
# them through pluggable viewer (dump) and loader (load) formatters.
2039 class CaseLogger(object):
2041 Conservation des commandes de creation d'un cas
2043 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2044 self.__name = str(__name)
2045 self.__objname = str(__objname)
2046 self.__logSerie = []
2047 self.__switchoff = False
# Built-in output formatters (viewers) ...
2049 "TUI" :Interfaces._TUIViewer,
2050 "SCD" :Interfaces._SCDViewer,
2051 "YACS":Interfaces._YACSViewer,
# ... and input formatters (loaders); both maps are user-extensible.
2054 "TUI" :Interfaces._TUIViewer,
2055 "COM" :Interfaces._COMViewer,
2057 if __addViewers is not None:
2058 self.__viewers.update(dict(__addViewers))
2059 if __addLoaders is not None:
2060 self.__loaders.update(dict(__addLoaders))
# Record one command (name, keys, locals, preamble, switchoff flag);
# ignored while recording is switched off.
2062 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2063 "Enregistrement d'une commande individuelle"
2064 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2065 if "self" in __keys: __keys.remove("self")
2066 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
# Recording toggles (their def lines are missing from this extract).
2068 self.__switchoff = True
2070 self.__switchoff = False
# Serialize the recorded commands with the requested viewer.
2072 def dump(self, __filename=None, __format="TUI", __upa=""):
2073 "Restitution normalisée des commandes"
2074 if __format in self.__viewers:
2075 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2077 raise ValueError("Dumping as \"%s\" is not available"%__format)
2078 return __formater.dump(__filename, __upa)
# Load commands back with the requested loader.
2080 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2081 "Chargement normalisé des commandes"
2082 if __format in self.__loaders:
2083 __formater = self.__loaders[__format]()
2085 raise ValueError("Loading as \"%s\" is not available"%__format)
2086 return __formater.load(__filename, __content, __object)
2088 # ==============================================================================
# NOTE(review): corrupted extract — the "def MultiFonction(__xserie," line
# itself is missing from this view, along with the multiprocessing guard
# and return lines. Code kept byte-identical below.
# MultiFonction: maps _sFunction over an ordered series of input vectors,
# optionally in parallel with multiprocessing.Pool, with positional
# (_extraArguments as list/tuple/map) or keyword (dict, sequential path
# only) extra arguments.
2091 _extraArguments = None,
2092 _sFunction = lambda x: x,
2097 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2098 correspondante de valeurs de la fonction en argument
2100 # Vérifications et définitions initiales
2101 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2102 if not PlatformInfo.isIterable( __xserie ):
2103 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker-count normalization: any value < 1 (or None) presumably means
# "use all cores" — the assignment branch is missing from this extract.
2105 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2108 __mpWorkers = int(_mpWorkers)
2110 import multiprocessing
# Parallel path: build the job list, then fan out with a process pool.
2121 if _extraArguments is None:
2123 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2124 for __xvalue in __xserie:
2125 _jobs.append( [__xvalue, ] + list(_extraArguments) )
2127 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2128 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2129 import multiprocessing
2130 with multiprocessing.Pool(__mpWorkers) as pool:
2131 __multiHX = pool.map( _sFunction, _jobs )
2134 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential fallback path.
2136 # logging.debug("MULTF Internal monoprocessing calculation begin")
2138 if _extraArguments is None:
2139 for __xvalue in __xserie:
2140 __multiHX.append( _sFunction( __xvalue ) )
2141 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2142 for __xvalue in __xserie:
2143 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2144 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2145 for __xvalue in __xserie:
2146 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2148 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2149 # logging.debug("MULTF Internal monoprocessing calculation end")
2151 # logging.debug("MULTF Internal multifonction calculations end")
2154 # ==============================================================================
# NOTE(review): corrupted extract — residual line numbers fused at column 0
# and several parameter/branch lines missing (e.g. _Y, _BI, _RI parameter
# lines, try/except around storage). Code kept byte-identical below.
# CostFunction3D: generic cost function J(x) = Jb + Jo for static/3D
# assimilation algorithms, dispatching on the quality measure _QM and
# optionally storing intermediate quantities into _SSV per the _SSC list.
2155 def CostFunction3D(_x,
2156 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2157 _HmX = None, # Simulation déjà faite de Hm(x)
2158 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2163 _SIV = False, # A résorber pour la 8.0
2164 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2165 _nPS = 0, # nbPreviousSteps
2166 _QM = "DA", # QualityMeasure
2167 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2168 _fRt = False, # Restitue ou pas la sortie étendue
2169 _sSc = True, # Stocke ou pas les SSC
2172 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2173 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2174 DFO, QuantileRegression
# Alias "append" to "store" on Persistence-backed stored variables so the
# code below can treat lists and Persistence objects uniformly.
2180 for k in ["CostFunctionJ",
2186 "SimulatedObservationAtCurrentOptimum",
2187 "SimulatedObservationAtCurrentState",
2191 if hasattr(_SSV[k],"store"):
2192 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
# Normalize the state to a column matrix and optionally record it.
2194 _X = numpy.asmatrix(numpy.ravel( _x )).T
2195 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2196 _SSV["CurrentState"].append( _X )
# Evaluate (or reuse) the simulated observation Hm(x).
2198 if _HmX is not None:
2202 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2206 _HX = _Hm( _X, *_arg )
2207 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2209 if "SimulatedObservationAtCurrentState" in _SSC or \
2210 "SimulatedObservationAtCurrentOptimum" in _SSC:
2211 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# Cost computation, dispatched on the requested quality measure; a NaN
# simulation short-circuits to NaN costs.
2213 if numpy.any(numpy.isnan(_HX)):
2214 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2216 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2217 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2218 if _BI is None or _RI is None:
2219 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2220 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2221 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2222 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2223 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2225 raise ValueError("Observation error covariance matrix has to be properly defined!")
2227 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2228 elif _QM in ["LeastSquares", "LS", "L2"]:
2230 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2231 elif _QM in ["AbsoluteValue", "L1"]:
2233 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2234 elif _QM in ["MaximumError", "ME"]:
2236 Jo = numpy.max( numpy.abs(_Y - _HX) )
2237 elif _QM in ["QR", "Null"]:
2241 raise ValueError("Unknown asked quality measure!")
2243 J = float( Jb ) + float( Jo )
# Store the cost terms, then the optimum-tracking quantities (the argmin
# is taken over steps after _nPS previous ones).
2246 _SSV["CostFunctionJb"].append( Jb )
2247 _SSV["CostFunctionJo"].append( Jo )
2248 _SSV["CostFunctionJ" ].append( J )
2250 if "IndexOfOptimum" in _SSC or \
2251 "CurrentOptimum" in _SSC or \
2252 "SimulatedObservationAtCurrentOptimum" in _SSC:
2253 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2254 if "IndexOfOptimum" in _SSC:
2255 _SSV["IndexOfOptimum"].append( IndexMin )
2256 if "CurrentOptimum" in _SSC:
2257 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2258 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2259 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
# Return value selection (the return lines are missing from this extract);
# QR presumably returns the simulation for QuantileRegression — TODO confirm.
2264 if _QM in ["QR"]: # Pour le QuantileRegression
2269 # ==============================================================================
# Self-test entry point (the rest of the block lies past this extract).
2270 if __name__ == "__main__":
2271 print('\n AUTODIAGNOSTIC\n')