1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
class CacheManager(object):
    """
    General management class for a cache of calculations.

    It stores previously computed operator evaluations H(x), together with
    the point x, its norm and the operator name, so that a later request for
    a point close enough to a cached one can be served without recomputation.

    NOTE(review): this excerpt is an elided listing — several original lines
    (the "def __init__"/"def disable"/"def enable" headers, some "else:" and
    "return" branches) are not visible here; comments describe only what is
    shown and hedge the rest.
    """
    # --- (elided) "def __init__(self," header; visible keyword defaults: ---
                 toleranceInRedundancy = 1.e-18,  # relative tolerance to declare two points redundant
                 lenghtOfRedundancy    = -1,      # max cache length; -1 means "auto-size on first store" ("lenght" spelling is part of the public API)
        """
        The tolerance characteristics can be modified at creation time.
        """
        self.__tolerBP  = float(toleranceInRedundancy)  # redundancy tolerance
        self.__lenghtOR = int(lenghtOfRedundancy)       # current maximum cache length
        self.__initlnOR = self.__lenghtOR               # initial length, kept to restore after disable/enable
        self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator
        # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)

    def wasCalculatedIn(self, xValue, oName="" ): #, info="" ):
        "Check whether a calculation matching the given value already exists"
        # Scan the cache backwards (most recent first), bounded by the maximum length
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            # Skip entries with a different operator name or a different state size
            if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
                # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
            # Cache hit: distance to the stored point is below the tolerance scaled by the stored norm
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __HxV = self.__listOPCV[i][1]  # cached operator result for this point
                # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Store, for an operator oName, a result Hx matching the value x"
        if self.__lenghtOR < 0:
            # First store ever: auto-size the cache from the state dimension
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if the operator is new
            self.__lenghtOR += 2 * xValue.size + 2
            self.__initlnOR += self.__lenghtOR
            self.__seenNames.append(str(oName))
        while len(self.__listOPCV) > self.__lenghtOR:
            # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
            self.__listOPCV.pop(0)  # FIFO eviction of the oldest cached entry
        # Append the new entry: (flattened point copy, [elided HxValue], point norm, [elided oName])
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            numpy.linalg.norm(xValue),

    # (elided) disable(self): the visible lines suggest it snapshots the
    # length and turns caching off — TODO confirm against the full source
        self.__initlnOR = self.__lenghtOR
        self.__enabled = False

    # (elided) enable(self): restores the initial cache length and re-enables caching
        self.__lenghtOR = self.__initlnOR
        self.__enabled = True
111 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator.

    The operator is defined either by a Python (multi-)function or by a
    matrix. Evaluations are counted per instance and globally, and may be
    served from a shared cache ("Operator.CM") to avoid redundancy.

    NOTE(review): this excerpt is an elided listing — several original lines
    (the "def __init__" header, class-level counters NbCallsAsMatrix /
    NbCallsAsMethod / NbCallsOfCached, the shared CacheManager "CM", some
    "else:"/"return" branches) are not visible here; comments describe only
    what is shown.
    """
    # --- (elided) "def __init__(self," header; visible keyword defaults: ---
                 name                 = "GenericOperator",
                 avoidingRedundancy   = True,
                 inputAsMultiFunction = False,
                 enableMultiProcess   = False,
                 extraArguments       = None,
        """
        An object of this kind is built by supplying, with one of the two
        keywords, either a Python function or multi-function, or a matrix.
        Arguments:
        - name : operator name
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant calculations
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        """
        self.__name = str(name)
        # Per-instance evaluation counters: matrix products, method calls, cache hits
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC   = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # Function already accepts a series of inputs: use it directly
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type   = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Mono-function: wrap it so it can be applied to a series of inputs
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type   = "Method"
        elif fromMatrix is not None:
            # NOTE(review): numpy.matrix is deprecated and the numpy.float
            # alias was removed in NumPy >= 1.24 — confirm supported versions
            self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
            self.__Type   = "Matrix"

    def disableAvoidingRedundancy(self):
        # Turn off the shared calculation cache
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # (elided "if self.__AvoidRC: Operator.CM.enable()" branch presumed
        # above — TODO confirm); the visible line is the fallback that keeps
        # the cache disabled when redundancy avoidance is off
            Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to a series of arguments
        xValue. Each argument is assumed to already be of the proper type.
        Series arguments:
        - xValue : argument suitable for applying the operator
        - HValue : precomputed value of the operator at that point
        - argsAsSerie : whether the arguments are a mono- or multi-value
        """
        if HValue is not None:
        # Defensive check that the (elided) normalization produced an iterable
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        if _HValue is not None:
            # Precomputed values supplied: store them instead of evaluating
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
                Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
            # No precomputed values: evaluate (or retrieve from cache) each point
            for i, xv in enumerate(_xValue):
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                    __alreadyCalculated = False
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    if self.__Matrix is not None:
                        # Linear case: direct matrix-vector product
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix * xv
                        self.__addOneMethodCall()
                _HxValue.append( _hv )
            # Evaluate all pending points in one multi-function call
            if len(_xserie)>0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # Calcul MF
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
                if not hasattr(_hserie, "pop"):
                    raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
                        Operator.CM.storeValueInX(_xv,_hv,self.__name)
        if returnSerieAsArrayMatrix:
            # Stack all results column-wise into a single 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to pairs (xValue, uValue).
        Each argument is assumed to already be of the proper type. If uValue
        is None, the operator is assumed to apply to xValue only.
        Arguments:
        - paires : the pair arguments are:
          - xValue : argument X suitable for applying the operator
          - uValue : argument U suitable for applying the operator
        - argsAsSerie : whether the argument is a mono- or multi-value
        """
        if argsAsSerie: _xuValue = paires
        else:           _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        if self.__Matrix is not None:
            # Linear case: U is ignored, only X enters the matrix product
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix * _xValue )
            # Function case: pass the full pair when U is present, X alone otherwise
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # Calcul MF
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
        if returnSerieAsArrayMatrix:
            # Stack all results column-wise into a single 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to a series of arguments
        xValue, the operator being valid at xNominal. Each argument is
        assumed to already be of the proper type. If the operator is linear
        (a matrix), it is valid at any nominal point and xNominal can be
        anything. There is a single pair by default, and argsAsSerie
        indicates that the argument is multi-pair.
        Arguments:
        - paires : the pair arguments are:
          - xNominal : series of arguments giving the point where the
            operator is built in order to be applied afterwards
          - xValue : series of arguments suitable for applying the operator
        - argsAsSerie : whether the argument is a mono- or multi-value
        """
        if argsAsSerie: _nxValue = paires
        else:           _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        if self.__Matrix is not None:
            # Linear case: the nominal point is irrelevant
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix * _xValue )
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # Calcul MF
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
        if returnSerieAsArrayMatrix:
            # Stack all results column-wise into a single 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Return the operator in matrix form; for a function-defined operator
        an operating point (ValueForMethodForm) must be supplied.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
            # (elided series branch) evaluate the method at each supplied point
            self.__addOneMethodCall( len(ValueForMethodForm) )
            for _vfmf in ValueForMethodForm:
                mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
            # No operating point given for a function-form operator: cannot build a matrix
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        if argsAsSerie: return mValue
        else:           return mValue[-1]

    # (elided) "def shape(self):" header
        """
        Return the shape (numpy form) if the operator is available as a
        matrix.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
            # (elided "else:") no matrix form available
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the numbers of operator evaluations as a tuple:
        (local total, local matrix, local method, local cached,
         global total, global matrix, global method, global cached),
        or a single entry if "which" is given as an index.
        """
        # (elided) "__nbcalls = (" tuple opening
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one matrix evaluation"
        self.__NbCallsAsMatrix   += 1 # Decompte local
        Operator.NbCallsAsMatrix += 1 # Decompte global

    def __addOneMethodCall(self, nb = 1):
        "Count nb function evaluations"
        self.__NbCallsAsMethod   += nb # Decompte local
        Operator.NbCallsAsMethod += nb # Decompte global

    def __addOneCacheCall(self):
        "Count one cache hit"
        self.__NbCallsOfCached   += 1 # Decompte local
        Operator.NbCallsOfCached += 1 # Decompte global
400 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator
    (Direct, Linear Tangent, Adjoint).

    It builds the three elementary Operator objects from one of several user
    inputs: a single function (with approximated derivatives), three
    functions, a script, or a matrix.

    NOTE(review): this excerpt is an elided listing — several original lines
    (the "def __init__" header, some "else:"/"if" lines, closing brackets,
    the "__Parameters"/"__Matrix" initializations, "self.__FO" setup) are
    not visible here; comments describe only what is shown.
    """
    # --- (elided) "def __init__(self," header; visible keyword defaults: ---
                 name             = "GenericFullOperator",
                 asOneFunction    = None,  # 1 function
                 asThreeFunctions = None,  # 3 functions in a dictionary
                 asScript         = None,  # 1 or 3 function(s) given by script
                 asDict           = None,  # Parameters
                 extraArguments   = None,
                 inputAsMF        = False, # Function(s) given as multi-functions
        self.__name       = str(name)
        self.__check      = bool(toBeChecked)
        self.__extraArgs  = extraArguments
        # Merge user parameters into the (elided) default parameter dict
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priorité à EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "EnableMultiProcessingInDerivatives"  not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"]  = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            # Derivatives and evaluation multiprocessing are mutually exclusive
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "EnableMultiProcessingInEvaluation"  not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "withIncrement" in __Parameters: # Temporaire
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # --- Operator supplied through a script file ---
        if asScript is not None:
            __Matrix, __Function = None, None
                # (elided branch) matrix read from the script
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                # (elided branch) single direct function read from the script,
                # with derivatives approximated by finite differences
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                # Three explicit functions read from the script
                    "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            # --- Operator supplied directly as function(s) ---
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    # Bare callable given: wrap it in the expected dict form
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    # Full Tangent/Adjoint pair supplied: use as-is
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    # Only Direct supplied: derivatives will be approximated
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Fall back: reuse the Tangent as the Direct operator
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        # Historical Python 2 guard, kept commented out:
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        # Normalize the "applied in X" precomputed values into a dict
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        if scheduledBy is not None:
            self.__T = scheduledBy
        # --- Case 1: Direct function with finite-difference derivatives ---
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Fill in the finite-difference settings with their defaults
            if "CenteredFiniteDifference"           not in __Function: __Function["CenteredFiniteDifference"]           = False
            if "DifferentialIncrement"              not in __Function: __Function["DifferentialIncrement"]              = 0.01
            if "withdX"                             not in __Function: __Function["withdX"]                             = None
            if "withAvoidingRedundancy"             not in __Function: __Function["withAvoidingRedundancy"]             = avoidRC
            if "withToleranceInRedundancy"          not in __Function: __Function["withToleranceInRedundancy"]          = 1.e-18
            if "withLenghtOfRedundancy"             not in __Function: __Function["withLenghtOfRedundancy"]             = -1
            if "NumberOfProcesses"                  not in __Function: __Function["NumberOfProcesses"]                  = None
            if "withmfEnabled"                      not in __Function: __Function["withmfEnabled"]                      = inputAsMF
            from daCore import NumericObjects
            # Finite-difference approximation object providing Direct/Tangent/Adjoint
            FDA = NumericObjects.FDApproximation(
                Function              = __Function["Direct"],
                centeredDF            = __Function["CenteredFiniteDifference"],
                increment             = __Function["DifferentialIncrement"],
                dX                    = __Function["withdX"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy    = __Function["withLenghtOfRedundancy"],
                mpEnabled             = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers             = __Function["NumberOfProcesses"],
                mfEnabled             = __Function["withmfEnabled"],
            self.__FO["Direct"]  = Operator( name = self.__name,           fromMethod = FDA.DirectOperator,  avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        # --- Case 2: explicit Direct/Tangent/Adjoint functions ---
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"]  = Operator( name = self.__name,           fromMethod = __Function["Direct"],  avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        # --- Case 3: linear operator given as a matrix (adjoint = transpose) ---
        elif asMatrix is not None:
            # NOTE(review): numpy.matrix / numpy.float are deprecated in recent NumPy
            __matrice = numpy.matrix( __Matrix, numpy.float )
            self.__FO["Direct"]  = Operator( name = self.__name,           fromMatrix = __matrice,   avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice,   avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
        # Normalize the precomputed "applied in X" values into column matrices
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # Pour le cas où l'on a une vraie matrice
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key],    numpy.float ).T
            self.__FO["AppliedInX"] = None

    # (elided) "def __repr__(self):" header
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

    # (elided) "def __str__(self):" header
        "x.__str__() <==> str(x)"
        return str(self.__FO)
572 # ==============================================================================
573 class Algorithm(object):
575 Classe générale d'interface de type algorithme
577 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
578 d'assimilation, en fournissant un container (dictionnaire) de variables
579 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
581 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
583 def __init__(self, name):
585 L'initialisation présente permet de fabriquer des variables de stockage
586 disponibles de manière générique dans les algorithmes élémentaires. Ces
587 variables de stockage sont ensuite conservées dans un dictionnaire
588 interne à l'objet, mais auquel on accède par la méthode "get".
590 Les variables prévues sont :
591 - APosterioriCorrelations : matrice de corrélations de la matrice A
592 - APosterioriCovariance : matrice de covariances a posteriori : A
593 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
594 - APosterioriVariances : vecteur des variances de la matrice A
595 - Analysis : vecteur d'analyse : Xa
596 - BMA : Background moins Analysis : Xa - Xb
597 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
598 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
599 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
600 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
601 - CostFunctionJo : partie observations de la fonction-coût : Jo
602 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
603 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
604 - CurrentOptimum : état optimal courant lors d'itérations
605 - CurrentState : état courant lors d'itérations
606 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
607 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
608 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
609 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
610 - Innovation : l'innovation : d = Y - H(X)
611 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
612 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
613 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
614 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
615 - KalmanGainAtOptimum : gain de Kalman à l'optimum
616 - MahalanobisConsistency : indicateur de consistance des covariances
617 - OMA : Observation moins Analyse : Y - Xa
618 - OMB : Observation moins Background : Y - Xb
619 - ForecastState : état prédit courant lors d'itérations
620 - Residu : dans le cas des algorithmes de vérification
621 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
622 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
623 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
624 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
625 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
626 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
627 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
628 On peut rajouter des variables à stocker dans l'initialisation de
629 l'algorithme élémentaire qui va hériter de cette classe
631 logging.debug("%s Initialisation", str(name))
632 self._m = PlatformInfo.SystemUsage()
634 self._name = str( name )
635 self._parameters = {"StoreSupplementaryCalculations":[]}
636 self.__required_parameters = {}
637 self.__required_inputs = {
638 "RequiredInputValues":{"mandatory":(), "optional":()},
639 "ClassificationTags":[],
641 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
642 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
643 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
645 self.StoredVariables = {}
646 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
647 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
648 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
649 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
650 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
651 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
652 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
653 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
654 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
655 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
656 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
657 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
658 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
659 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
660 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
661 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
662 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
663 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
664 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
665 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
666 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
667 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
668 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
669 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
670 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
671 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
672 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
673 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
674 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
675 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
676 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
677 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
678 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
679 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
680 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
681 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
682 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
683 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
684 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
686 for k in self.StoredVariables:
687 self.__canonical_stored_name[k.lower()] = k
689 for k, v in self.__variable_names_not_public.items():
690 self.__canonical_parameter_name[k.lower()] = k
691 self.__canonical_parameter_name["algorithm"] = "Algorithm"
692 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
694 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
696 logging.debug("%s Lancement", self._name)
697 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
698 self._getTimeState(reset=True)
700 # Mise a jour des paramètres internes avec le contenu de Parameters, en
701 # reprenant les valeurs par défauts pour toutes celles non définies
702 self.__setParameters(Parameters, reset=True)
703 for k, v in self.__variable_names_not_public.items():
704 if k not in self._parameters: self.__setParameters( {k:v} )
706 # Corrections et compléments des vecteurs
707 def __test_vvalue(argument, variable, argname, symbol=None):
708 if symbol is None: symbol = variable
710 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
711 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
712 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
713 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
715 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
717 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
719 __test_vvalue( Xb, "Xb", "Background or initial state" )
720 __test_vvalue( Y, "Y", "Observation" )
721 __test_vvalue( U, "U", "Control" )
723 # Corrections et compléments des covariances
724 def __test_cvalue(argument, variable, argname, symbol=None):
725 if symbol is None: symbol = variable
727 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
728 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
729 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
730 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
732 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
734 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
736 __test_cvalue( B, "B", "Background" )
737 __test_cvalue( R, "R", "Observation" )
738 __test_cvalue( Q, "Q", "Evolution" )
740 # Corrections et compléments des opérateurs
741 def __test_ovalue(argument, variable, argname, symbol=None):
742 if symbol is None: symbol = variable
743 if argument is None or (isinstance(argument,dict) and len(argument)==0):
744 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
745 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
746 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
747 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
749 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
751 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
753 __test_ovalue( HO, "HO", "Observation", "H" )
754 __test_ovalue( EM, "EM", "Evolution", "M" )
755 __test_ovalue( CM, "CM", "Control Model", "C" )
757 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
758 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
760 self._parameters["Bounds"] = None
762 if logging.getLogger().level < logging.WARNING:
763 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
764 if PlatformInfo.has_scipy:
765 import scipy.optimize
766 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
768 self._parameters["optmessages"] = 15
770 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
771 if PlatformInfo.has_scipy:
772 import scipy.optimize
773 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
775 self._parameters["optmessages"] = 15
779     def _post_run(self,_oH=None):
        # Post-processing after an algorithm run: derive optional a posteriori
        # statistics and emit debug accounting. `_oH` is an optional dict of
        # observation operators keyed "Direct"/"Tangent"/"Adjoint".
        # NOTE(review): the listing is elided here; a docstring line is missing.
781         if ("StoreSupplementaryCalculations" in self._parameters) and \
782             "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            # For every stored covariance, derive what the user asked for:
            # variances (diagonal), standard deviations (sqrt of diagonal),
            # and the correlation matrix D^-1/2 A D^-1/2.
783             for _A in self.StoredVariables["APosterioriCovariance"]:
784                 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
785                     self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
786                 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
787                     self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
788                 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
789                     _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
790                     _C = numpy.dot(_EI, numpy.dot(_A, _EI))
791                     self.StoredVariables["APosterioriCorrelations"].store( _C )
792         if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            # Operator call/cache counters: nbcalls(0)=evaluations, nbcalls(3)=cache hits.
793             logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
794             logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
795         logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
796         logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
797         logging.debug("%s Terminé", self._name)
800     def _toStore(self, key):
        # Convenience predicate: has the user asked for this supplementary
        # result to be stored by the current algorithm?
801         "True if in StoreSupplementaryCalculations, else False"
802         return key in self._parameters["StoreSupplementaryCalculations"]
804     def get(self, key=None):
        # Return one stored variable (Persistence object) by case-insensitive
        # key, or the whole StoredVariables mapping when no key is given.
        # NOTE(review): the listing is elided; the `if key is not None...`
        # guard between the docstring and the two returns is missing here.
806         Renvoie l'une des variables stockées identifiée par la clé, ou le
807         dictionnaire de l'ensemble des variables disponibles en l'absence de
808         clé. Ce sont directement les variables sous forme objet qui sont
809         renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
810         des classes de persistance.
813             return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
815             return self.StoredVariables
817     def __contains__(self, key=None):
818         "D.__contains__(k) -> True if D has a key k, else False"
        # Case-insensitive membership through the canonical-name map.
        # NOTE(review): elided listing — the `return False` branch of this
        # guard is missing from view.
819         if key is None or key.lower() not in self.__canonical_stored_name:
822             return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        # Fragment of a dict-like `keys` method (its `def` line is elided from
        # this listing): exposes the stored-variable names when available.
825         "D.keys() -> list of D's keys"
826         if hasattr(self, "StoredVariables"):
827             return self.StoredVariables.keys()
        # Fragment of a dict-like `pop` method (its `def` line is elided from
        # this listing): removes and returns a stored variable by
        # case-insensitive key, with dict.pop-style default/KeyError behavior.
832         "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
833         if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
834             return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
839                 raise TypeError("pop expected at least 1 arguments, got 0")
840             "If key is not found, d is returned if given, otherwise KeyError is raised"
846     def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        # Abstract entry point: concrete algorithms must override this with
        # the elementary assimilation computation.
848         Doit implémenter l'opération élémentaire de calcul d'assimilation sous
849         sa forme mathématique la plus naturelle possible.
851         raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
853     def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
        # Register a required algorithm parameter with its default value,
        # casting callable, bounds and allowed-value lists. Also records the
        # lowercase->canonical name mapping for case-insensitive lookup.
        # NOTE(review): elided listing — several entries of the descriptor
        # dict (default/message/minval/...) are missing from view.
855         Permet de définir dans l'algorithme des paramètres requis et leurs
856         caractéristiques par défaut.
859             raise ValueError("A name is mandatory to define a required parameter.")
861         self.__required_parameters[name] = {
863             "typecast" : typecast,
870         self.__canonical_parameter_name[name.lower()] = name
        # setParameterValue(name) evaluates (and logs) the default value.
871         logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
873     def getRequiredParameters(self, noDetails=True):
        # Sorted parameter names when noDetails is true, else the full
        # descriptor dictionary (the branching lines are elided from view).
875         Renvoie la liste des noms de paramètres requis ou directement le
876         dictionnaire des paramètres requis.
879             return sorted(self.__required_parameters.keys())
881             return self.__required_parameters
883     def setParameterValue(self, name=None, value=None):
        # Controlled evaluation of one required parameter: resolve the
        # canonical name, apply default/typecast, then validate against
        # min/max bounds and the allowed (listval) / advanced (listadv) lists.
        # Raises ValueError on any violation; returns the validated value.
885         Renvoie la valeur d'un paramètre requis de manière contrôlée
887         __k = self.__canonical_parameter_name[name.lower()]
888         default  = self.__required_parameters[__k]["default"]
889         typecast = self.__required_parameters[__k]["typecast"]
890         minval   = self.__required_parameters[__k]["minval"]
891         maxval   = self.__required_parameters[__k]["maxval"]
892         listval  = self.__required_parameters[__k]["listval"]
893         listadv  = self.__required_parameters[__k]["listadv"]
        # Value resolution: explicit value wins, else the declared default.
        # NOTE(review): elided listing — the `__val = None` branch body and
        # the try/except around the typecast are partially missing from view.
895         if value is None and default is None:
897         elif value is None and default is not None:
898             if typecast is None: __val = default
899             else:                __val = typecast( default )
901             if typecast is None: __val = value
904                 __val = typecast( value )
906                 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Numeric bound checks work element-wise for sequence-valued params.
908         if minval is not None and (numpy.array(__val, float) < minval).any():
909             raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
910         if maxval is not None and (numpy.array(__val, float) > maxval).any():
911             raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        # Allowed-value check: element-wise for list/tuple values, scalar
        # otherwise; listadv entries are accepted but never advertised.
912         if listval is not None or listadv is not None:
913             if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
915                     if listval is not None and v in listval: continue
916                     elif listadv is not None and v in listadv: continue
918                         raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
919             elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
920                 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
def requireInputArguments(self, mandatory=(), optional=()):
    """Declare the calculation input arguments required by the algorithm.

    Both `mandatory` and `optional` are iterables of argument names; they
    are normalized to tuples and stored in the internal requirements table.
    """
    __mandatory = tuple( mandatory )
    __optional  = tuple( optional )
    self.__required_inputs["RequiredInputValues"]["mandatory"] = __mandatory
    self.__required_inputs["RequiredInputValues"]["optional"]  = __optional
def getInputArguments(self):
    """Return the declared input requirements as a (mandatory, optional) pair."""
    __riv = self.__required_inputs["RequiredInputValues"]
    return __riv["mandatory"], __riv["optional"]
def setAttributes(self, tags=()):
    """Append classification tag attributes.

    The current (possibly extended) tag list is returned in every case,
    so calling with no argument acts as a plain getter.
    """
    __classification = self.__required_inputs["ClassificationTags"]
    for __tag in tags:
        __classification.append( __tag )
    return __classification
945     def __setParameters(self, fromDico={}, reset=False):
        # Store incoming parameters: raw update first, then re-evaluate every
        # declared required parameter through setParameterValue (explicit
        # value when supplied in fromDico, declared default otherwise), with
        # case-insensitive matching of incoming keys.
        # NOTE(review): elided listing — the `else:` before line 960 and the
        # reset/logging tail of the loop are missing from view.
947         Permet de stocker les paramètres reçus dans le dictionnaire interne.
949         self._parameters.update( fromDico )
950         __inverse_fromDico_keys = {}
951         for k in fromDico.keys():
952             if k.lower() in self.__canonical_parameter_name:
953                 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
954         #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
955         __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
956         for k in self.__required_parameters.keys():
957             if k in __canonic_fromDico_keys:
958                 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
960                 self._parameters[k] = self.setParameterValue(k)
963             logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
965     def _getTimeState(self, reset=False):
        # Return (cpu_seconds, elapsed_seconds) since the reference point;
        # with reset=True the reference point is (re)initialized first.
        # NOTE(review): elided listing — the `if reset:`/`else:` lines framing
        # the two branches are missing from view.
967         Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
970             self.__initial_cpu_time      = time.process_time()
971             self.__initial_elapsed_time  = time.perf_counter()
974             self.__cpu_time     = time.process_time()  - self.__initial_cpu_time
975             self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
976             return self.__cpu_time, self.__elapsed_time
978     def _StopOnTimeLimit(self, X=None, withReason=False):
979         "Stop criteria on time limit: True/False [+ Reason]"
        # Checks optional MaximumCpuTime / MaximumElapsedTime parameters
        # against the running timers; `X` is accepted for callback signature
        # compatibility and unused here.
        # NOTE(review): elided listing — the final return (with/without the
        # reason string, depending on withReason) is missing from view.
980         c, e = self._getTimeState()
981         if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
982             __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
983         elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
984             __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
986             __SC, __SR = False, ""
992 # ==============================================================================
993 class AlgorithmAndParameters(object):
    # Facade binding an algorithm name to its parameters; construction
    # accepts either direct arguments or a user script to import them from.
    # NOTE(review): elided listing — the `def __init__` line and several of
    # its keyword parameters/assignments are missing from view.
995     Classe générale d'interface d'action pour l'algorithme et ses paramètres
998                  name = "GenericAlgorithm",
1005         self.__name = str(name)
1009         self.__algorithm = {}
1010         self.__algorithmFile = None
1011         self.__algorithmName = None
1013         self.updateParameters( asDict, asScript )
        # The algorithm choice can come from an explicit argument or be read
        # from the script under the variable "Algorithm".
1015         if asAlgorithm is None and asScript is not None:
1016             __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1018             __Algo = asAlgorithm
1020         if __Algo is not None:
1021             self.__A = str(__Algo)
1022             self.__P.update( {"Algorithm":self.__A} )
1024             self.__setAlgorithm( self.__A )
        # Internal-only variables, duplicated from Algorithm.
1026         self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
1028     def updateParameters(self,
        # Merge new parameters into the internal dictionary, reading them
        # from a script (variable "Parameters") when no dict is supplied.
        # NOTE(review): elided listing — the remaining keyword parameters and
        # the `else: __Dict = asDict` branch are missing from view.
1032         "Mise a jour des parametres"
1033         if asDict is None and asScript is not None:
1034             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1038         if __Dict is not None:
1039             self.__P.update( dict(__Dict) )
1041     def executePythonScheme(self, asDictAO = None):
1042         "Permet de lancer le calcul d'assimilation"
        # Unpack the case dictionary into the internal Xb/Y/U/HO/EM/CM/B/R/Q
        # slots (calling .getO() on wrapped objects), validate shapes, then
        # run the algorithm. The operator cache is cleared first so results
        # from a previous run cannot leak into this one.
1043         Operator.CM.clearCache()
1045         if not isinstance(asDictAO, dict):
1046             raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Background may come either from "Background" or "CheckingPoint".
1047         if hasattr(asDictAO["Background"],"getO"):        self.__Xb = asDictAO["Background"].getO()
1048         elif hasattr(asDictAO["CheckingPoint"],"getO"):   self.__Xb = asDictAO["CheckingPoint"].getO()
1049         else:                                             self.__Xb = None
1050         if hasattr(asDictAO["Observation"],"getO"):       self.__Y = asDictAO["Observation"].getO()
1051         else:                                             self.__Y = asDictAO["Observation"]
1052         if hasattr(asDictAO["ControlInput"],"getO"):      self.__U = asDictAO["ControlInput"].getO()
1053         else:                                             self.__U = asDictAO["ControlInput"]
1054         if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1055         else:                                             self.__HO = asDictAO["ObservationOperator"]
1056         if hasattr(asDictAO["EvolutionModel"],"getO"):    self.__EM = asDictAO["EvolutionModel"].getO()
1057         else:                                             self.__EM = asDictAO["EvolutionModel"]
1058         if hasattr(asDictAO["ControlModel"],"getO"):      self.__CM = asDictAO["ControlModel"].getO()
1059         else:                                             self.__CM = asDictAO["ControlModel"]
1060         self.__B = asDictAO["BackgroundError"]
1061         self.__R = asDictAO["ObservationError"]
1062         self.__Q = asDictAO["EvolutionError"]
1064         self.__shape_validate()
        # NOTE(review): elided listing — the other run() keyword arguments
        # between lines 1066 and 1076 are missing from view.
1066         self.__algorithm.run(
1076             Parameters = self.__P,
1080     def executeYACSScheme(self, FileName=None):
1081         "Permet de lancer le calcul d'assimilation"
        # Run the assimilation as a YACS XML scheme inside SALOME: load the
        # schema file, validate it (parser log, error report, link
        # consistency), then execute it with the YACS executor.
1082         if FileName is None or not os.path.exists(FileName):
1083             raise ValueError("a YACS file name has to be given for YACS execution.\n")
1085             __file    = os.path.abspath(FileName)
1086             logging.debug("The YACS file name is \"%s\"."%__file)
        # All three SALOME/YACS/ADAO environments must be available.
1087         if not PlatformInfo.has_salome or \
1088             not PlatformInfo.has_yacs or \
1089             not PlatformInfo.has_adao:
1090             raise ImportError("\n\n"+\
1091                 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1092                 "Please load the right environnement before trying to use it.\n")
        # NOTE(review): elided listing — the pilot/loader imports and several
        # early-return lines of this method are missing from view.
1095         import SALOMERuntime
1097         SALOMERuntime.RuntimeSALOME_setRuntime()
1099         r = pilot.getRuntime()
1100         xmlLoader = loader.YACSLoader()
1101         xmlLoader.registerProcCataLoader()
1103             catalogAd = r.loadCatalog("proc", __file)
1104             r.addCatalog(catalogAd)
1109             p = xmlLoader.load(__file)
1110         except IOError as ex:
1111             print("The YACS XML schema file can not be loaded: %s"%(ex,))
1113         logger = p.getLogger("parser")
1114         if not logger.isEmpty():
1115             print("The imported YACS XML schema has errors on parsing:")
1116             print(logger.getStr())
1119             print("The YACS XML schema is not valid and will not be executed:")
1120             print(p.getErrorReport())
        # Check link consistency without stopping at the first issue.
1122         info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1123         p.checkConsistency(info)
1124         if info.areWarningsOrErrors():
1125             print("The YACS XML schema is not coherent and will not be executed:")
1126             print(info.getGlobalRepr())
1128         e = pilot.ExecutorSwig()
1130         if p.getEffectiveState() != pilot.DONE:
1131             print(p.getErrorReport())
1135     def get(self, key = None):
1136         "Vérifie l'existence d'une clé de variable ou de paramètres"
        # Lookup order: algorithm stored variables first, then parameters;
        # with no key, return all parameters minus the non-public names.
1137         if key in self.__algorithm:
1138             return self.__algorithm.get( key )
1139         elif key in self.__P:
1140             return self.__P[key]
        # NOTE(review): `allvariables` aliases self.__P, so the pop() below
        # mutates the parameter dictionary itself (no copy is taken) —
        # possibly intended cleanup of non-public keys; confirm before use.
1142             allvariables = self.__P
1143             for k in self.__variable_names_not_public: allvariables.pop(k, None)
def pop(self, k, d):
    "Needed for pickling support"
    __stored = self.__algorithm
    return __stored.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    """Delegate to the chosen algorithm for its required parameters."""
    __algo = self.__algorithm
    return __algo.getRequiredParameters(noDetails)
def getAlgorithmInputArguments(self):
    """Delegate to the chosen algorithm for its required input arguments."""
    __algo = self.__algorithm
    return __algo.getInputArguments()
1158     def getAlgorithmAttributes(self):
1159         "Renvoie la liste des attributs selon l'algorithme"
        # Implemented by calling setAttributes() with its empty default:
        # extending with () is a no-op, so this acts as a pure getter.
1160         return self.__algorithm.setAttributes()
1162     def setObserver(self, __V, __O, __I, __S):
        # Attach observer callable __O (with hook parameters __I and
        # scheduler __S) to the stored variable named __V. Requires an
        # algorithm to have been chosen first.
        # NOTE(review): elided listing — some keyword lines of the
        # setDataObserver call are missing from view.
1163         if self.__algorithm is None \
1164             or isinstance(self.__algorithm, dict) \
1165             or not hasattr(self.__algorithm,"StoredVariables"):
1166             raise ValueError("No observer can be build before choosing an algorithm.")
1167         if __V not in self.__algorithm:
1168             raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1170             self.__algorithm.StoredVariables[ __V ].setDataObserver(
1173                 HookParameters = __I,
1176     def removeObserver(self, __V, __O, __A = False):
        # Detach observer __O from stored variable __V (with __A presumably
        # meaning "remove all" — TODO confirm against Persistence API).
1177         if self.__algorithm is None \
1178             or isinstance(self.__algorithm, dict) \
1179             or not hasattr(self.__algorithm,"StoredVariables"):
1180             raise ValueError("No observer can be removed before choosing an algorithm.")
1181         if __V not in self.__algorithm:
1182             raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1184             return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1189     def hasObserver(self, __V):
        # True when the stored variable __V exists and carries at least one
        # data observer; the negative returns are elided from this listing.
1190         if self.__algorithm is None \
1191             or isinstance(self.__algorithm, dict) \
1192             or not hasattr(self.__algorithm,"StoredVariables"):
1194         if __V not in self.__algorithm:
1196         return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # Fragment (its `def` line is elided from this listing): lists all
        # public keys — algorithm variables plus parameters — with the
        # non-public variable names filtered out.
1199         __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1200         for k in self.__variable_names_not_public:
1201             if k in __allvariables: __allvariables.remove(k)
1202         return __allvariables
1204     def __contains__(self, key=None):
1205         "D.__contains__(k) -> True if D has a key k, else False"
        # Membership in either the algorithm variables or the parameters.
1206         return key in self.__algorithm or key in self.__P
        # Fragments of __repr__ / __str__ (their `def` lines are elided from
        # this listing): both render the algorithm name and parameter dict.
1209         "x.__repr__() <==> repr(x)"
1210         return repr(self.__A)+", "+repr(self.__P)
1213         "x.__str__() <==> str(x)"
1214         return str(self.__A)+", "+str(self.__P)
1216     def __setAlgorithm(self, choice = None ):
        # Locate the algorithm module named `choice` under a "daAlgorithms"
        # directory on sys.path, import it, check it defines
        # ElementaryAlgorithm, and instantiate it. The choice is final: a
        # second call with a different name raises.
1218         Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1219         d'assimilation. L'argument est un champ caractère se rapportant au nom
1220         d'un algorithme réalisant l'opération sur les arguments fixes.
1223             raise ValueError("Error: algorithm choice has to be given")
1224         if self.__algorithmName is not None:
1225             raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1226         daDirectory = "daAlgorithms"
1228         # Recherche explicitement le fichier complet
1229         # ------------------------------------------
        # NOTE(review): elided listing — the module_path initialisation and
        # the `break` of this search loop are missing from view.
1231         for directory in sys.path:
1232             if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1233                 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1234         if module_path is None:
1235             raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n             The search path is %s"%(choice, sys.path))
1237         # Importe le fichier complet comme un module
1238         # ------------------------------------------
        # sys.path is temporarily prepended with the algorithm directory and
        # restored afterwards, even on import failure (restoration line in
        # the except path is elided from view).
1240             sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1241             self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1242             if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1243                 raise ImportError("this module does not define a valid elementary algorithm.")
1244             self.__algorithmName = str(choice)
1245             sys.path = sys_path_tmp ; del sys_path_tmp
1246         except ImportError as e:
1247             raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n             The import error message is: %s"%(choice,e))
1249         # Instancie un objet du type élémentaire du fichier
1250         # -------------------------------------------------
1251         self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1254     def __shape_validate(self):
        # Cross-check the shapes of all case inputs (Xb, Y, U, B, R, Q and
        # the HO/EM/CM operators) before running the algorithm. Each object
        # may expose `size`, a tuple `shape`, or a callable `shape()`;
        # missing objects default to zero shapes.
1256         Validation de la correspondance correcte des tailles des variables et
1257         des matrices s'il y en a.
1259         if self.__Xb is None:                      __Xb_shape = (0,)
1260         elif hasattr(self.__Xb,"size"):            __Xb_shape = (self.__Xb.size,)
1261         elif hasattr(self.__Xb,"shape"):
1262             if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1263             else:                                  __Xb_shape = self.__Xb.shape()
1264         else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1266         if self.__Y is None:                      __Y_shape = (0,)
1267         elif hasattr(self.__Y,"size"):            __Y_shape = (self.__Y.size,)
1268         elif hasattr(self.__Y,"shape"):
1269             if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1270             else:                                 __Y_shape = self.__Y.shape()
1271         else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1273         if self.__U is None:                      __U_shape = (0,)
1274         elif hasattr(self.__U,"size"):            __U_shape = (self.__U.size,)
1275         elif hasattr(self.__U,"shape"):
1276             if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1277             else:                                 __U_shape = self.__U.shape()
1278         else: raise TypeError("The control (U) has no attribute of shape: problem !")
1280         if self.__B is None:                      __B_shape = (0,0)
1281         elif hasattr(self.__B,"shape"):
1282             if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1283             else:                                 __B_shape = self.__B.shape()
1284         else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1286         if self.__R is None:                      __R_shape = (0,0)
1287         elif hasattr(self.__R,"shape"):
1288             if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1289             else:                                 __R_shape = self.__R.shape()
1290         else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1292         if self.__Q is None:                      __Q_shape = (0,0)
1293         elif hasattr(self.__Q,"shape"):
1294             if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1295             else:                                 __Q_shape = self.__Q.shape()
1296         else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        # Operators are dicts of forms; shape is taken from the "Direct" form.
1298         if len(self.__HO) == 0:                              __HO_shape = (0,0)
1299         elif isinstance(self.__HO, dict):                    __HO_shape = (0,0)
1300         elif hasattr(self.__HO["Direct"],"shape"):
1301             if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1302             else:                                            __HO_shape = self.__HO["Direct"].shape()
1303         else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1305         if len(self.__EM) == 0:                              __EM_shape = (0,0)
1306         elif isinstance(self.__EM, dict):                    __EM_shape = (0,0)
1307         elif hasattr(self.__EM["Direct"],"shape"):
1308             if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1309             else:                                            __EM_shape = self.__EM["Direct"].shape()
1310         else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1312         if len(self.__CM) == 0:                              __CM_shape = (0,0)
1313         elif isinstance(self.__CM, dict):                    __CM_shape = (0,0)
1314         elif hasattr(self.__CM["Direct"],"shape"):
1315             if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1316             else:                                            __CM_shape = self.__CM["Direct"].shape()
1317         else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1319         # Vérification des conditions
1320         # ---------------------------
        # Vectors must be 1-D (or degenerate 2-D); covariance matrices square.
1321         if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1322             raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1323         if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1324             raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1326         if not( min(__B_shape) == max(__B_shape) ):
1327             raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1328         if not( min(__R_shape) == max(__R_shape) ):
1329             raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1330         if not( min(__Q_shape) == max(__Q_shape) ):
1331             raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1332         if not( min(__EM_shape) == max(__EM_shape) ):
1333             raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        # Pairwise compatibility between the operators and the vectors.
1335         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1336             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1337         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1338             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1339         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1340             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1341         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1342             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        # Special case: EnsembleBlue accepts a B-sized series of backgrounds,
        # so Xb is reshaped into a persistent vector instead of failing.
        # NOTE(review): `numpy.float` (removed in NumPy >= 1.24) and
        # `numpy.matrix` (deprecated) are used below — should become the
        # builtin `float` and ndarray-based storage when this can be tested.
1344         if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1345             if self.__algorithmName in ["EnsembleBlue",]:
1346                 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1347                 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1348                 for member in asPersistentVector:
1349                     self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1350                 __Xb_shape = min(__B_shape)
1352                 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1354         if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1355             raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1357         if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1358             raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1360         if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1361             raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        # One bound pair is expected per state component.
1363         if ("Bounds" in self.__P) \
1364             and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1365             and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1366             raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1367                 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1371 # ==============================================================================
1372 class RegulationAndParameters(object):
    # Lightweight holder for a regulation algorithm name and its parameter
    # dictionary; values can come from arguments or be read from a script.
    # NOTE(review): elided listing — the `def __init__` line, several of its
    # keyword parameters, and the `else:` branches are missing from view.
1374     Classe générale d'interface d'action pour la régulation et ses paramètres
1377                  name = "GenericRegulation",
1384         self.__name = str(name)
1387         if asAlgorithm is None and asScript is not None:
1388             __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1390             __Algo = asAlgorithm
1392         if asDict is None and asScript is not None:
1393             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1397         if __Dict is not None:
1398             self.__P.update( dict(__Dict) )
1400         if __Algo is not None:
1401             self.__P.update( {"Algorithm":str(__Algo)} )
    # Parameter accessor; the no-key branch is elided from view.
1403     def get(self, key = None):
1404         "Vérifie l'existence d'une clé de variable ou de paramètres"
1406             return self.__P[key]
1410 # ==============================================================================
1411 class DataObserver(object):
    # Observer registration helper: normalizes the observed variable names
    # and info strings, builds the observer callable (from an object, a
    # template, a string or a script), then attaches it to the algorithm.
    # NOTE(review): elided listing — the `def __init__` line, several
    # keyword parameters, and some `else:` branches are missing from view.
1413     Classe générale d'interface de type observer
1416                  name = "GenericObserver",
1428         self.__name = str(name)
1433         if onVariable is None:
1434             raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1435         elif type(onVariable) in (tuple, list):
            # Several variables: one info string is replicated per variable.
1436             self.__V = tuple(map( str, onVariable ))
1437             if withInfo is None:
1440                 self.__I = (str(withInfo),)*len(self.__V)
1441         elif isinstance(onVariable, str):
            # Single variable: the info defaults to the variable name itself.
1442             self.__V = (onVariable,)
1443             if withInfo is None:
1444                 self.__I = (onVariable,)
1446                 self.__I = (str(withInfo),)
1448             raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        # Observer body: an explicit object wins, otherwise it is compiled
        # from the user script/template through UserScript + Observer2Func.
1450         if asObsObject is not None:
1451             self.__O = asObsObject
1453             __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1454             __Function = Observer2Func(__FunctionText)
1455             self.__O = __Function.getfunc()
1457         for k in range(len(self.__V)):
1460             if ename not in withAlgo:
1461                 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1463                 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
        # __repr__/__str__ fragments (their `def` lines are elided from view).
1466         "x.__repr__() <==> repr(x)"
1467         return repr(self.__V)+"\n"+repr(self.__O)
1470         "x.__str__() <==> str(x)"
1471         return str(self.__V)+"\n"+str(self.__O)
1473 # ==============================================================================
1474 class UserScript(object):
    # Wrapper around a piece of user-provided script text, resolved in
    # priority order: explicit string, named template (UserPostAnalysis or
    # Observer), or an external script file.
    # NOTE(review): elided listing — the `def __init__` line, some keyword
    # parameters and the fallback branch are missing from view.
1476     Classe générale d'interface de type texte de script utilisateur
1479                  name = "GenericUserScript",
1486         self.__name = str(name)
1488         if asString is not None:
1490         elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1491             self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1492         elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1493             self.__F = Templates.ObserverTemplates[asTemplate]
1494         elif asScript is not None:
1495             self.__F = Interfaces.ImportFromScript(asScript).getstring()
        # __repr__/__str__ fragments (their `def` lines are elided from view):
        # both render the stored script text.
1500         "x.__repr__() <==> repr(x)"
1501         return repr(self.__F)
1504         "x.__str__() <==> str(x)"
1505         return str(self.__F)
1507 # ==============================================================================
1508 class ExternalParameters(object):
    # Dict-like holder for user "ExternalParameters", filled either from a
    # dict or from a script variable of that name.
    # NOTE(review): elided listing — the `def __init__` line, several
    # keyword parameters and some `def` lines (get/keys/items) are missing
    # from view.
1510     Classe générale d'interface de type texte de script utilisateur
1513                  name = "GenericExternalParameters",
1519         self.__name = str(name)
1522         self.updateParameters( asDict, asScript )
1524     def updateParameters(self,
1528         "Mise a jour des parametres"
1529         if asDict is None and asScript is not None:
1530             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1534         if __Dict is not None:
1535             self.__P.update( dict(__Dict) )
        # Accessors below follow the mapping protocol (get/keys/pop/items/
        # __contains__); some of their `def` lines are elided from view.
1537     def get(self, key = None):
1539             return self.__P[key]
1541             return list(self.__P.keys())
1544         return list(self.__P.keys())
1546     def pop(self, k, d):
1547         return self.__P.pop(k, d)
1550         return self.__P.items()
1552     def __contains__(self, key=None):
1553         "D.__contains__(k) -> True if D has a key k, else False"
1554         return key in self.__P
1556 # ==============================================================================
# NOTE(review): this chunk is a partial, line-numbered extraction. Each line
# below begins with its original file line number; gaps between consecutive
# numbers (e.g. 1559 -> 1562) mean lines are MISSING here (the "def __init__"
# header, docstring delimiters, "else:" branches, try/except lines, ...).
# Do not treat this listing as runnable as-is.
#
# State: generic interface object for a state vector. From the visible code,
# it stores either a single column vector (numpy.matrix, __is_vector=True)
# or a series of vectors (Persistence.OneVector, __is_series=True), with
# inputs taken from direct values, a script (Interfaces.ImportFromScript) or
# data files (Interfaces.ImportFromFile).
1557 class State(object):
1559 Classe générale d'interface de type état
# Constructor parameters (visible subset; others are missing from this
# extraction): name, asPersistentVector, toBeChecked, and -- used below but
# whose declarations are missing -- asVector, asScript, asDataFile,
# colNames, colMajor, scheduledBy.
1562 name = "GenericVector",
1564 asPersistentVector = None,
1570 toBeChecked = False,
1573 Permet de définir un vecteur :
1574 - asVector : entrée des données, comme un vecteur compatible avec le
1575 constructeur de numpy.matrix, ou "True" si entrée par script.
1576 - asPersistentVector : entrée des données, comme une série de vecteurs
1577 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1578 type Persistence, ou "True" si entrée par script.
1579 - asScript : si un script valide est donné contenant une variable
1580 nommée "name", la variable est de type "asVector" (par défaut) ou
1581 "asPersistentVector" selon que l'une de ces variables est placée à
1583 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1584 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1585 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1586 nommée "name"), on récupère les colonnes et on les range ligne après
1587 ligne (colMajor=False, par défaut) ou colonne après colonne
1588 (colMajor=True). La variable résultante est de type "asVector" (par
1589 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1592 self.__name = str(name)
1593 self.__check = bool(toBeChecked)
1597 self.__is_vector = False
1598 self.__is_series = False
# Input resolution: script source takes precedence, then data file(s),
# then the direct asVector/asPersistentVector values (line 1627).
1600 if asScript is not None:
1601 __Vector, __Series = None, None
1602 if asPersistentVector:
1603 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
# (original line 1604, presumably "else:", is missing here)
1605 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1606 elif asDataFile is not None:
1607 __Vector, __Series = None, None
1608 if asPersistentVector:
1609 if colNames is not None:
1610 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1612 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Orientation fix-up: npz files are already stored transposed relative to
# the other formats, hence the symmetric pair of transposition cases.
1613 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1614 __Series = numpy.transpose(__Series)
1615 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1616 __Series = numpy.transpose(__Series)
1618 if colNames is not None:
1619 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1621 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Fortran ("F") vs C ("C") ravel order -- the guarding condition (original
# line 1622/1624) is missing; presumably it tests colMajor. TODO confirm.
1623 __Vector = numpy.ravel(__Vector, order = "F")
1625 __Vector = numpy.ravel(__Vector, order = "C")
1627 __Vector, __Series = asVector, asPersistentVector
1629 if __Vector is not None:
1630 self.__is_vector = True
# NOTE(review): numpy.float is a deprecated alias (removed in NumPy 1.24);
# the full sources would need "float" here to run on modern NumPy.
1631 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1632 self.shape = self.__V.shape
1633 self.size = self.__V.size
1634 elif __Series is not None:
1635 self.__is_series = True
1636 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1637 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# SECURITY NOTE(review): eval() on a string series executes arbitrary
# code; acceptable only because inputs come from the user's own case files.
1638 if isinstance(__Series, str): __Series = eval(__Series)
1639 for member in __Series:
1640 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
# shape may be a plain attribute (tuple/list) or a callable, depending on
# the underlying Persistence object.
1643 if isinstance(self.__V.shape, (tuple, list)):
1644 self.shape = self.__V.shape
1646 self.shape = self.__V.shape()
1647 if len(self.shape) == 1:
1648 self.shape = (self.shape[0],1)
1649 self.size = self.shape[0] * self.shape[1]
1651 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
# Optional time scheduling attribute self.__T.
1653 if scheduledBy is not None:
1654 self.__T = scheduledBy
# getO: returns the stored value, optionally with its scheduler; the
# remaining branches (original lines 1657, 1660+) are missing here.
1656 def getO(self, withScheduler=False):
1658 return self.__V, self.__T
1659 elif self.__T is None:
# Type-predicate methods (their "def" lines are missing from this
# extraction): return the internal representation flags.
1665 "Vérification du type interne"
1666 return self.__is_vector
1669 "Vérification du type interne"
1670 return self.__is_series
# Standard repr/str delegate to the stored value.
1673 "x.__repr__() <==> repr(x)"
1674 return repr(self.__V)
1677 "x.__str__() <==> str(x)"
1678 return str(self.__V)
1680 # ==============================================================================
# NOTE(review): partial, line-numbered extraction -- each line starts with
# its original file line number; numbering gaps indicate missing lines
# ("def" headers, "else:"/"try:"/"except" lines, docstring delimiters).
#
# Covariance: generic covariance-matrix interface with four mutually
# exclusive internal representations, selected in the constructor:
#   __is_scalar : scalar variance multiplying an implicit identity
#   __is_vector : 1D array of variances forming an implicit diagonal
#   __is_matrix : full numpy.matrix
#   __is_object : user object exposing a required set of methods
# All algebraic helpers below dispatch on these flags.
1681 class Covariance(object):
1683 Classe générale d'interface de type covariance
1686 name = "GenericCovariance",
1687 asCovariance = None,
1688 asEyeByScalar = None,
1689 asEyeByVector = None,
1692 toBeChecked = False,
1695 Permet de définir une covariance :
1696 - asCovariance : entrée des données, comme une matrice compatible avec
1697 le constructeur de numpy.matrix
1698 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1699 multiplicatif d'une matrice de corrélation identité, aucune matrice
1700 n'étant donc explicitement à donner
1701 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1702 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1703 n'étant donc explicitement à donner
1704 - asCovObject : entrée des données comme un objet python, qui a les
1705 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1706 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1707 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1708 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1709 pleine doit être vérifié
1711 self.__name = str(name)
1712 self.__check = bool(toBeChecked)
1715 self.__is_scalar = False
1716 self.__is_vector = False
1717 self.__is_matrix = False
1718 self.__is_object = False
# Script input: the condition lines selecting which variable to fill
# (original 1722, 1724, 1726, 1728) are missing from this extraction;
# presumably they test asEyeByScalar / asEyeByVector / asCovObject flags.
1720 if asScript is not None:
1721 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1723 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1725 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1727 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1729 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1731 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Representation selection; note the abs() on scalar/vector inputs, which
# silently forces non-negative variances.
1733 if __Scalar is not None:
1734 if numpy.matrix(__Scalar).size != 1:
1735 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1736 self.__is_scalar = True
1737 self.__C = numpy.abs( float(__Scalar) )
1740 elif __Vector is not None:
1741 self.__is_vector = True
1742 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1743 self.shape = (self.__C.size,self.__C.size)
1744 self.size = self.__C.size**2
1745 elif __Matrix is not None:
1746 self.__is_matrix = True
# NOTE(review): numpy.matrix / numpy.asmatrix (used throughout this class)
# are deprecated in modern NumPy; kept as-is here.
1747 self.__C = numpy.matrix( __Matrix, float )
1748 self.shape = self.__C.shape
1749 self.size = self.__C.size
1750 elif __Object is not None:
1751 self.__is_object = True
# Duck-typing check: the object must provide every required method.
1753 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1754 if not hasattr(self.__C,at):
1755 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1756 if hasattr(self.__C,"shape"):
1757 self.shape = self.__C.shape
1760 if hasattr(self.__C,"size"):
1761 self.size = self.__C.size
# Deliberately commented-out error: an unset covariance is tolerated at
# construction time and caught later by __validate().
1766 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# __validate: consistency and positive-definiteness checks. The SPD test
# via Cholesky only runs when explicitly requested (__check) or when the
# logging level is more verbose than WARNING (i.e. debug/info runs).
1770 def __validate(self):
1772 if self.__C is None:
1773 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1774 if self.ismatrix() and min(self.shape) != max(self.shape):
1775 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1776 if self.isobject() and min(self.shape) != max(self.shape):
1777 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1778 if self.isscalar() and self.__C <= 0:
1779 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1780 if self.isvector() and (self.__C <= 0).any():
1781 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# The try/except lines wrapping the Cholesky attempts (original 1783,
# 1785, 1788, 1790) are missing from this extraction.
1782 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1784 L = numpy.linalg.cholesky( self.__C )
1786 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1787 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1789 L = self.__C.cholesky()
1791 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Type predicates (their "def isscalar/isvector/ismatrix/isobject" lines
# are missing from this extraction).
1794 "Vérification du type interne"
1795 return self.__is_scalar
1798 "Vérification du type interne"
1799 return self.__is_vector
1802 "Vérification du type interne"
1803 return self.__is_matrix
1806 "Vérification du type interne"
1807 return self.__is_object
# getI: inverse, returned as a new Covariance preserving representation.
# ".I" is the numpy.matrix inverse attribute.
1812 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1813 elif self.isvector():
1814 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1815 elif self.isscalar():
1816 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1817 elif self.isobject():
1818 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1820 return None # Indispensable
# getT: transpose; diagonal/scalar forms are symmetric so they are
# returned unchanged.
1825 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1826 elif self.isvector():
1827 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1828 elif self.isscalar():
1829 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1830 elif self.isobject():
1831 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
# cholesky: lower-triangular factor; for diagonal/scalar forms this is
# simply the element-wise square root.
1834 "Décomposition de Cholesky"
1836 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1837 elif self.isvector():
1838 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1839 elif self.isscalar():
1840 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1841 elif self.isobject() and hasattr(self.__C,"cholesky"):
1842 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
# choleskyI: inverse of the Cholesky factor.
1844 def choleskyI(self):
1845 "Inversion de la décomposition de Cholesky"
1847 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1848 elif self.isvector():
1849 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1850 elif self.isscalar():
1851 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1852 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1853 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
# sqrt(m): true matrix square root via scipy.linalg.sqrtm for the full
# matrix case (differs from cholesky for non-diagonal matrices).
1856 "Racine carrée matricielle"
1859 return Covariance(self.__name+"C", asCovariance = scipy.linalg.sqrtm(self.__C) )
1860 elif self.isvector():
1861 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1862 elif self.isscalar():
1863 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1864 elif self.isobject() and hasattr(self.__C,"sqrt"):
1865 return Covariance(self.__name+"C", asCovObject = self.__C.sqrt() )
# sqrtI: inverse of the matrix square root.
1868 "Inversion de la racine carrée matricielle"
1871 return Covariance(self.__name+"H", asCovariance = scipy.linalg.sqrtm(self.__C).I )
1872 elif self.isvector():
1873 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1874 elif self.isscalar():
1875 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1876 elif self.isobject() and hasattr(self.__C,"sqrtI"):
1877 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtI() )
# diag: diagonal as a 1D array; the scalar form needs an explicit size.
1879 def diag(self, msize=None):
1880 "Diagonale de la matrice"
1882 return numpy.diag(self.__C)
1883 elif self.isvector():
1885 elif self.isscalar():
1887 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1889 return self.__C * numpy.ones(int(msize))
1890 elif self.isobject():
1891 return self.__C.diag()
# asfullmatrix: materialize the dense numpy.matrix form.
1893 def asfullmatrix(self, msize=None):
1897 elif self.isvector():
1898 return numpy.matrix( numpy.diag(self.__C), float )
1899 elif self.isscalar():
1901 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1903 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1904 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1905 return self.__C.asfullmatrix()
# trace: for a scalar representation this is scalar * size, so the size
# must be supplied.
1907 def trace(self, msize=None):
1908 "Trace de la matrice"
1910 return numpy.trace(self.__C)
1911 elif self.isvector():
1912 return float(numpy.sum(self.__C))
1913 elif self.isscalar():
1915 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1917 return self.__C * int(msize)
1918 elif self.isobject():
1919 return self.__C.trace()
1925 "x.__repr__() <==> repr(x)"
1926 return repr(self.__C)
1929 "x.__str__() <==> str(x)"
1930 return str(self.__C)
# Arithmetic operators: results are plain numpy matrices, not Covariance
# objects. For vector/scalar forms, addition/subtraction patch the
# diagonal of "other" in place via a strided flat view (step shape[1]+1).
1932 def __add__(self, other):
1933 "x.__add__(y) <==> x+y"
1934 if self.ismatrix() or self.isobject():
1935 return self.__C + numpy.asmatrix(other)
1936 elif self.isvector() or self.isscalar():
1937 _A = numpy.asarray(other)
1938 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1939 return numpy.asmatrix(_A)
1941 def __radd__(self, other):
1942 "x.__radd__(y) <==> y+x"
1943 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1945 def __sub__(self, other):
1946 "x.__sub__(y) <==> x-y"
1947 if self.ismatrix() or self.isobject():
1948 return self.__C - numpy.asmatrix(other)
1949 elif self.isvector() or self.isscalar():
1950 _A = numpy.asarray(other)
1951 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1952 return numpy.asmatrix(_A)
1954 def __rsub__(self, other):
1955 "x.__rsub__(y) <==> y-x"
1956 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1959 "x.__neg__() <==> -x"
# __mul__ / __rmul__: dispatch on representation and on the operand's
# type/shape, distinguishing vector-like from matrix-like operands.
1962 def __mul__(self, other):
1963 "x.__mul__(y) <==> x*y"
1964 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1965 return self.__C * other
1966 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1967 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1968 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1969 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1970 return self.__C * numpy.asmatrix(other)
1972 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1973 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1974 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1975 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1976 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1977 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1979 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1980 elif self.isscalar() and isinstance(other,numpy.matrix):
1981 return self.__C * other
1982 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1983 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1984 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1986 return self.__C * numpy.asmatrix(other)
1987 elif self.isobject():
1988 return self.__C.__mul__(other)
1990 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1992 def __rmul__(self, other):
1993 "x.__rmul__(y) <==> y*x"
1994 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1995 return other * self.__C
1996 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1997 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1998 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1999 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2000 return numpy.asmatrix(other) * self.__C
2002 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2003 elif self.isvector() and isinstance(other,numpy.matrix):
2004 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2005 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2006 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2007 return numpy.asmatrix(numpy.array(other) * self.__C)
2009 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2010 elif self.isscalar() and isinstance(other,numpy.matrix):
2011 return other * self.__C
2012 elif self.isobject():
2013 return self.__C.__rmul__(other)
2015 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# __len__: the matrix row count (its "def __len__" line is missing here).
2018 "x.__len__() <==> len(x)"
2019 return self.shape[0]
2021 # ==============================================================================
# NOTE(review): partial, line-numbered extraction; numbering gaps mean
# lines are missing (notably the statements executing the observer text
# inside func(), original lines 2030-2031, and getfunc's "def"/"return",
# original lines 2033-2034).
#
# Observer2Func: wraps a piece of observer source text ("corps") so it can
# be invoked as a function; presumably the missing body exec's/compiles
# self.__corps with var/info in scope -- TODO confirm against full source.
2022 class Observer2Func(object):
2024 Creation d'une fonction d'observateur a partir de son texte
2026 def __init__(self, corps=""):
# Raw observer source text, stored verbatim.
2027 self.__corps = corps
2028 def func(self,var,info):
2029 "Fonction d'observation"
2032 "Restitution du pointeur de fonction dans l'objet"
2035 # ==============================================================================
# NOTE(review): partial, line-numbered extraction; numbering gaps mean
# lines are missing (notably the "self.__viewers = {" / "self.__loaders = {"
# dict openings around original lines 2045/2050, the closing braces, and
# the "def switch_off/on" headers around 2064-2067).
#
# CaseLogger: records the sequence of case-creation commands and can
# replay/serialize them through pluggable viewer (dump) and loader (load)
# formatter classes taken from the Interfaces module.
2036 class CaseLogger(object):
2038 Conservation des commandes de creation d'un cas
2040 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2041 self.__name = str(__name)
2042 self.__objname = str(__objname)
# Ordered log of (command, keys, local, pre, switchoff) tuples.
2043 self.__logSerie = []
# When True, register() stops appending to the log.
2044 self.__switchoff = False
# Built-in output formatters (dict literal opening line missing above).
2046 "TUI" :Interfaces._TUIViewer,
2047 "SCD" :Interfaces._SCDViewer,
2048 "YACS":Interfaces._YACSViewer,
# Built-in input formatters (dict literal opening line missing above).
2051 "TUI" :Interfaces._TUIViewer,
2052 "COM" :Interfaces._COMViewer,
# User-supplied extra formatters extend/override the built-ins.
2054 if __addViewers is not None:
2055 self.__viewers.update(dict(__addViewers))
2056 if __addLoaders is not None:
2057 self.__loaders.update(dict(__addLoaders))
# register: append one command to the log; "self" is stripped from the
# recorded keys so the replayed call matches the public signature.
2059 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2060 "Enregistrement d'une commande individuelle"
2061 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2062 if "self" in __keys: __keys.remove("self")
2063 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
# Toggle helpers (their "def"/"if" lines are missing from this extraction).
2065 self.__switchoff = True
2067 self.__switchoff = False
# dump: serialize the log through the requested viewer format.
2069 def dump(self, __filename=None, __format="TUI", __upa=""):
2070 "Restitution normalisée des commandes"
2071 if __format in self.__viewers:
2072 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2074 raise ValueError("Dumping as \"%s\" is not available"%__format)
2075 return __formater.dump(__filename, __upa)
# load: deserialize commands through the requested loader format.
2077 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2078 "Chargement normalisé des commandes"
2079 if __format in self.__loaders:
2080 __formater = self.__loaders[__format]()
2082 raise ValueError("Loading as \"%s\" is not available"%__format)
2083 return __formater.load(__filename, __content, __object)
2085 # ==============================================================================
# NOTE(review): partial, line-numbered extraction. The function header
# itself (original line ~2087, presumably "def MultiFonction(__xserie,")
# is MISSING here, as are several interior lines (the multiprocessing
# toggle, _jobs initialization around 2117, the "else:" branches, and the
# final return around 2149-2150). This listing is not runnable as-is.
#
# MultiFonction: maps _sFunction over an ordered series of input vectors
# (__xserie), either sequentially or -- when enabled -- in parallel via a
# multiprocessing.Pool of __mpWorkers workers; extra arguments may be a
# list/tuple (positional) or dict (keyword, sequential path only).
2088 _extraArguments = None,
2089 _sFunction = lambda x: x,
2094 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2095 correspondante de valeurs de la fonction en argument
2097 # Vérifications et définitions initiales
2098 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
# Input must be iterable; PlatformInfo.isIterable does the check.
2099 if not PlatformInfo.isIterable( __xserie ):
2100 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count: None or < 1 presumably means "use all cores" (the branch
# body at original lines 2103-2104 is missing) -- TODO confirm.
2102 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2105 __mpWorkers = int(_mpWorkers)
2107 import multiprocessing
# Parallel path: build the _jobs list (its initialization line is missing
# above), pairing each input with the positional extra arguments.
2118 if _extraArguments is None:
2120 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2121 for __xvalue in __xserie:
2122 _jobs.append( [__xvalue, ] + list(_extraArguments) )
2124 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2125 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2126 import multiprocessing
# Pool used as a context manager so workers are cleaned up on exit.
2127 with multiprocessing.Pool(__mpWorkers) as pool:
2128 __multiHX = pool.map( _sFunction, _jobs )
2131 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential path: dispatch on the extra-arguments type (none /
# positional list-tuple-map / keyword dict).
2133 # logging.debug("MULTF Internal monoprocessing calculation begin")
2135 if _extraArguments is None:
2136 for __xvalue in __xserie:
2137 __multiHX.append( _sFunction( __xvalue ) )
2138 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2139 for __xvalue in __xserie:
2140 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2141 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2142 for __xvalue in __xserie:
2143 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2145 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2146 # logging.debug("MULTF Internal monoprocessing calculation end")
2148 # logging.debug("MULTF Internal multifonction calculations end")
2151 # ==============================================================================
# NOTE(review): partial, line-numbered extraction; numbering gaps mean
# lines are missing (several parameter declarations such as _Y/_Xb/_BI/_RI
# around 2156-2159, "else:" branches, and the final return around
# 2262-2264). This listing is not runnable as-is.
#
# CostFunction3D: generic cost function J(x) = Jb + Jo for the static/3D
# assimilation algorithms. Given a candidate state _x, it evaluates (or
# reuses) the simulated observation Hm(x), computes the background term Jb
# and observation term Jo according to the requested quality measure _QM,
# and optionally stores intermediate quantities into the _SSV dict of
# stored variables as requested by the _SSC list.
2152 def CostFunction3D(_x,
2153 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2154 _HmX = None, # Simulation déjà faite de Hm(x)
2155 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2160 _SIV = False, # A résorber pour la 8.0
2161 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2162 _nPS = 0, # nbPreviousSteps
2163 _QM = "DA", # QualityMeasure
2164 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2165 _fRt = False, # Restitue ou pas la sortie étendue
2166 _sSc = True, # Stocke ou pas les SSC
# NOTE(review): mutable defaults (_SSC=[], _SSV={}) are shared across
# calls -- here _SSV is deliberately used as an in/out container.
2169 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2170 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2171 DFO, QuantileRegression
# Ensure the needed storage slots exist in _SSV; when a slot is a
# Persistence object exposing store(), alias append -> store so the rest
# of the function can use append uniformly.
2177 for k in ["CostFunctionJ",
2183 "SimulatedObservationAtCurrentOptimum",
2184 "SimulatedObservationAtCurrentState",
2188 if hasattr(_SSV[k],"store"):
2189 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
# Candidate state as a column matrix.
2191 _X = numpy.asmatrix(numpy.ravel( _x )).T
2192 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2193 _SSV["CurrentState"].append( _X )
# Simulated observation: reuse _HmX when provided, otherwise call the
# operator _Hm (argument-handling branches are partially missing here).
2195 if _HmX is not None:
2199 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2203 _HX = _Hm( _X, *_arg )
2204 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2206 if "SimulatedObservationAtCurrentState" in _SSC or \
2207 "SimulatedObservationAtCurrentOptimum" in _SSC:
2208 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# NaN in the simulation propagates NaN to all cost terms.
2210 if numpy.any(numpy.isnan(_HX)):
2211 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
# Quality-measure dispatch: each branch defines Jb and Jo (the "Jb = 0."
# lines of the non-augmented branches are missing from this extraction).
2213 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2214 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2215 if _BI is None or _RI is None:
2216 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2217 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2218 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2219 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2220 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2222 raise ValueError("Observation error covariance matrix has to be properly defined!")
2224 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2225 elif _QM in ["LeastSquares", "LS", "L2"]:
2227 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2228 elif _QM in ["AbsoluteValue", "L1"]:
2230 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2231 elif _QM in ["MaximumError", "ME"]:
2233 Jo = numpy.max( numpy.abs(_Y - _HX) )
2234 elif _QM in ["QR", "Null"]:
2238 raise ValueError("Unknown asked quality measure!")
# Total cost.
2240 J = float( Jb ) + float( Jo )
2243 _SSV["CostFunctionJb"].append( Jb )
2244 _SSV["CostFunctionJo"].append( Jo )
2245 _SSV["CostFunctionJ" ].append( J )
# Optimum tracking over the stored J history, offset by the number of
# previous steps _nPS.
2247 if "IndexOfOptimum" in _SSC or \
2248 "CurrentOptimum" in _SSC or \
2249 "SimulatedObservationAtCurrentOptimum" in _SSC:
2250 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2251 if "IndexOfOptimum" in _SSC:
2252 _SSV["IndexOfOptimum"].append( IndexMin )
2253 if "CurrentOptimum" in _SSC:
2254 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2255 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2256 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
# Return-value selection (bodies missing): QuantileRegression gets a
# special return; the default return lines (2262+) are absent here.
2261 if _QM in ["QR"]: # Pour le QuantileRegression
2266 # ==============================================================================
2267 if __name__ == "__main__":
2268 print('\n AUTODIAGNOSTIC\n')