1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
# NOTE(review): numbered listing excerpt — the leading integers on each line
# are source line numbers baked into the text; gaps in that numbering mean
# lines are missing from this view (e.g. the "def __init__" header at 44,
# the return statements of wasCalculatedIn). Code is annotated in place and
# left byte-identical; do not treat it as directly runnable.
#
# CacheManager: bounded history of previous operator evaluations, keyed by
# operator name, used to detect and skip redundant computations. Each cache
# entry visible below is a tuple (raveled input point, result, input norm,
# operator name) appended to self.__listOPCV.
40 class CacheManager(object):
42 Classe générale de gestion d'un cache de calculs
# Visible constructor defaults:
# - toleranceInRedundancy: relative tolerance for deciding two inputs are
#   the same point (scaled by the stored point norm in wasCalculatedIn)
# - lenghtOfRedundancy: max cache length; -1 means "auto-size on first store"
#   ("lenght" is a historical misspelling preserved throughout the API)
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 Les caractéristiques de tolérance peuvent être modifiées à la création.
51 self.__tolerBP = float(toleranceInRedundancy)
52 self.__lenghtOR = int(lenghtOfRedundancy)
# __initlnOR keeps the initial auto-sized length so enable() can restore it.
53 self.__initlnOR = self.__lenghtOR
60 self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator
62 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
# wasCalculatedIn: scan the cache newest-first for a stored evaluation of
# xValue under operator oName. The success/failure return statements are in
# lines missing from this excerpt (presumably returning a (found, value)
# pair — TODO confirm against the full source).
64 def wasCalculatedIn(self, xValue, oName="" ): #, info="" ):
65 "Vérifie l'existence d'un calcul correspondant à la valeur"
69 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# Skip entries with a different operator name or a different input size.
70 if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
71 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
# Match when the distance to the stored point is below the relative
# tolerance (tolerance scaled by the stored point norm, entry index 2).
73 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
75 __HxV = self.__listOPCV[i][1]
76 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
# storeValueInX: record (xValue, HxValue) for operator oName, growing the
# allowed cache length the first time (and for each new operator name),
# and evicting oldest entries when over capacity.
80 def storeValueInX(self, xValue, HxValue, oName="" ):
81 "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
# Auto-size: a negative length means "2 * point size + 2" on first use.
82 if self.__lenghtOR < 0:
83 self.__lenghtOR = 2 * xValue.size + 2
84 self.__initlnOR = self.__lenghtOR
85 self.__seenNames.append(str(oName))
# Each previously-unseen operator name extends the cache capacity.
86 if str(oName) not in self.__seenNames: # Etend la liste si nouveau
87 self.__lenghtOR += 2 * xValue.size + 2
88 self.__initlnOR += self.__lenghtOR
89 self.__seenNames.append(str(oName))
# Evict oldest entries (FIFO) until within capacity.
90 while len(self.__listOPCV) > self.__lenghtOR:
91 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
92 self.__listOPCV.pop(0)
93 self.__listOPCV.append( (
94 copy.copy(numpy.ravel(xValue)),
96 numpy.linalg.norm(xValue),
# Fragments of disable()/enable() (their "def" headers are in missing
# lines): disable() zeroes/neutralizes the cache length and flags the
# cache off; enable() restores the initial length and flags it on.
102 self.__initlnOR = self.__lenghtOR
104 self.__enabled = False
108 self.__lenghtOR = self.__initlnOR
109 self.__enabled = True
111 # ==============================================================================
# NOTE(review): numbered listing excerpt — the leading integers are embedded
# source line numbers; gaps in them mean lines are missing from this view.
# Code is annotated in place and left byte-identical.
#
# Operator: generic wrapper around a simple operator supplied either as a
# Python (multi-)function or as a matrix. Tracks per-instance and class-level
# call counters, and uses a shared redundancy cache (Operator.CM, a
# CacheManager — its initialization is in lines missing from this excerpt).
112 class Operator(object):
114 Classe générale d'interface de type opérateur simple
# Visible constructor keyword defaults (the "def __init__" header line
# itself is missing from the excerpt):
122 name = "GenericOperator",
125 avoidingRedundancy = True,
126 inputAsMultiFunction = False,
127 enableMultiProcess = False,
128 extraArguments = None,
131 On construit un objet de ce type en fournissant, à l'aide de l'un des
132 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
135 - name : nom d'opérateur
136 - fromMethod : argument de type fonction Python
137 - fromMatrix : argument adapté au constructeur numpy.matrix
138 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
139 - inputAsMultiFunction : booléen indiquant une fonction explicitement
140 définie (ou pas) en multi-fonction
141 - extraArguments : arguments supplémentaires passés à la fonction de
142 base et ses dérivées (tuple ou dictionnaire)
144 self.__name = str(name)
# Local call counters: matrix applications, method calls, cache hits.
145 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
146 self.__AvoidRC = bool( avoidingRedundancy )
147 self.__inputAsMF = bool( inputAsMultiFunction )
148 self.__mpEnabled = bool( enableMultiProcess )
149 self.__extraArgs = extraArguments
# Dispatch on how the operator was supplied. A plain mono-function is
# adapted to the multi-function calling convention via functools.partial
# around MultiFonction (defined elsewhere in the file).
150 if fromMethod is not None and self.__inputAsMF:
151 self.__Method = fromMethod # logtimer(fromMethod)
153 self.__Type = "Method"
154 elif fromMethod is not None and not self.__inputAsMF:
155 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
157 self.__Type = "Method"
158 elif fromMatrix is not None:
# NOTE(review): numpy.matrix is deprecated and the numpy.float alias was
# removed in NumPy 1.20 — this line breaks on modern NumPy; the maintained
# source should migrate to numpy.asarray(fromMatrix, dtype=float).
160 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
161 self.__Type = "Matrix"
# Cache toggles. NOTE(review): both visible bodies call disable(); the
# enable() branch of enableAvoidingRedundancy is presumably in the missing
# lines 172-175 (likely conditioned on self.__AvoidRC — TODO confirm).
167 def disableAvoidingRedundancy(self):
169 Operator.CM.disable()
171 def enableAvoidingRedundancy(self):
176 Operator.CM.disable()
# appliedTo: apply the operator to one value or a series of values,
# optionally seeding the cache with caller-precomputed HValue results.
182 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
184 Permet de restituer le résultat de l'application de l'opérateur à une
185 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
186 argument devant a priori être du bon type.
188 - les arguments par série sont :
189 - xValue : argument adapté pour appliquer l'opérateur
190 - HValue : valeur précalculée de l'opérateur en ce point
191 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# Fast path: results were precomputed by the caller — store them as
# column matrices (and in the cache when enabled) without evaluating.
198 if HValue is not None:
202 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
204 if _HValue is not None:
205 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
207 for i in range(len(_HValue)):
208 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
210 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# General path: probe the cache per point; evaluate only the misses —
# directly as a matrix product, or batched through the multi-function.
215 for i, xv in enumerate(_xValue):
217 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
219 __alreadyCalculated = False
221 if __alreadyCalculated:
222 self.__addOneCacheCall()
225 if self.__Matrix is not None:
226 self.__addOneMatrixCall()
227 _hv = self.__Matrix * xv
229 self.__addOneMethodCall()
233 _HxValue.append( _hv )
# Batched evaluation of all cache misses accumulated in _xserie
# (built in lines missing from this excerpt).
235 if len(_xserie)>0 and self.__Matrix is None:
236 if self.__extraArgs is None:
237 _hserie = self.__Method( _xserie ) # Calcul MF
239 _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
# A multi-function must return a sequence; "pop" is used as the probe.
240 if not hasattr(_hserie, "pop"):
241 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
247 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally repack the series as a 2D array, one column per result.
249 if returnSerieAsArrayMatrix:
250 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
252 if argsAsSerie: return _HxValue
253 else: return _HxValue[-1]
# appliedControledFormTo: apply the operator to (x, u) pairs; when u is
# None the operator is applied to x alone. No redundancy cache here.
255 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
257 Permet de restituer le résultat de l'application de l'opérateur à des
258 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
259 argument devant a priori être du bon type. Si la uValue est None,
260 on suppose que l'opérateur ne s'applique qu'à xValue.
262 - paires : les arguments par paire sont :
263 - xValue : argument X adapté pour appliquer l'opérateur
264 - uValue : argument U adapté pour appliquer l'opérateur
265 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
267 if argsAsSerie: _xuValue = paires
268 else: _xuValue = (paires,)
269 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix form: the control u is ignored, only H*x is computed.
271 if self.__Matrix is not None:
273 for paire in _xuValue:
274 _xValue, _uValue = paire
275 self.__addOneMatrixCall()
276 _HxValue.append( self.__Matrix * _xValue )
# Function form: pass the full pair when u is present, x alone otherwise.
279 for paire in _xuValue:
280 _xValue, _uValue = paire
281 if _uValue is not None:
282 _xuArgs.append( paire )
284 _xuArgs.append( _xValue )
285 self.__addOneMethodCall( len(_xuArgs) )
286 if self.__extraArgs is None:
287 _HxValue = self.__Method( _xuArgs ) # Calcul MF
289 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
291 if returnSerieAsArrayMatrix:
292 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
294 if argsAsSerie: return _HxValue
295 else: return _HxValue[-1]
# appliedInXTo: apply the operator, linearized at xNominal, to xValue.
# For the matrix (linear) case the nominal point is irrelevant.
297 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
299 Permet de restituer le résultat de l'application de l'opérateur à une
300 série d'arguments xValue, sachant que l'opérateur est valable en
301 xNominal. Cette méthode se contente d'appliquer, son argument devant a
302 priori être du bon type. Si l'opérateur est linéaire car c'est une
303 matrice, alors il est valable en tout point nominal et xNominal peut
304 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
305 permet d'indiquer que l'argument est multi-paires.
307 - paires : les arguments par paire sont :
308 - xNominal : série d'arguments permettant de donner le point où
309 l'opérateur est construit pour être ensuite appliqué
310 - xValue : série d'arguments adaptés pour appliquer l'opérateur
311 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
313 if argsAsSerie: _nxValue = paires
314 else: _nxValue = (paires,)
315 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
317 if self.__Matrix is not None:
319 for paire in _nxValue:
320 _xNominal, _xValue = paire
321 self.__addOneMatrixCall()
322 _HxValue.append( self.__Matrix * _xValue )
# Function form: the whole (xNominal, xValue) pair series goes through.
324 self.__addOneMethodCall( len(_nxValue) )
325 if self.__extraArgs is None:
326 _HxValue = self.__Method( _nxValue ) # Calcul MF
328 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
330 if returnSerieAsArrayMatrix:
331 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
333 if argsAsSerie: return _HxValue
334 else: return _HxValue[-1]
# asMatrix: return the operator in matrix form — directly for the linear
# case, by evaluating the method at the given operating point(s) otherwise.
# The "UnknownVoidValue" string is a sentinel meaning "no point given"
# (None cannot be the sentinel since None is a legitimate value).
336 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
338 Permet de renvoyer l'opérateur sous la forme d'une matrice
340 if self.__Matrix is not None:
341 self.__addOneMatrixCall()
342 mValue = [self.__Matrix,]
343 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
346 self.__addOneMethodCall( len(ValueForMethodForm) )
347 for _vfmf in ValueForMethodForm:
348 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
350 self.__addOneMethodCall()
351 mValue = self.__Method(((ValueForMethodForm, None),))
# Neither a matrix nor an operating point: matrix form is impossible.
353 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
355 if argsAsSerie: return mValue
356 else: return mValue[-1]
# shape accessor (its "def" header is in a missing line): only available
# when the operator is stored in matrix form.
360 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
361 la forme d'une matrice
363 if self.__Matrix is not None:
364 return self.__Matrix.shape
366 raise ValueError("Matrix form of the operator is not available, nor the shape")
# nbcalls: report call counters — local (this instance) then global
# (class-level, shared by all Operator instances); "which" selects one.
368 def nbcalls(self, which=None):
370 Renvoie les nombres d'évaluations de l'opérateur
373 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
374 self.__NbCallsAsMatrix,
375 self.__NbCallsAsMethod,
376 self.__NbCallsOfCached,
377 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
378 Operator.NbCallsAsMatrix,
379 Operator.NbCallsAsMethod,
380 Operator.NbCallsOfCached,
382 if which is None: return __nbcalls
383 else: return __nbcalls[which]
# Counter helpers: each bumps both the instance-local and the class-level
# (global) tally for its category.
385 def __addOneMatrixCall(self):
386 "Comptabilise un appel"
387 self.__NbCallsAsMatrix += 1 # Decompte local
388 Operator.NbCallsAsMatrix += 1 # Decompte global
390 def __addOneMethodCall(self, nb = 1):
391 "Comptabilise un appel"
392 self.__NbCallsAsMethod += nb # Decompte local
393 Operator.NbCallsAsMethod += nb # Decompte global
395 def __addOneCacheCall(self):
396 "Comptabilise un appel"
397 self.__NbCallsOfCached += 1 # Decompte local
398 Operator.NbCallsOfCached += 1 # Decompte global
400 # ==============================================================================
# NOTE(review): numbered listing excerpt — the leading integers are embedded
# source line numbers; gaps in them mean lines are missing from this view.
# Code is annotated in place and left byte-identical.
#
# FullOperator: builds the (Direct, Tangent, Adjoint) Operator triple from
# one of several user inputs — a matrix, one function (with approximated
# derivatives via finite differences), three functions, or a script —
# storing the result in the self.__FO dictionary.
401 class FullOperator(object):
403 Classe générale d'interface de type opérateur complet
404 (Direct, Linéaire Tangent, Adjoint)
# Visible constructor keyword defaults (the "def __init__" header and some
# parameters, e.g. asMatrix/appliedInX/avoidRC, are in missing lines):
407 name = "GenericFullOperator",
409 asOneFunction = None, # 1 Fonction
410 asThreeFunctions = None, # 3 Fonctions in a dictionary
411 asScript = None, # 1 or 3 Fonction(s) by script
412 asDict = None, # Parameters
414 extraArguments = None,
416 inputAsMF = False,# Fonction(s) as Multi-Functions
421 self.__name = str(name)
422 self.__check = bool(toBeChecked)
423 self.__extraArgs = extraArguments
# Normalize the multiprocessing flags in __Parameters. Policy visible
# here: derivatives-MP takes priority and excludes evaluation-MP.
428 if (asDict is not None) and isinstance(asDict, dict):
429 __Parameters.update( asDict )
430 # Priorité à EnableMultiProcessingInDerivatives=True
431 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
432 __Parameters["EnableMultiProcessingInDerivatives"] = True
433 __Parameters["EnableMultiProcessingInEvaluation"] = False
434 if "EnableMultiProcessingInDerivatives" not in __Parameters:
435 __Parameters["EnableMultiProcessingInDerivatives"] = False
436 if __Parameters["EnableMultiProcessingInDerivatives"]:
437 __Parameters["EnableMultiProcessingInEvaluation"] = False
438 if "EnableMultiProcessingInEvaluation" not in __Parameters:
439 __Parameters["EnableMultiProcessingInEvaluation"] = False
# Legacy parameter name kept temporarily, mapped to the current one.
440 if "withIncrement" in __Parameters: # Temporaire
441 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Script input: import either a matrix, a single Direct operator (with
# approximated derivatives), or the three operators by name.
443 if asScript is not None:
444 __Matrix, __Function = None, None
446 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
448 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
449 __Function.update({"useApproximatedDerivatives":True})
450 __Function.update(__Parameters)
451 elif asThreeFunctions:
453 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
454 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
455 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
457 __Function.update(__Parameters)
# Direct (non-script) input: a single function, or a dict of functions.
460 if asOneFunction is not None:
461 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
462 if asOneFunction["Direct"] is not None:
463 __Function = asOneFunction
465 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
467 __Function = { "Direct":asOneFunction }
468 __Function.update({"useApproximatedDerivatives":True})
469 __Function.update(__Parameters)
470 elif asThreeFunctions is not None:
# Full triple: Tangent and Adjoint both given and approximation not
# explicitly requested.
471 if isinstance(asThreeFunctions, dict) and \
472 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
473 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
474 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
475 __Function = asThreeFunctions
# Only Direct given: fall back to approximated derivatives.
476 elif isinstance(asThreeFunctions, dict) and \
477 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
478 __Function = asThreeFunctions
479 __Function.update({"useApproximatedDerivatives":True})
481 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Missing Direct is substituted by the Tangent operator.
482 if "Direct" not in asThreeFunctions:
483 __Function["Direct"] = asThreeFunctions["Tangent"]
484 __Function.update(__Parameters)
# Historical Python-2 guard, kept commented out in the original source.
488 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
489 # for k in ("Direct", "Tangent", "Adjoint"):
490 # if k in __Function and hasattr(__Function[k],"__class__"):
491 # if type(__Function[k]) is type(self.__init__):
492 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize appliedInX: a bare value becomes the {"HXb": value} dict.
494 if appliedInX is not None and isinstance(appliedInX, dict):
495 __appliedInX = appliedInX
496 elif appliedInX is not None:
497 __appliedInX = {"HXb":appliedInX}
501 if scheduledBy is not None:
502 self.__T = scheduledBy
# Case 1: approximated derivatives — build an FDApproximation and wrap
# its Direct/Tangent/Adjoint as the three Operators.
504 if isinstance(__Function, dict) and \
505 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
506 ("Direct" in __Function) and (__Function["Direct"] is not None):
# Fill in every FDA tuning knob with its default when absent.
507 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
508 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
509 if "withdX" not in __Function: __Function["withdX"] = None
510 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
511 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
512 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
513 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
514 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Local import, presumably to avoid a circular dependency — TODO confirm.
515 from daCore import NumericObjects
516 FDA = NumericObjects.FDApproximation(
518 Function = __Function["Direct"],
519 centeredDF = __Function["CenteredFiniteDifference"],
520 increment = __Function["DifferentialIncrement"],
521 dX = __Function["withdX"],
522 avoidingRedundancy = __Function["withAvoidingRedundancy"],
523 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
524 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
525 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
526 mpWorkers = __Function["NumberOfProcesses"],
527 mfEnabled = __Function["withmfEnabled"],
529 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
530 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
531 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: the three functions are all explicitly provided.
532 elif isinstance(__Function, dict) and \
533 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
534 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
535 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
536 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
537 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: linear operator given as a matrix; Adjoint is its transpose.
# NOTE(review): numpy.float was removed in NumPy 1.20 (see Operator above).
538 elif asMatrix is not None:
539 __matrice = numpy.matrix( __Matrix, numpy.float )
540 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
541 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
542 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
545 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Normalize each appliedInX entry to a numpy column matrix, whatever the
# input container was (matrix, 2D array, or plain sequence).
547 if __appliedInX is not None:
548 self.__FO["AppliedInX"] = {}
549 for key in list(__appliedInX.keys()):
550 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
551 # Pour le cas où l'on a une vraie matrice
552 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
553 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
554 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
555 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
557 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
559 self.__FO["AppliedInX"] = None
# Standard textual representations delegate to the underlying dict.
565 "x.__repr__() <==> repr(x)"
566 return repr(self.__FO)
569 "x.__str__() <==> str(x)"
570 return str(self.__FO)
572 # ==============================================================================
573 class Algorithm(object):
575 Classe générale d'interface de type algorithme
577 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
578 d'assimilation, en fournissant un container (dictionnaire) de variables
579 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
581 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
# NOTE(review): numbered listing excerpt — the leading integers are embedded
# source line numbers; gaps mean lines are missing from this view. Code is
# annotated in place and left byte-identical.
#
# Algorithm.__init__: register every generically-available storage variable
# (Persistence containers) and the case-insensitive name lookup tables used
# by get()/setParameters().
583 def __init__(self, name):
585 L'initialisation présente permet de fabriquer des variables de stockage
586 disponibles de manière générique dans les algorithmes élémentaires. Ces
587 variables de stockage sont ensuite conservées dans un dictionnaire
588 interne à l'objet, mais auquel on accède par la méthode "get".
590 Les variables prévues sont :
591 - APosterioriCorrelations : matrice de corrélations de la matrice A
592 - APosterioriCovariance : matrice de covariances a posteriori : A
593 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
594 - APosterioriVariances : vecteur des variances de la matrice A
595 - Analysis : vecteur d'analyse : Xa
596 - BMA : Background moins Analysis : Xa - Xb
597 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
598 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
599 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
600 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
601 - CostFunctionJo : partie observations de la fonction-coût : Jo
602 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
603 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
604 - CurrentOptimum : état optimal courant lors d'itérations
605 - CurrentState : état courant lors d'itérations
606 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
607 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
608 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
609 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
610 - Innovation : l'innovation : d = Y - H(X)
611 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
612 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
613 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
614 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
615 - KalmanGainAtOptimum : gain de Kalman à l'optimum
616 - MahalanobisConsistency : indicateur de consistance des covariances
617 - OMA : Observation moins Analyse : Y - Xa
618 - OMB : Observation moins Background : Y - Xb
619 - ForecastState : état prédit courant lors d'itérations
620 - Residu : dans le cas des algorithmes de vérification
621 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
622 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
623 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
624 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
625 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
626 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
627 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
628 On peut rajouter des variables à stocker dans l'initialisation de
629 l'algorithme élémentaire qui va hériter de cette classe
# Bookkeeping: memory monitor, name, default parameters, and the required
# inputs declaration filled in by subclasses.
631 logging.debug("%s Initialisation", str(name))
632 self._m = PlatformInfo.SystemUsage()
634 self._name = str( name )
635 self._parameters = {"StoreSupplementaryCalculations":[]}
636 self.__required_parameters = {}
637 self.__required_inputs = {
638 "RequiredInputValues":{"mandatory":(), "optional":()},
639 "ClassificationTags":[],
641 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
642 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
643 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
# Registration of every storable variable as a Persistence container of
# the appropriate kind (OneScalar/OneVector/OneMatrix/OneIndex).
645 self.StoredVariables = {}
646 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
647 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
648 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
649 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
650 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
651 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
652 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
653 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
654 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
655 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
656 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
657 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
658 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
659 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
660 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
661 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
662 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
663 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
664 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
665 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
666 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
667 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
668 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
669 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
670 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
671 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
672 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
673 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
674 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
675 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
676 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
677 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
678 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
679 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
680 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
681 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
682 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
683 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
684 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Case-insensitive lookup tables: lowercased name -> canonical name.
686 for k in self.StoredVariables:
687 self.__canonical_stored_name[k.lower()] = k
689 for k, v in self.__variable_names_not_public.items():
690 self.__canonical_parameter_name[k.lower()] = k
691 self.__canonical_parameter_name["algorithm"] = "Algorithm"
692 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
# NOTE(review): numbered listing excerpt — leading integers are embedded
# source line numbers; gaps mean lines are missing from this view. Code is
# annotated in place and left byte-identical.
#
# _pre_run: common pre-processing for every elementary algorithm — merge
# user Parameters with defaults, validate the presence of required vectors
# (Xb, Y, U), covariances (B, R, Q) and operators (HO, EM, CM), then set
# bounds and optimizer verbosity knobs.
694 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
696 logging.debug("%s Lancement", self._name)
697 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
698 self._getTimeState(reset=True)
700 # Mise a jour des paramètres internes avec le contenu de Parameters, en
701 # reprenant les valeurs par défauts pour toutes celles non définies
702 self.__setParameters(Parameters, reset=True)
703 for k, v in self.__variable_names_not_public.items():
704 if k not in self._parameters: self.__setParameters( {k:v} )
706 # Corrections et compléments des vecteurs
# Local validator: raise if a mandatory vector is unset, otherwise log.
# (The "argument is None" guard line is missing from this excerpt.)
707 def __test_vvalue(argument, variable, argname, symbol=None):
708 if symbol is None: symbol = variable
710 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
711 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
712 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
713 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
715 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
717 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
719 __test_vvalue( Xb, "Xb", "Background or initial state" )
720 __test_vvalue( Y, "Y", "Observation" )
721 __test_vvalue( U, "U", "Control" )
723 # Corrections et compléments des covariances
# Same validation pattern for the error covariance matrices.
724 def __test_cvalue(argument, variable, argname, symbol=None):
725 if symbol is None: symbol = variable
727 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
728 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
729 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
730 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
732 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
734 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
736 __test_cvalue( B, "B", "Background" )
737 __test_cvalue( R, "R", "Observation" )
738 __test_cvalue( Q, "Q", "Evolution" )
740 # Corrections et compléments des opérateurs
# Same validation pattern for the operators; an empty dict counts as unset.
741 def __test_ovalue(argument, variable, argname, symbol=None):
742 if symbol is None: symbol = variable
743 if argument is None or (isinstance(argument,dict) and len(argument)==0):
744 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
745 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
746 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
747 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
749 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
751 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
753 __test_ovalue( HO, "HO", "Observation", "H" )
754 __test_ovalue( EM, "EM", "Evolution", "M" )
755 __test_ovalue( CM, "CM", "Control Model", "C" )
# Bounds are kept only when given as a non-empty list/tuple.
757 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
758 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
760 self._parameters["Bounds"] = None
# Optimizer verbosity follows the logging level: verbose below WARNING,
# quiet otherwise; scipy TNC message flags are used when scipy is present.
762 if logging.getLogger().level < logging.WARNING:
763 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
764 if PlatformInfo.has_scipy:
765 import scipy.optimize
766 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
768 self._parameters["optmessages"] = 15
770 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
771 if PlatformInfo.has_scipy:
772 import scipy.optimize
773 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
775 self._parameters["optmessages"] = 15
779     def _post_run(self,_oH=None):
        # Post-run hook: derive optional a posteriori diagnostics and log run statistics.
        # NOTE(review): this chunk is a numbered dump with some original lines missing
        # (e.g. the original one-line docstring); code lines are kept verbatim.
781         if ("StoreSupplementaryCalculations" in self._parameters) and \
782             "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            # For each stored a posteriori covariance matrix _A, optionally store its
            # variances (diagonal), standard deviations (sqrt of diagonal) and the
            # correlation matrix C = Ei * A * Ei with Ei = diag(1/sqrt(diag(A))).
783             for _A in self.StoredVariables["APosterioriCovariance"]:
784                 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
785                     self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
786                 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
787                     self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
788                 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
789                     _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
790                     _C = numpy.dot(_EI, numpy.dot(_A, _EI))
791                     self.StoredVariables["APosterioriCorrelations"].store( _C )
        # Log direct/tangent/adjoint operator call and cache-hit counters when available.
792         if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
793             logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
794             logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        # NOTE(review): _getTimeState() is called twice here, so cpu/elapsed values may
        # differ by a few microseconds between the two format arguments — confirm intended.
795         logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
796         logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
797         logging.debug("%s Terminé", self._name)
800 def _toStore(self, key):
801 "True if in StoreSupplementaryCalculations, else False"
802 return key in self._parameters["StoreSupplementaryCalculations"]
804     def get(self, key=None):
        # Accessor over StoredVariables: a single persistence object for a known key,
        # or (presumably, in the missing branch) the whole collection when key is None.
        # NOTE(review): several original lines (docstring delimiters, the `if key is not
        # None` guard, `else:` lines, `def` lines of keys/pop) are missing from this dump;
        # remaining code is kept verbatim.
806         Renvoie l'une des variables stockées identifiée par la clé, ou le
807         dictionnaire de l'ensemble des variables disponibles en l'absence de
808         clé. Ce sont directement les variables sous forme objet qui sont
809         renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
810         des classes de persistance.
        # Key lookup is case-insensitive through __canonical_stored_name.
813             return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
815             return self.StoredVariables
817     def __contains__(self, key=None):
818         "D.__contains__(k) -> True if D has a key k, else False"
819         if key is None or key.lower() not in self.__canonical_stored_name:
            # (missing line here presumably returns False for unknown/None keys — TODO confirm)
822             return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        # keys(): dict-like protocol delegated to StoredVariables (its `def` line is
        # missing from this dump).
825         "D.keys() -> list of D's keys"
826         if hasattr(self, "StoredVariables"):
827             return self.StoredVariables.keys()
        # pop(): dict-like removal with case-insensitive key canonicalisation (its `def`
        # line is missing from this dump).
832         "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
833         if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
834             return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
839             raise TypeError("pop expected at least 1 arguments, got 0")
840         "If key is not found, d is returned if given, otherwise KeyError is raised"
846 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
848 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
849 sa forme mathématique la plus naturelle possible.
851 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
853     def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
        # Declare a required algorithm parameter and its default characteristics
        # (default value, typecast, bounds, allowed values) in __required_parameters,
        # and register a case-insensitive alias in __canonical_parameter_name.
        # NOTE(review): this dump is missing several original lines (docstring
        # delimiters, the `if name is None:` guard, most dict entries of the
        # parameter record); code lines are kept verbatim.
855         Permet de définir dans l'algorithme des paramètres requis et leurs
856         caractéristiques par défaut.
859             raise ValueError("A name is mandatory to define a required parameter.")
861         self.__required_parameters[name] = {
863             "typecast" : typecast,
869         self.__canonical_parameter_name[name.lower()] = name
870         logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
872     def getRequiredParameters(self, noDetails=True):
        # Return either the sorted list of required-parameter names (noDetails=True,
        # branch line missing from this dump) or the full description dictionary.
874         Renvoie la liste des noms de paramètres requis ou directement le
875         dictionnaire des paramètres requis.
878             return sorted(self.__required_parameters.keys())
880             return self.__required_parameters
882     def setParameterValue(self, name=None, value=None):
        # Controlled evaluation of a required parameter value: canonicalise the name,
        # apply the declared typecast, then enforce minval/maxval/listval constraints.
884         Renvoie la valeur d'un paramètre requis de manière contrôlée
886         __k = self.__canonical_parameter_name[name.lower()]
887         default = self.__required_parameters[__k]["default"]
888         typecast = self.__required_parameters[__k]["typecast"]
889         minval = self.__required_parameters[__k]["minval"]
890         maxval = self.__required_parameters[__k]["maxval"]
891         listval = self.__required_parameters[__k]["listval"]
893         if value is None and default is None:
            # (missing line presumably sets __val = None — TODO confirm)
895         elif value is None and default is not None:
896             if typecast is None: __val = default
897             else: __val = typecast( default )
            # (the enclosing else/try lines for the explicit-value branch are missing)
899             if typecast is None: __val = value
902                 __val = typecast( value )
904                 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Bound checks work element-wise thanks to numpy.array, so they also cover
        # list/tuple-valued parameters.
906         if minval is not None and (numpy.array(__val, float) < minval).any():
907             raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
908         if maxval is not None and (numpy.array(__val, float) > maxval).any():
909             raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
910         if listval is not None:
911             if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                # (the per-element loop line is missing from this dump)
914                     raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
915             elif __val not in listval:
916                 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
920 def requireInputArguments(self, mandatory=(), optional=()):
922 Permet d'imposer des arguments de calcul requis en entrée.
924 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
925 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
927 def getInputArguments(self):
929 Permet d'obtenir les listes des arguments de calcul requis en entrée.
931 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
933 def setAttributes(self, tags=()):
935 Permet d'adjoindre des attributs comme les tags de classification.
936 Renvoie la liste actuelle dans tous les cas.
938 self.__required_inputs["ClassificationTags"].extend( tags )
939 return self.__required_inputs["ClassificationTags"]
941     def __setParameters(self, fromDico={}, reset=False):
        # Store received parameters into the internal dictionary, mapping incoming
        # keys case-insensitively onto their canonical declared names, then
        # (re)evaluate every declared parameter through setParameterValue.
        # NOTE(review): this dump is missing several original lines (docstring
        # delimiters, an `else:` line, reset-related lines); code kept verbatim.
        # NOTE(review): mutable default `fromDico={}` is only read here, never
        # mutated in the visible lines, so the shared-default pitfall does not bite.
943         Permet de stocker les paramètres reçus dans le dictionnaire interne.
945         self._parameters.update( fromDico )
946         __inverse_fromDico_keys = {}
947         for k in fromDico.keys():
948             if k.lower() in self.__canonical_parameter_name:
949                 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
950         #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
951         __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
952         for k in self.__required_parameters.keys():
953             if k in __canonic_fromDico_keys:
954                 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
956                 self._parameters[k] = self.setParameterValue(k)
959             logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
961     def _getTimeState(self, reset=False):
        # Initialise (reset=True, branch lines missing from this dump) or return
        # the (cpu, elapsed) time spent since initialisation, in seconds.
963         Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
966             self.__initial_cpu_time = time.process_time()
967             self.__initial_elapsed_time = time.perf_counter()
970             self.__cpu_time = time.process_time() - self.__initial_cpu_time
971             self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
972             return self.__cpu_time, self.__elapsed_time
974     def _StopOnTimeLimit(self, X=None, withReason=False):
975         "Stop criteria on time limit: True/False [+ Reason]"
        # Stop when either the CPU-time or elapsed-time budget declared in
        # self._parameters is exceeded; the final return line is missing from
        # this dump (presumably returns __SC, optionally with __SR).
976         c, e = self._getTimeState()
977         if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
978             __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
979         elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
980             __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
982             __SC, __SR = False, ""
988 # ==============================================================================
989 class AlgorithmAndParameters(object):
    # General interface class binding an algorithm choice to its parameters,
    # and driving the assimilation execution (Python or YACS scheme).
    # NOTE(review): this dump is missing many original lines (docstring
    # delimiters, `def __init__`/parameter lines, `else:`/`try:` lines);
    # all remaining code lines are kept verbatim.
991     Classe générale d'interface d'action pour l'algorithme et ses paramètres
994                  name = "GenericAlgorithm",
        # __init__ body (its `def` line is missing from this dump): record the name,
        # initialise algorithm bookkeeping, ingest parameters, and resolve the
        # algorithm choice from either an explicit argument or a script.
1001         self.__name = str(name)
1005         self.__algorithm = {}
1006         self.__algorithmFile = None
1007         self.__algorithmName = None
1009         self.updateParameters( asDict, asScript )
1011         if asAlgorithm is None and asScript is not None:
1012             __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1014             __Algo = asAlgorithm
1016         if __Algo is not None:
1017             self.__A = str(__Algo)
1018             self.__P.update( {"Algorithm":self.__A} )
1020             self.__setAlgorithm( self.__A )
1022         self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
1024     def updateParameters(self,
        # Merge parameters from a dict or from a "Parameters" variable of a script.
1028         "Mise a jour des parametres"
1029         if asDict is None and asScript is not None:
1030             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1034         if __Dict is not None:
1035             self.__P.update( dict(__Dict) )
1037     def executePythonScheme(self, asDictAO = None):
        # Run the assimilation in pure Python: unpack the AO dictionary into the
        # internal Xb/Y/U/HO/EM/CM/B/R/Q slots, validate shapes, then run the algorithm.
1038         "Permet de lancer le calcul d'assimilation"
1039         Operator.CM.clearCache()
1041         if not isinstance(asDictAO, dict):
1042             raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1043         if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1044         elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1045         else: self.__Xb = None
1046         if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1047         else: self.__Y = asDictAO["Observation"]
1048         if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1049         else: self.__U = asDictAO["ControlInput"]
1050         if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1051         else: self.__HO = asDictAO["ObservationOperator"]
1052         if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1053         else: self.__EM = asDictAO["EvolutionModel"]
1054         if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1055         else: self.__CM = asDictAO["ControlModel"]
1056         self.__B = asDictAO["BackgroundError"]
1057         self.__R = asDictAO["ObservationError"]
1058         self.__Q = asDictAO["EvolutionError"]
1060         self.__shape_validate()
1062         self.__algorithm.run(
1072             Parameters = self.__P,
1076     def executeYACSScheme(self, FileName=None):
        # Run the assimilation through a YACS XML scheme inside a SALOME session;
        # requires the SALOME/YACS/ADAO environment to be loaded.
1077         "Permet de lancer le calcul d'assimilation"
1078         if FileName is None or not os.path.exists(FileName):
1079             raise ValueError("a YACS file name has to be given for YACS execution.\n")
1081             __file = os.path.abspath(FileName)
1082             logging.debug("The YACS file name is \"%s\"."%__file)
1083         if not PlatformInfo.has_salome or \
1084             not PlatformInfo.has_yacs or \
1085             not PlatformInfo.has_adao:
1086             raise ImportError("\n\n"+\
1087                 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1088                 "Please load the right environnement before trying to use it.\n")
1091         import SALOMERuntime
1093         SALOMERuntime.RuntimeSALOME_setRuntime()
1095         r = pilot.getRuntime()
1096         xmlLoader = loader.YACSLoader()
1097         xmlLoader.registerProcCataLoader()
1099             catalogAd = r.loadCatalog("proc", __file)
1100             r.addCatalog(catalogAd)
        # Load, validate and check consistency of the scheme before execution;
        # parsing/validation problems are reported on stdout rather than raised.
1105             p = xmlLoader.load(__file)
1106         except IOError as ex:
1107             print("The YACS XML schema file can not be loaded: %s"%(ex,))
1109         logger = p.getLogger("parser")
1110         if not logger.isEmpty():
1111             print("The imported YACS XML schema has errors on parsing:")
1112             print(logger.getStr())
1115             print("The YACS XML schema is not valid and will not be executed:")
1116             print(p.getErrorReport())
1118         info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1119         p.checkConsistency(info)
1120         if info.areWarningsOrErrors():
1121             print("The YACS XML schema is not coherent and will not be executed:")
1122             print(info.getGlobalRepr())
1124         e = pilot.ExecutorSwig()
1126         if p.getEffectiveState() != pilot.DONE:
1127             print(p.getErrorReport())
1131     def get(self, key = None):
        # Lookup order: algorithm stored variables first, then parameters; the
        # no-key branch (lines missing here) exposes all public variables.
1132         "Vérifie l'existence d'une clé de variable ou de paramètres"
1133         if key in self.__algorithm:
1134             return self.__algorithm.get( key )
1135         elif key in self.__P:
1136             return self.__P[key]
1138             allvariables = self.__P
1139             for k in self.__variable_names_not_public: allvariables.pop(k, None)
1142 def pop(self, k, d):
1143 "Necessaire pour le pickling"
1144 return self.__algorithm.pop(k, d)
1146 def getAlgorithmRequiredParameters(self, noDetails=True):
1147 "Renvoie la liste des paramètres requis selon l'algorithme"
1148 return self.__algorithm.getRequiredParameters(noDetails)
1150 def getAlgorithmInputArguments(self):
1151 "Renvoie la liste des entrées requises selon l'algorithme"
1152 return self.__algorithm.getInputArguments()
1154 def getAlgorithmAttributes(self):
1155 "Renvoie la liste des attributs selon l'algorithme"
1156 return self.__algorithm.setAttributes()
1158     def setObserver(self, __V, __O, __I, __S):
        # Attach observer __O (with info __I, scheduler __S) to stored variable __V.
        # NOTE(review): this dump is missing some original lines (the `else:`/extra
        # keyword-argument lines of setDataObserver, `return False` lines of
        # hasObserver, the `def keys` line); remaining code is kept verbatim.
1159         if self.__algorithm is None \
1160             or isinstance(self.__algorithm, dict) \
1161             or not hasattr(self.__algorithm,"StoredVariables"):
1162             raise ValueError("No observer can be build before choosing an algorithm.")
1163         if __V not in self.__algorithm:
1164             raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1166             self.__algorithm.StoredVariables[ __V ].setDataObserver(
1169                 HookParameters = __I,
1172     def removeObserver(self, __V, __O, __A = False):
        # Detach observer __O from stored variable __V (__A presumably removes all —
        # TODO confirm against the missing call arguments).
1173         if self.__algorithm is None \
1174             or isinstance(self.__algorithm, dict) \
1175             or not hasattr(self.__algorithm,"StoredVariables"):
1176             raise ValueError("No observer can be removed before choosing an algorithm.")
1177         if __V not in self.__algorithm:
1178             raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1180             return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1185     def hasObserver(self, __V):
        # True when variable __V currently carries a data observer; the early
        # `return False` lines of the guard branches are missing from this dump.
1186         if self.__algorithm is None \
1187             or isinstance(self.__algorithm, dict) \
1188             or not hasattr(self.__algorithm,"StoredVariables"):
1190         if __V not in self.__algorithm:
1192         return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # keys(): union of algorithm variables and parameter names, with the
        # non-public variable names filtered out (its `def` line is missing here).
1195         __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1196         for k in self.__variable_names_not_public:
1197             if k in __allvariables: __allvariables.remove(k)
1198         return __allvariables
1200 def __contains__(self, key=None):
1201 "D.__contains__(k) -> True if D has a key k, else False"
1202 return key in self.__algorithm or key in self.__P
        # __repr__/__str__ bodies (their `def` lines are missing from this dump):
        # textual form is "algorithm, parameters".
1205         "x.__repr__() <==> repr(x)"
1206         return repr(self.__A)+", "+repr(self.__P)
1209         "x.__str__() <==> str(x)"
1210         return str(self.__A)+", "+str(self.__P)
1212     def __setAlgorithm(self, choice = None ):
        # Resolve the algorithm name to a module file under a "daAlgorithms"
        # directory on sys.path, import it, check it defines ElementaryAlgorithm,
        # and instantiate it.
        # NOTE(review): several original lines are missing from this dump
        # (docstring delimiters, the `if choice is None:` guard, the
        # `module_path = None` initialisation, `try:` lines); code kept verbatim.
1214         Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1215         d'assimilation. L'argument est un champ caractère se rapportant au nom
1216         d'un algorithme réalisant l'opération sur les arguments fixes.
1219             raise ValueError("Error: algorithm choice has to be given")
1220         if self.__algorithmName is not None:
1221             raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1222         daDirectory = "daAlgorithms"
1224         # Recherche explicitement le fichier complet
1225         # ------------------------------------------
1227         for directory in sys.path:
1228             if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1229                 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1230         if module_path is None:
1231             raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1233         # Importe le fichier complet comme un module
1234         # ------------------------------------------
        # sys.path is temporarily prepended with the module directory and restored
        # right after the import.
1236             sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1237             self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1238             if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1239                 raise ImportError("this module does not define a valid elementary algorithm.")
1240             self.__algorithmName = str(choice)
1241             sys.path = sys_path_tmp ; del sys_path_tmp
1242         except ImportError as e:
1243             raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1245         # Instancie un objet du type élémentaire du fichier
1246         # -------------------------------------------------
1247         self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1250     def __shape_validate(self):
        # Validate that the sizes of vectors (Xb, Y, U), covariance matrices
        # (B, R, Q) and operators (HO, EM, CM) are mutually consistent before
        # running the algorithm. Each object may expose `size`, a tuple `shape`
        # attribute, or a callable `shape()` (ADAO covariance objects).
        # NOTE(review): this dump is missing occasional original lines (docstring
        # delimiters, `else:` lines around the EnsembleBlue special case, the
        # `pass`/return tail); remaining code is kept verbatim.
1252         Validation de la correspondance correcte des tailles des variables et
1253         des matrices s'il y en a.
1255         if self.__Xb is None: __Xb_shape = (0,)
1256         elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1257         elif hasattr(self.__Xb,"shape"):
1258             if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1259             else: __Xb_shape = self.__Xb.shape()
1260         else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1262         if self.__Y is None: __Y_shape = (0,)
1263         elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1264         elif hasattr(self.__Y,"shape"):
1265             if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1266             else: __Y_shape = self.__Y.shape()
1267         else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1269         if self.__U is None: __U_shape = (0,)
1270         elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1271         elif hasattr(self.__U,"shape"):
1272             if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1273             else: __U_shape = self.__U.shape()
1274         else: raise TypeError("The control (U) has no attribute of shape: problem !")
1276         if self.__B is None: __B_shape = (0,0)
1277         elif hasattr(self.__B,"shape"):
1278             if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1279             else: __B_shape = self.__B.shape()
1280         else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1282         if self.__R is None: __R_shape = (0,0)
1283         elif hasattr(self.__R,"shape"):
1284             if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1285             else: __R_shape = self.__R.shape()
1286         else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1288         if self.__Q is None: __Q_shape = (0,0)
1289         elif hasattr(self.__Q,"shape"):
1290             if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1291             else: __Q_shape = self.__Q.shape()
1292         else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        # Operators are dictionaries with a "Direct" entry; an empty or plain dict
        # yields the neutral (0,0) shape.
1294         if len(self.__HO) == 0: __HO_shape = (0,0)
1295         elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1296         elif hasattr(self.__HO["Direct"],"shape"):
1297             if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1298             else: __HO_shape = self.__HO["Direct"].shape()
1299         else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1301         if len(self.__EM) == 0: __EM_shape = (0,0)
1302         elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1303         elif hasattr(self.__EM["Direct"],"shape"):
1304             if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1305             else: __EM_shape = self.__EM["Direct"].shape()
1306         else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1308         if len(self.__CM) == 0: __CM_shape = (0,0)
1309         elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1310         elif hasattr(self.__CM["Direct"],"shape"):
1311             if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1312             else: __CM_shape = self.__CM["Direct"].shape()
1313         else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1315         # Vérification des conditions
1316         # ---------------------------
1317         if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1318             raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1319         if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1320             raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        # Covariance matrices and the evolution operator must be square.
1322         if not( min(__B_shape) == max(__B_shape) ):
1323             raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1324         if not( min(__R_shape) == max(__R_shape) ):
1325             raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1326         if not( min(__Q_shape) == max(__Q_shape) ):
1327             raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1328         if not( min(__EM_shape) == max(__EM_shape) ):
1329             raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        # Cross-consistency between the observation operator and state/observation
        # vectors and their covariance matrices.
1331         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1332             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1333         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1334             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1335         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1336             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1337         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1338             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        # Special case: for EnsembleBlue, a background made of several stacked
        # members is reshaped into a persistent vector of individual members.
        # NOTE(review): `numpy.float` was removed in NumPy 1.24+ — confirm the
        # NumPy version pinned by this project.
1340         if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1341             if self.__algorithmName in ["EnsembleBlue",]:
1342                 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1343                 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1344                 for member in asPersistentVector:
1345                     self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1346                 __Xb_shape = min(__B_shape)
1348                 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1350         if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1351             raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1353         if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1354             raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1356         if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1357             raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1359         if ("Bounds" in self.__P) \
1360             and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1361             and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1362             raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1363                 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1367 # ==============================================================================
1368 class RegulationAndParameters(object):
    # General interface class holding a regulation algorithm name and its
    # parameters, resolved from explicit arguments or from a script.
    # NOTE(review): this dump is missing several original lines (docstring
    # delimiters, the `def __init__` line and most of its signature,
    # `else:` lines, the `if key in self.__P:` guard of get);
    # remaining code lines are kept verbatim.
1370     Classe générale d'interface d'action pour la régulation et ses paramètres
1373                  name = "GenericRegulation",
1380         self.__name = str(name)
1383         if asAlgorithm is None and asScript is not None:
1384             __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1386             __Algo = asAlgorithm
1388         if asDict is None and asScript is not None:
1389             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1393         if __Dict is not None:
1394             self.__P.update( dict(__Dict) )
1396         if __Algo is not None:
1397             self.__P.update( {"Algorithm":str(__Algo)} )
1399     def get(self, key = None):
        # Parameter accessor; the missing branch presumably returns the whole
        # dictionary when the key is absent or None — TODO confirm.
1400         "Vérifie l'existence d'une clé de variable ou de paramètres"
1402             return self.__P[key]
1406 # ==============================================================================
1407 class DataObserver(object):
    # General observer interface: normalises the observed variable name(s) and
    # info string(s), builds the observer callable (from an object, template,
    # string or script), then registers it on the algorithm's variables.
    # NOTE(review): this dump is missing several original lines (docstring
    # delimiters, the `def __init__` line and most parameters, `else:` lines,
    # the per-variable ename/einfo extraction, `def __repr__`/`__str__` lines);
    # remaining code lines are kept verbatim.
1409     Classe générale d'interface de type observer
1412                  name = "GenericObserver",
1424         self.__name = str(name)
1429         if onVariable is None:
1430             raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1431         elif type(onVariable) in (tuple, list):
1432             self.__V = tuple(map( str, onVariable ))
1433             if withInfo is None:
1436                 self.__I = (str(withInfo),)*len(self.__V)
1437         elif isinstance(onVariable, str):
1438             self.__V = (onVariable,)
1439             if withInfo is None:
1440                 self.__I = (onVariable,)
1442                 self.__I = (str(withInfo),)
1444             raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        # Build the observer function: either the object given directly, or a
        # function compiled from a UserScript text through Observer2Func.
1446         if asObsObject is not None:
1447             self.__O = asObsObject
1449             __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1450             __Function = Observer2Func(__FunctionText)
1451             self.__O = __Function.getfunc()
1453         for k in range(len(self.__V)):
1456             if ename not in withAlgo:
1457                 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1459                 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1462         "x.__repr__() <==> repr(x)"
1463         return repr(self.__V)+"\n"+repr(self.__O)
1466         "x.__str__() <==> str(x)"
1467         return str(self.__V)+"\n"+str(self.__O)
1469 # ==============================================================================
1470 class UserScript(object):
    # General interface wrapping a user script as text (__F): the text comes
    # from a direct string, a named template (UserPostAnalysis/Observer), or a
    # script file, and is exposed through repr()/str().
    # NOTE(review): this dump is missing several original lines (docstring
    # delimiters, the `def __init__` line and parameters, the asString
    # assignment branch, `else:`/error lines, `def __repr__`/`__str__` lines);
    # remaining code lines are kept verbatim.
1472     Classe générale d'interface de type texte de script utilisateur
1475                  name = "GenericUserScript",
1482         self.__name = str(name)
1484         if asString is not None:
1486         elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1487             self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1488         elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1489             self.__F = Templates.ObserverTemplates[asTemplate]
1490         elif asScript is not None:
1491             self.__F = Interfaces.ImportFromScript(asScript).getstring()
1496         "x.__repr__() <==> repr(x)"
1497         return repr(self.__F)
1500         "x.__str__() <==> str(x)"
1501         return str(self.__F)
1503 # ==============================================================================
1504 class ExternalParameters(object):
    # General interface holding a dictionary of external parameters (__P),
    # filled from a dict or from an "ExternalParameters" variable of a script,
    # with dict-like access (get/keys/pop/items/__contains__).
    # NOTE(review): this dump is missing several original lines (docstring
    # delimiters, the `def __init__` line and parameters, `def keys`/`items`
    # lines, branch guards of get); remaining code lines are kept verbatim.
1506     Classe générale d'interface de type texte de script utilisateur
1509                  name = "GenericExternalParameters",
1515         self.__name = str(name)
1518         self.updateParameters( asDict, asScript )
1520     def updateParameters(self,
        # Merge parameters from a dict or from a script variable.
1524         "Mise a jour des parametres"
1525         if asDict is None and asScript is not None:
1526             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1530         if __Dict is not None:
1531             self.__P.update( dict(__Dict) )
1533     def get(self, key = None):
        # Return the value for a known key, otherwise (missing branch) the key list.
1535             return self.__P[key]
1537             return list(self.__P.keys())
        # keys() body (its `def` line is missing from this dump).
1540         return list(self.__P.keys())
1542     def pop(self, k, d):
        # dict-like removal with default.
1543         return self.__P.pop(k, d)
        # items() body (its `def` line is missing from this dump).
1546         return self.__P.items()
1548     def __contains__(self, key=None):
1549         "D.__contains__(k) -> True if D has a key k, else False"
1550         return key in self.__P
1552 # ==============================================================================
1553 class State(object):
    # General state-vector interface: builds either a single vector (__V as a
    # column numpy.matrix) or a series of vectors (Persistence.OneVector) from a
    # direct value, a script variable, or data-file columns.
    # NOTE(review): this dump is missing many original lines (docstring
    # delimiters, parts of the __init__ signature, `else:` lines, the `getO`
    # branches, `def` lines of the type-check and repr/str methods); remaining
    # code lines are kept verbatim.
    # NOTE(review): `numpy.matrix` is discouraged and `numpy.float` was removed
    # in NumPy 1.24+ — confirm the NumPy version pinned by this project.
1555     Classe générale d'interface de type état
1558                  name = "GenericVector",
1560                  asPersistentVector = None,
1566                  toBeChecked = False,
1569         Permet de définir un vecteur :
1570         - asVector : entrée des données, comme un vecteur compatible avec le
1571           constructeur de numpy.matrix, ou "True" si entrée par script.
1572         - asPersistentVector : entrée des données, comme une série de vecteurs
1573           compatible avec le constructeur de numpy.matrix, ou comme un objet de
1574           type Persistence, ou "True" si entrée par script.
1575         - asScript : si un script valide est donné contenant une variable
1576           nommée "name", la variable est de type "asVector" (par défaut) ou
1577           "asPersistentVector" selon que l'une de ces variables est placée à
1579         - asDataFile : si un ou plusieurs fichiers valides sont donnés
1580           contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1581           (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1582           nommée "name"), on récupère les colonnes et on les range ligne après
1583           ligne (colMajor=False, par défaut) ou colonne après colonne
1584           (colMajor=True). La variable résultante est de type "asVector" (par
1585           défaut) ou "asPersistentVector" selon que l'une de ces variables est
1588         self.__name = str(name)
1589         self.__check = bool(toBeChecked)
1593         self.__is_vector = False
1594         self.__is_series = False
        # Source selection: script variable, data-file columns, or direct values.
1596         if asScript is not None:
1597             __Vector, __Series = None, None
1598             if asPersistentVector:
1599                 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1601                 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1602         elif asDataFile is not None:
1603             __Vector, __Series = None, None
1604             if asPersistentVector:
1605                 if colNames is not None:
1606                     __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1608                     __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz files are stored transposed with respect to column files, hence
                # the opposite transposition conditions.
1609                 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1610                     __Series = numpy.transpose(__Series)
1611                 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1612                     __Series = numpy.transpose(__Series)
1614                 if colNames is not None:
1615                     __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1617                     __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1619                     __Vector = numpy.ravel(__Vector, order = "F")
1621                     __Vector = numpy.ravel(__Vector, order = "C")
1623             __Vector, __Series = asVector, asPersistentVector
        # Materialise either the single column vector or the series, and record
        # shape/size metadata.
1625         if __Vector is not None:
1626             self.__is_vector = True
1627             self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1628             self.shape = self.__V.shape
1629             self.size = self.__V.size
1630         elif __Series is not None:
1631             self.__is_series = True
1632             if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1633                 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # NOTE(review): `eval` on a string-typed series executes arbitrary
                # code — only safe for trusted inputs; flagged, not changed.
1634                 if isinstance(__Series, str): __Series = eval(__Series)
1635                 for member in __Series:
1636                     self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1639             if isinstance(self.__V.shape, (tuple, list)):
1640                 self.shape = self.__V.shape
1642                 self.shape = self.__V.shape()
1643             if len(self.shape) == 1:
1644                 self.shape = (self.shape[0],1)
1645             self.size = self.shape[0] * self.shape[1]
1647             raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1649         if scheduledBy is not None:
1650             self.__T = scheduledBy
1652     def getO(self, withScheduler=False):
        # Return the underlying data, optionally with its scheduler; the other
        # return branches are missing from this dump.
1654             return self.__V, self.__T
1655         elif self.__T is None:
        # Internal type checks (their `def` lines are missing from this dump).
1661         "Vérification du type interne"
1662         return self.__is_vector
1665         "Vérification du type interne"
1666         return self.__is_series
1669         "x.__repr__() <==> repr(x)"
1670         return repr(self.__V)
1673         "x.__str__() <==> str(x)"
1674         return str(self.__V)
1676 # ==============================================================================
1677 class Covariance(object):
1679 Classe générale d'interface de type covariance
1682 name = "GenericCovariance",
1683 asCovariance = None,
1684 asEyeByScalar = None,
1685 asEyeByVector = None,
1688 toBeChecked = False,
1691 Permet de définir une covariance :
1692 - asCovariance : entrée des données, comme une matrice compatible avec
1693 le constructeur de numpy.matrix
1694 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1695 multiplicatif d'une matrice de corrélation identité, aucune matrice
1696 n'étant donc explicitement à donner
1697 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1698 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1699 n'étant donc explicitement à donner
1700 - asCovObject : entrée des données comme un objet python, qui a les
1701 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1702 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1703 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1704 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1705 pleine doit être vérifié
1707 self.__name = str(name)
1708 self.__check = bool(toBeChecked)
1711 self.__is_scalar = False
1712 self.__is_vector = False
1713 self.__is_matrix = False
1714 self.__is_object = False
1716 if asScript is not None:
1717 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1719 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1721 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1723 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1725 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1727 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1729 if __Scalar is not None:
1730 if numpy.matrix(__Scalar).size != 1:
1731 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1732 self.__is_scalar = True
1733 self.__C = numpy.abs( float(__Scalar) )
1736 elif __Vector is not None:
1737 self.__is_vector = True
1738 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1739 self.shape = (self.__C.size,self.__C.size)
1740 self.size = self.__C.size**2
1741 elif __Matrix is not None:
1742 self.__is_matrix = True
1743 self.__C = numpy.matrix( __Matrix, float )
1744 self.shape = self.__C.shape
1745 self.size = self.__C.size
1746 elif __Object is not None:
1747 self.__is_object = True
1749 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1750 if not hasattr(self.__C,at):
1751 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1752 if hasattr(self.__C,"shape"):
1753 self.shape = self.__C.shape
1756 if hasattr(self.__C,"size"):
1757 self.size = self.__C.size
1762 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1766 def __validate(self):
1768 if self.__C is None:
1769 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1770 if self.ismatrix() and min(self.shape) != max(self.shape):
1771 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1772 if self.isobject() and min(self.shape) != max(self.shape):
1773 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1774 if self.isscalar() and self.__C <= 0:
1775 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1776 if self.isvector() and (self.__C <= 0).any():
1777 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1778 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1780 L = numpy.linalg.cholesky( self.__C )
1782 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1783 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1785 L = self.__C.cholesky()
1787 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1790 "Vérification du type interne"
1791 return self.__is_scalar
1794 "Vérification du type interne"
1795 return self.__is_vector
1798 "Vérification du type interne"
1799 return self.__is_matrix
1802 "Vérification du type interne"
1803 return self.__is_object
1808 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1809 elif self.isvector():
1810 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1811 elif self.isscalar():
1812 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1813 elif self.isobject():
1814 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1816 return None # Indispensable
1821 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1822 elif self.isvector():
1823 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1824 elif self.isscalar():
1825 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1826 elif self.isobject():
1827 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1830 "Décomposition de Cholesky"
1832 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1833 elif self.isvector():
1834 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1835 elif self.isscalar():
1836 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1837 elif self.isobject() and hasattr(self.__C,"cholesky"):
1838 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1840 def choleskyI(self):
1841 "Inversion de la décomposition de Cholesky"
1843 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1844 elif self.isvector():
1845 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1846 elif self.isscalar():
1847 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1848 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1849 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1852 "Racine carrée matricielle"
1855 return Covariance(self.__name+"C", asCovariance = scipy.linalg.sqrtm(self.__C) )
1856 elif self.isvector():
1857 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1858 elif self.isscalar():
1859 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1860 elif self.isobject() and hasattr(self.__C,"sqrt"):
1861 return Covariance(self.__name+"C", asCovObject = self.__C.sqrt() )
1864 "Inversion de la racine carrée matricielle"
1867 return Covariance(self.__name+"H", asCovariance = scipy.linalg.sqrtm(self.__C).I )
1868 elif self.isvector():
1869 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1870 elif self.isscalar():
1871 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1872 elif self.isobject() and hasattr(self.__C,"sqrtI"):
1873 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtI() )
1875 def diag(self, msize=None):
1876 "Diagonale de la matrice"
1878 return numpy.diag(self.__C)
1879 elif self.isvector():
1881 elif self.isscalar():
1883 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1885 return self.__C * numpy.ones(int(msize))
1886 elif self.isobject():
1887 return self.__C.diag()
1889 def asfullmatrix(self, msize=None):
1893 elif self.isvector():
1894 return numpy.matrix( numpy.diag(self.__C), float )
1895 elif self.isscalar():
1897 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1899 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1900 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1901 return self.__C.asfullmatrix()
1903 def trace(self, msize=None):
1904 "Trace de la matrice"
1906 return numpy.trace(self.__C)
1907 elif self.isvector():
1908 return float(numpy.sum(self.__C))
1909 elif self.isscalar():
1911 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1913 return self.__C * int(msize)
1914 elif self.isobject():
1915 return self.__C.trace()
1921 "x.__repr__() <==> repr(x)"
1922 return repr(self.__C)
1925 "x.__str__() <==> str(x)"
1926 return str(self.__C)
def __add__(self, other):
    "x.__add__(y) <==> x+y"
    # Full matrix (or matrix-like object) storage: delegate to numpy.
    if self.ismatrix() or self.isobject():
        return self.__C + numpy.asmatrix(other)
    # Diagonal storage (vector of variances, or one scalar variance):
    # add the stored value(s) onto the main diagonal of "other" through
    # a strided flat view — a step of (row length + 1) walks the diagonal.
    # NOTE(review): assumes "other" is a square 2-D array-like; mutates
    # "other" in place when it is already an ndarray (as the original did).
    if self.isvector() or self.isscalar():
        _dense = numpy.asarray(other)
        _dense.reshape(_dense.size)[::_dense.shape[1] + 1] += self.__C
        return numpy.asmatrix(_dense)
def __radd__(self, other):
    "x.__radd__(y) <==> y+x"
    # Reflected addition is deliberately unsupported for covariance objects:
    # always raise, whatever the left operand type.
    __msg = "%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(__msg)
def __sub__(self, other):
    "x.__sub__(y) <==> x-y"
    # Full matrix (or matrix-like object) storage: plain numpy subtraction.
    if self.ismatrix() or self.isobject():
        return self.__C - numpy.asmatrix(other)
    # Diagonal storage: only the main diagonal of "other" is rewritten,
    # entry by entry, as (stored variance - previous diagonal entry).
    # NOTE(review): assumes "other" is a square 2-D array-like; mutates
    # "other" in place when it is already an ndarray (as the original did).
    if self.isvector() or self.isscalar():
        _dense = numpy.asarray(other)
        _step = _dense.shape[1] + 1
        _flat = _dense.reshape(_dense.size)
        _flat[::_step] = self.__C - _flat[::_step]
        return numpy.asmatrix(_dense)
def __rsub__(self, other):
    "x.__rsub__(y) <==> y-x"
    # Reflected subtraction is deliberately unsupported for covariance
    # objects: always raise, whatever the left operand type.
    __msg = "%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(__msg)
1955 "x.__neg__() <==> -x"
1958 def __mul__(self, other):
1959 "x.__mul__(y) <==> x*y"
1960 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1961 return self.__C * other
1962 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1963 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1964 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1965 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1966 return self.__C * numpy.asmatrix(other)
1968 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1969 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1970 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1971 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1972 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1973 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1975 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1976 elif self.isscalar() and isinstance(other,numpy.matrix):
1977 return self.__C * other
1978 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1979 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1980 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1982 return self.__C * numpy.asmatrix(other)
1983 elif self.isobject():
1984 return self.__C.__mul__(other)
1986 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1988 def __rmul__(self, other):
1989 "x.__rmul__(y) <==> y*x"
1990 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1991 return other * self.__C
1992 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1993 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1994 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1995 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1996 return numpy.asmatrix(other) * self.__C
1998 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1999 elif self.isvector() and isinstance(other,numpy.matrix):
2000 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2001 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2002 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2003 return numpy.asmatrix(numpy.array(other) * self.__C)
2005 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2006 elif self.isscalar() and isinstance(other,numpy.matrix):
2007 return other * self.__C
2008 elif self.isobject():
2009 return self.__C.__rmul__(other)
2011 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2014 "x.__len__() <==> len(x)"
2015 return self.shape[0]
2017 # ==============================================================================
2018 class Observer2Func(object):
2020 Creation d'une fonction d'observateur a partir de son texte
2022 def __init__(self, corps=""):
2023 self.__corps = corps
2024 def func(self,var,info):
2025 "Fonction d'observation"
2028 "Restitution du pointeur de fonction dans l'objet"
2031 # ==============================================================================
2032 class CaseLogger(object):
2034 Conservation des commandes de creation d'un cas
2036 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2037 self.__name = str(__name)
2038 self.__objname = str(__objname)
2039 self.__logSerie = []
2040 self.__switchoff = False
2042 "TUI" :Interfaces._TUIViewer,
2043 "SCD" :Interfaces._SCDViewer,
2044 "YACS":Interfaces._YACSViewer,
2047 "TUI" :Interfaces._TUIViewer,
2048 "COM" :Interfaces._COMViewer,
2050 if __addViewers is not None:
2051 self.__viewers.update(dict(__addViewers))
2052 if __addLoaders is not None:
2053 self.__loaders.update(dict(__addLoaders))
2055 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2056 "Enregistrement d'une commande individuelle"
2057 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2058 if "self" in __keys: __keys.remove("self")
2059 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2061 self.__switchoff = True
2063 self.__switchoff = False
2065 def dump(self, __filename=None, __format="TUI", __upa=""):
2066 "Restitution normalisée des commandes"
2067 if __format in self.__viewers:
2068 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2070 raise ValueError("Dumping as \"%s\" is not available"%__format)
2071 return __formater.dump(__filename, __upa)
2073 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2074 "Chargement normalisé des commandes"
2075 if __format in self.__loaders:
2076 __formater = self.__loaders[__format]()
2078 raise ValueError("Loading as \"%s\" is not available"%__format)
2079 return __formater.load(__filename, __content, __object)
2081 # ==============================================================================
2084 _extraArguments = None,
2085 _sFunction = lambda x: x,
2090 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2091 correspondante de valeurs de la fonction en argument
2093 # Vérifications et définitions initiales
2094 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2095 if not PlatformInfo.isIterable( __xserie ):
2096 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
2098 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2101 __mpWorkers = int(_mpWorkers)
2103 import multiprocessing
2114 if _extraArguments is None:
2116 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2117 for __xvalue in __xserie:
2118 _jobs.append( [__xvalue, ] + list(_extraArguments) )
2120 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2121 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2122 import multiprocessing
2123 with multiprocessing.Pool(__mpWorkers) as pool:
2124 __multiHX = pool.map( _sFunction, _jobs )
2127 # logging.debug("MULTF Internal multiprocessing calculation end")
2129 # logging.debug("MULTF Internal monoprocessing calculation begin")
2131 if _extraArguments is None:
2132 for __xvalue in __xserie:
2133 __multiHX.append( _sFunction( __xvalue ) )
2134 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2135 for __xvalue in __xserie:
2136 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2137 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2138 for __xvalue in __xserie:
2139 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2141 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2142 # logging.debug("MULTF Internal monoprocessing calculation end")
2144 # logging.debug("MULTF Internal multifonction calculations end")
2147 # ==============================================================================
2148 def CostFunction3D(_x,
2149 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2150 _HmX = None, # Simulation déjà faite de Hm(x)
2151 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2156 _SIV = False, # A résorber pour la 8.0
2157 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2158 _nPS = 0, # nbPreviousSteps
2159 _QM = "DA", # QualityMeasure
2160 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2161 _fRt = False, # Restitue ou pas la sortie étendue
2162 _sSc = True, # Stocke ou pas les SSC
2165 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2166 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2167 DFO, QuantileRegression
2173 for k in ["CostFunctionJ",
2179 "SimulatedObservationAtCurrentOptimum",
2180 "SimulatedObservationAtCurrentState",
2184 if hasattr(_SSV[k],"store"):
2185 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
2187 _X = numpy.asmatrix(numpy.ravel( _x )).T
2188 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2189 _SSV["CurrentState"].append( _X )
2191 if _HmX is not None:
2195 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2199 _HX = _Hm( _X, *_arg )
2200 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2202 if "SimulatedObservationAtCurrentState" in _SSC or \
2203 "SimulatedObservationAtCurrentOptimum" in _SSC:
2204 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
2206 if numpy.any(numpy.isnan(_HX)):
2207 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2209 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2210 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2211 if _BI is None or _RI is None:
2212 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2213 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2214 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2215 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2216 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2218 raise ValueError("Observation error covariance matrix has to be properly defined!")
2220 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2221 elif _QM in ["LeastSquares", "LS", "L2"]:
2223 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2224 elif _QM in ["AbsoluteValue", "L1"]:
2226 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2227 elif _QM in ["MaximumError", "ME"]:
2229 Jo = numpy.max( numpy.abs(_Y - _HX) )
2230 elif _QM in ["QR", "Null"]:
2234 raise ValueError("Unknown asked quality measure!")
2236 J = float( Jb ) + float( Jo )
2239 _SSV["CostFunctionJb"].append( Jb )
2240 _SSV["CostFunctionJo"].append( Jo )
2241 _SSV["CostFunctionJ" ].append( J )
2243 if "IndexOfOptimum" in _SSC or \
2244 "CurrentOptimum" in _SSC or \
2245 "SimulatedObservationAtCurrentOptimum" in _SSC:
2246 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2247 if "IndexOfOptimum" in _SSC:
2248 _SSV["IndexOfOptimum"].append( IndexMin )
2249 if "CurrentOptimum" in _SSC:
2250 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2251 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2252 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2257 if _QM in ["QR"]: # Pour le QuantileRegression
2262 # ==============================================================================
2263 if __name__ == "__main__":
2264 print('\n AUTODIAGNOSTIC\n')