1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
40 class CacheManager(object):
42 Classe générale de gestion d'un cache de calculs
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 Les caractéristiques de tolérance peuvent être modifiées à la création.
51 self.__tolerBP = float(toleranceInRedundancy)
52 self.__lenghtOR = int(lenghtOfRedundancy)
53 self.__initlnOR = self.__lenghtOR
63 def wasCalculatedIn(self, xValue, oName="" ):
64 "Vérifie l'existence d'un calcul correspondant à la valeur"
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
69 if not hasattr(xValue, 'size'):
71 elif (str(oName) != self.__listOPCV[i][3]):
73 elif (xValue.size != self.__listOPCV[i][0].size):
75 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
77 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
79 __HxV = self.__listOPCV[i][1]
83 def storeValueInX(self, xValue, HxValue, oName="" ):
84 "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
85 if self.__lenghtOR < 0:
86 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87 self.__initlnOR = self.__lenghtOR
88 self.__seenNames.append(str(oName))
89 if str(oName) not in self.__seenNames: # Etend la liste si nouveau
90 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
91 self.__initlnOR += self.__lenghtOR
92 self.__seenNames.append(str(oName))
93 while len(self.__listOPCV) > self.__lenghtOR:
94 self.__listOPCV.pop(0)
95 self.__listOPCV.append( (
96 copy.copy(numpy.ravel(xValue)), # 0 Previous point
97 copy.copy(HxValue), # 1 Previous value
98 numpy.linalg.norm(xValue), # 2 Norm
99 str(oName), # 3 Operator name
104 self.__initlnOR = self.__lenghtOR
106 self.__enabled = False
110 self.__lenghtOR = self.__initlnOR
111 self.__enabled = True
113 # ==============================================================================
114 class Operator(object):
116 Classe générale d'interface de type opérateur simple
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 inputAsMultiFunction = False,
129 enableMultiProcess = False,
130 extraArguments = None,
133 On construit un objet de ce type en fournissant, à l'aide de l'un des
134 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
137 - name : nom d'opérateur
138 - fromMethod : argument de type fonction Python
139 - fromMatrix : argument adapté au constructeur numpy.matrix
140 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
141 - inputAsMultiFunction : booléen indiquant une fonction explicitement
142 définie (ou pas) en multi-fonction
143 - extraArguments : arguments supplémentaires passés à la fonction de
144 base et ses dérivées (tuple ou dictionnaire)
146 self.__name = str(name)
147 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
148 self.__AvoidRC = bool( avoidingRedundancy )
149 self.__inputAsMF = bool( inputAsMultiFunction )
150 self.__mpEnabled = bool( enableMultiProcess )
151 self.__extraArgs = extraArguments
152 if fromMethod is not None and self.__inputAsMF:
153 self.__Method = fromMethod # logtimer(fromMethod)
155 self.__Type = "Method"
156 elif fromMethod is not None and not self.__inputAsMF:
157 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
159 self.__Type = "Method"
160 elif fromMatrix is not None:
162 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
163 self.__Type = "Matrix"
169 def disableAvoidingRedundancy(self):
171 Operator.CM.disable()
173 def enableAvoidingRedundancy(self):
178 Operator.CM.disable()
184 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
186 Permet de restituer le résultat de l'application de l'opérateur à une
187 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
188 argument devant a priori être du bon type.
190 - les arguments par série sont :
191 - xValue : argument adapté pour appliquer l'opérateur
192 - HValue : valeur précalculée de l'opérateur en ce point
193 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
200 if HValue is not None:
204 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
206 if _HValue is not None:
207 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
209 for i in range(len(_HValue)):
210 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
212 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
217 for i, xv in enumerate(_xValue):
219 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
221 __alreadyCalculated = False
223 if __alreadyCalculated:
224 self.__addOneCacheCall()
227 if self.__Matrix is not None:
228 self.__addOneMatrixCall()
229 _xv = numpy.matrix(numpy.ravel(xv)).T
230 _hv = self.__Matrix * _xv
232 self.__addOneMethodCall()
236 _HxValue.append( _hv )
238 if len(_xserie)>0 and self.__Matrix is None:
239 if self.__extraArgs is None:
240 _hserie = self.__Method( _xserie ) # Calcul MF
242 _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
243 if not hasattr(_hserie, "pop"):
244 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
250 Operator.CM.storeValueInX(_xv,_hv,self.__name)
252 if returnSerieAsArrayMatrix:
253 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
255 if argsAsSerie: return _HxValue
256 else: return _HxValue[-1]
258 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
260 Permet de restituer le résultat de l'application de l'opérateur à des
261 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
262 argument devant a priori être du bon type. Si la uValue est None,
263 on suppose que l'opérateur ne s'applique qu'à xValue.
265 - paires : les arguments par paire sont :
266 - xValue : argument X adapté pour appliquer l'opérateur
267 - uValue : argument U adapté pour appliquer l'opérateur
268 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
270 if argsAsSerie: _xuValue = paires
271 else: _xuValue = (paires,)
272 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
274 if self.__Matrix is not None:
276 for paire in _xuValue:
277 _xValue, _uValue = paire
278 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
279 self.__addOneMatrixCall()
280 _HxValue.append( self.__Matrix * _xValue )
283 for paire in _xuValue:
284 _xValue, _uValue = paire
285 if _uValue is not None:
286 _xuArgs.append( paire )
288 _xuArgs.append( _xValue )
289 self.__addOneMethodCall( len(_xuArgs) )
290 if self.__extraArgs is None:
291 _HxValue = self.__Method( _xuArgs ) # Calcul MF
293 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
295 if returnSerieAsArrayMatrix:
296 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
298 if argsAsSerie: return _HxValue
299 else: return _HxValue[-1]
301 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
303 Permet de restituer le résultat de l'application de l'opérateur à une
304 série d'arguments xValue, sachant que l'opérateur est valable en
305 xNominal. Cette méthode se contente d'appliquer, son argument devant a
306 priori être du bon type. Si l'opérateur est linéaire car c'est une
307 matrice, alors il est valable en tout point nominal et xNominal peut
308 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
309 permet d'indiquer que l'argument est multi-paires.
311 - paires : les arguments par paire sont :
312 - xNominal : série d'arguments permettant de donner le point où
313 l'opérateur est construit pour être ensuite appliqué
314 - xValue : série d'arguments adaptés pour appliquer l'opérateur
315 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
317 if argsAsSerie: _nxValue = paires
318 else: _nxValue = (paires,)
319 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
321 if self.__Matrix is not None:
323 for paire in _nxValue:
324 _xNominal, _xValue = paire
325 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
326 self.__addOneMatrixCall()
327 _HxValue.append( self.__Matrix * _xValue )
329 self.__addOneMethodCall( len(_nxValue) )
330 if self.__extraArgs is None:
331 _HxValue = self.__Method( _nxValue ) # Calcul MF
333 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
335 if returnSerieAsArrayMatrix:
336 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
338 if argsAsSerie: return _HxValue
339 else: return _HxValue[-1]
341 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
343 Permet de renvoyer l'opérateur sous la forme d'une matrice
345 if self.__Matrix is not None:
346 self.__addOneMatrixCall()
347 mValue = [self.__Matrix,]
348 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
351 self.__addOneMethodCall( len(ValueForMethodForm) )
352 for _vfmf in ValueForMethodForm:
353 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
355 self.__addOneMethodCall()
356 mValue = self.__Method(((ValueForMethodForm, None),))
358 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
360 if argsAsSerie: return mValue
361 else: return mValue[-1]
365 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
366 la forme d'une matrice
368 if self.__Matrix is not None:
369 return self.__Matrix.shape
371 raise ValueError("Matrix form of the operator is not available, nor the shape")
373 def nbcalls(self, which=None):
375 Renvoie les nombres d'évaluations de l'opérateur
378 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
379 self.__NbCallsAsMatrix,
380 self.__NbCallsAsMethod,
381 self.__NbCallsOfCached,
382 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
383 Operator.NbCallsAsMatrix,
384 Operator.NbCallsAsMethod,
385 Operator.NbCallsOfCached,
387 if which is None: return __nbcalls
388 else: return __nbcalls[which]
def __addOneMatrixCall(self):
    "Tally one evaluation of the operator performed through its matrix form."
    self.__NbCallsAsMatrix = self.__NbCallsAsMatrix + 1 # Per-instance counter
    Operator.NbCallsAsMatrix = Operator.NbCallsAsMatrix + 1 # Class-wide counter shared by every Operator
def __addOneMethodCall(self, nb = 1):
    "Tally nb evaluations of the operator performed through its functional form."
    self.__NbCallsAsMethod = self.__NbCallsAsMethod + nb # Per-instance counter
    Operator.NbCallsAsMethod = Operator.NbCallsAsMethod + nb # Class-wide counter shared by every Operator
def __addOneCacheCall(self):
    "Tally one evaluation of the operator answered from the redundancy cache."
    self.__NbCallsOfCached = self.__NbCallsOfCached + 1 # Per-instance counter
    Operator.NbCallsOfCached = Operator.NbCallsOfCached + 1 # Class-wide counter shared by every Operator
405 # ==============================================================================
406 class FullOperator(object):
408 Classe générale d'interface de type opérateur complet
409 (Direct, Linéaire Tangent, Adjoint)
412 name = "GenericFullOperator",
414 asOneFunction = None, # 1 Fonction
415 asThreeFunctions = None, # 3 Fonctions in a dictionary
416 asScript = None, # 1 or 3 Fonction(s) by script
417 asDict = None, # Parameters
419 extraArguments = None,
421 inputAsMF = False,# Fonction(s) as Multi-Functions
426 self.__name = str(name)
427 self.__check = bool(toBeChecked)
428 self.__extraArgs = extraArguments
433 if (asDict is not None) and isinstance(asDict, dict):
434 __Parameters.update( asDict )
435 # Priorité à EnableMultiProcessingInDerivatives=True
436 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
437 __Parameters["EnableMultiProcessingInDerivatives"] = True
438 __Parameters["EnableMultiProcessingInEvaluation"] = False
439 if "EnableMultiProcessingInDerivatives" not in __Parameters:
440 __Parameters["EnableMultiProcessingInDerivatives"] = False
441 if __Parameters["EnableMultiProcessingInDerivatives"]:
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "EnableMultiProcessingInEvaluation" not in __Parameters:
444 __Parameters["EnableMultiProcessingInEvaluation"] = False
445 if "withIncrement" in __Parameters: # Temporaire
446 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
448 if asScript is not None:
449 __Matrix, __Function = None, None
451 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
453 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
454 __Function.update({"useApproximatedDerivatives":True})
455 __Function.update(__Parameters)
456 elif asThreeFunctions:
458 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
459 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
460 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
462 __Function.update(__Parameters)
465 if asOneFunction is not None:
466 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
467 if asOneFunction["Direct"] is not None:
468 __Function = asOneFunction
470 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
472 __Function = { "Direct":asOneFunction }
473 __Function.update({"useApproximatedDerivatives":True})
474 __Function.update(__Parameters)
475 elif asThreeFunctions is not None:
476 if isinstance(asThreeFunctions, dict) and \
477 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
478 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
479 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
480 __Function = asThreeFunctions
481 elif isinstance(asThreeFunctions, dict) and \
482 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
483 __Function = asThreeFunctions
484 __Function.update({"useApproximatedDerivatives":True})
486 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
487 if "Direct" not in asThreeFunctions:
488 __Function["Direct"] = asThreeFunctions["Tangent"]
489 __Function.update(__Parameters)
493 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
494 # for k in ("Direct", "Tangent", "Adjoint"):
495 # if k in __Function and hasattr(__Function[k],"__class__"):
496 # if type(__Function[k]) is type(self.__init__):
497 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
499 if appliedInX is not None and isinstance(appliedInX, dict):
500 __appliedInX = appliedInX
501 elif appliedInX is not None:
502 __appliedInX = {"HXb":appliedInX}
506 if scheduledBy is not None:
507 self.__T = scheduledBy
509 if isinstance(__Function, dict) and \
510 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
511 ("Direct" in __Function) and (__Function["Direct"] is not None):
512 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
513 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
514 if "withdX" not in __Function: __Function["withdX"] = None
515 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
516 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
517 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
518 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
519 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
520 from daCore import NumericObjects
521 FDA = NumericObjects.FDApproximation(
523 Function = __Function["Direct"],
524 centeredDF = __Function["CenteredFiniteDifference"],
525 increment = __Function["DifferentialIncrement"],
526 dX = __Function["withdX"],
527 avoidingRedundancy = __Function["withAvoidingRedundancy"],
528 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
529 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
530 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
531 mpWorkers = __Function["NumberOfProcesses"],
532 mfEnabled = __Function["withmfEnabled"],
534 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
535 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
536 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
537 elif isinstance(__Function, dict) and \
538 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
539 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
540 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
541 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
542 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
543 elif asMatrix is not None:
544 __matrice = numpy.matrix( __Matrix, numpy.float )
545 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
546 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
547 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
550 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
552 if __appliedInX is not None:
553 self.__FO["AppliedInX"] = {}
554 for key in list(__appliedInX.keys()):
555 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
556 # Pour le cas où l'on a une vraie matrice
557 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
558 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
559 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
560 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
562 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
564 self.__FO["AppliedInX"] = None
570 "x.__repr__() <==> repr(x)"
571 return repr(self.__FO)
574 "x.__str__() <==> str(x)"
575 return str(self.__FO)
577 # ==============================================================================
578 class Algorithm(object):
580 Classe générale d'interface de type algorithme
582 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
583 d'assimilation, en fournissant un container (dictionnaire) de variables
584 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
586 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
588 def __init__(self, name):
590 L'initialisation présente permet de fabriquer des variables de stockage
591 disponibles de manière générique dans les algorithmes élémentaires. Ces
592 variables de stockage sont ensuite conservées dans un dictionnaire
593 interne à l'objet, mais auquel on accède par la méthode "get".
595 Les variables prévues sont :
596 - APosterioriCorrelations : matrice de corrélations de la matrice A
597 - APosterioriCovariance : matrice de covariances a posteriori : A
598 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
599 - APosterioriVariances : vecteur des variances de la matrice A
600 - Analysis : vecteur d'analyse : Xa
601 - BMA : Background moins Analysis : Xa - Xb
602 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
603 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
604 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
605 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
606 - CostFunctionJo : partie observations de la fonction-coût : Jo
607 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
608 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
609 - CurrentOptimum : état optimal courant lors d'itérations
610 - CurrentState : état courant lors d'itérations
611 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
612 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
613 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
614 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
615 - Innovation : l'innovation : d = Y - H(X)
616 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
617 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
618 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
619 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
620 - KalmanGainAtOptimum : gain de Kalman à l'optimum
621 - MahalanobisConsistency : indicateur de consistance des covariances
622 - OMA : Observation moins Analyse : Y - Xa
623 - OMB : Observation moins Background : Y - Xb
624 - ForecastState : état prédit courant lors d'itérations
625 - Residu : dans le cas des algorithmes de vérification
626 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
627 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
628 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
629 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
630 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
631 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
632 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
633 On peut rajouter des variables à stocker dans l'initialisation de
634 l'algorithme élémentaire qui va hériter de cette classe
636 logging.debug("%s Initialisation", str(name))
637 self._m = PlatformInfo.SystemUsage()
639 self._name = str( name )
640 self._parameters = {"StoreSupplementaryCalculations":[]}
641 self.__required_parameters = {}
642 self.__required_inputs = {
643 "RequiredInputValues":{"mandatory":(), "optional":()},
644 "ClassificationTags":[],
646 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
647 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
648 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
650 self.StoredVariables = {}
651 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
652 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
653 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
654 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
655 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
656 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
657 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
658 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
659 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
660 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
661 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
662 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
663 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
664 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
665 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
666 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
667 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
668 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
669 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
670 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
671 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
672 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
673 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
674 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
675 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
676 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
677 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
678 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
679 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
680 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
681 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
682 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
683 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
684 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
685 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
686 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
687 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
688 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
689 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
691 for k in self.StoredVariables:
692 self.__canonical_stored_name[k.lower()] = k
694 for k, v in self.__variable_names_not_public.items():
695 self.__canonical_parameter_name[k.lower()] = k
696 self.__canonical_parameter_name["algorithm"] = "Algorithm"
697 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
699 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
701 logging.debug("%s Lancement", self._name)
702 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
703 self._getTimeState(reset=True)
705 # Mise a jour des paramètres internes avec le contenu de Parameters, en
706 # reprenant les valeurs par défauts pour toutes celles non définies
707 self.__setParameters(Parameters, reset=True)
708 for k, v in self.__variable_names_not_public.items():
709 if k not in self._parameters: self.__setParameters( {k:v} )
711 # Corrections et compléments des vecteurs
712 def __test_vvalue(argument, variable, argname, symbol=None):
713 if symbol is None: symbol = variable
715 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
716 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
717 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
718 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
720 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
722 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
724 __test_vvalue( Xb, "Xb", "Background or initial state" )
725 __test_vvalue( Y, "Y", "Observation" )
726 __test_vvalue( U, "U", "Control" )
728 # Corrections et compléments des covariances
729 def __test_cvalue(argument, variable, argname, symbol=None):
730 if symbol is None: symbol = variable
732 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
733 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
734 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
735 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
737 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
739 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
741 __test_cvalue( B, "B", "Background" )
742 __test_cvalue( R, "R", "Observation" )
743 __test_cvalue( Q, "Q", "Evolution" )
745 # Corrections et compléments des opérateurs
746 def __test_ovalue(argument, variable, argname, symbol=None):
747 if symbol is None: symbol = variable
748 if argument is None or (isinstance(argument,dict) and len(argument)==0):
749 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
750 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
751 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
752 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
754 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
756 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
758 __test_ovalue( HO, "HO", "Observation", "H" )
759 __test_ovalue( EM, "EM", "Evolution", "M" )
760 __test_ovalue( CM, "CM", "Control Model", "C" )
762 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
763 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
765 self._parameters["Bounds"] = None
767 # Corrections et compléments de l'initialisation en X
768 if "InitializationPoint" in self._parameters:
770 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
771 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
772 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
773 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
774 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
776 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
778 if self._parameters["InitializationPoint"] is None:
779 raise ValueError("Forced initial point can not be set without any given Background or required value")
780 if logging.getLogger().level < logging.WARNING:
781 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
782 if PlatformInfo.has_scipy:
783 import scipy.optimize
784 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
786 self._parameters["optmessages"] = 15
788 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
789 if PlatformInfo.has_scipy:
790 import scipy.optimize
791 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
793 self._parameters["optmessages"] = 15
797 def _post_run(self,_oH=None):
799 if ("StoreSupplementaryCalculations" in self._parameters) and \
800 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
801 for _A in self.StoredVariables["APosterioriCovariance"]:
802 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
803 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
804 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
805 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
806 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
807 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
808 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
809 self.StoredVariables["APosterioriCorrelations"].store( _C )
810 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
811 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
812 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
813 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
814 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
815 logging.debug("%s Terminé", self._name)
818 def _toStore(self, key):
819 "True if in StoreSupplementaryCalculations, else False"
820 return key in self._parameters["StoreSupplementaryCalculations"]
822 def get(self, key=None):
824 Renvoie l'une des variables stockées identifiée par la clé, ou le
825 dictionnaire de l'ensemble des variables disponibles en l'absence de
826 clé. Ce sont directement les variables sous forme objet qui sont
827 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
828 des classes de persistance.
831 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
833 return self.StoredVariables
835 def __contains__(self, key=None):
836 "D.__contains__(k) -> True if D has a key k, else False"
837 if key is None or key.lower() not in self.__canonical_stored_name:
840 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
843 "D.keys() -> list of D's keys"
844 if hasattr(self, "StoredVariables"):
845 return self.StoredVariables.keys()
850 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
851 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
852 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
857 raise TypeError("pop expected at least 1 arguments, got 0")
858 "If key is not found, d is returned if given, otherwise KeyError is raised"
864 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
866 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
867 sa forme mathématique la plus naturelle possible.
869 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
871 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
873 Permet de définir dans l'algorithme des paramètres requis et leurs
874 caractéristiques par défaut.
877 raise ValueError("A name is mandatory to define a required parameter.")
879 self.__required_parameters[name] = {
881 "typecast" : typecast,
888 self.__canonical_parameter_name[name.lower()] = name
889 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
891 def getRequiredParameters(self, noDetails=True):
893 Renvoie la liste des noms de paramètres requis ou directement le
894 dictionnaire des paramètres requis.
897 return sorted(self.__required_parameters.keys())
899 return self.__required_parameters
901 def setParameterValue(self, name=None, value=None):
903 Renvoie la valeur d'un paramètre requis de manière contrôlée
905 __k = self.__canonical_parameter_name[name.lower()]
906 default = self.__required_parameters[__k]["default"]
907 typecast = self.__required_parameters[__k]["typecast"]
908 minval = self.__required_parameters[__k]["minval"]
909 maxval = self.__required_parameters[__k]["maxval"]
910 listval = self.__required_parameters[__k]["listval"]
911 listadv = self.__required_parameters[__k]["listadv"]
913 if value is None and default is None:
915 elif value is None and default is not None:
916 if typecast is None: __val = default
917 else: __val = typecast( default )
919 if typecast is None: __val = value
922 __val = typecast( value )
924 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
926 if minval is not None and (numpy.array(__val, float) < minval).any():
927 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
928 if maxval is not None and (numpy.array(__val, float) > maxval).any():
929 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
930 if listval is not None or listadv is not None:
931 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
933 if listval is not None and v in listval: continue
934 elif listadv is not None and v in listadv: continue
936 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
937 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
938 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
942 def requireInputArguments(self, mandatory=(), optional=()):
944 Permet d'imposer des arguments de calcul requis en entrée.
946 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
947 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
949 def getInputArguments(self):
951 Permet d'obtenir les listes des arguments de calcul requis en entrée.
953 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
955 def setAttributes(self, tags=()):
957 Permet d'adjoindre des attributs comme les tags de classification.
958 Renvoie la liste actuelle dans tous les cas.
960 self.__required_inputs["ClassificationTags"].extend( tags )
961 return self.__required_inputs["ClassificationTags"]
963 def __setParameters(self, fromDico={}, reset=False):
965 Permet de stocker les paramètres reçus dans le dictionnaire interne.
967 self._parameters.update( fromDico )
968 __inverse_fromDico_keys = {}
969 for k in fromDico.keys():
970 if k.lower() in self.__canonical_parameter_name:
971 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
972 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
973 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
974 for k in self.__required_parameters.keys():
975 if k in __canonic_fromDico_keys:
976 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
978 self._parameters[k] = self.setParameterValue(k)
981 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
983 def _getTimeState(self, reset=False):
985 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
988 self.__initial_cpu_time = time.process_time()
989 self.__initial_elapsed_time = time.perf_counter()
992 self.__cpu_time = time.process_time() - self.__initial_cpu_time
993 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
994 return self.__cpu_time, self.__elapsed_time
996 def _StopOnTimeLimit(self, X=None, withReason=False):
997 "Stop criteria on time limit: True/False [+ Reason]"
998 c, e = self._getTimeState()
999 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1000 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1001 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1002 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1004 __SC, __SR = False, ""
1010 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action interface class for the algorithm and its parameters
    """
    def __init__(self,
                 name        = "GenericAlgorithm",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None,
                ):
        """
        Build the wrapper: register the parameters (dict or script), select
        the algorithm name, then load and instantiate the algorithm module.
        """
        self.__name = str(name)
        self.__A    = None
        self.__P    = {}
        #
        self.__algorithm     = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        #
        self.updateParameters( asDict, asScript )
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        #
        self.__setAlgorithm( self.__A )
        #
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
1046 def updateParameters(self,
1050 "Mise a jour des parametres"
1051 if asDict is None and asScript is not None:
1052 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1056 if __Dict is not None:
1057 self.__P.update( dict(__Dict) )
1059 def executePythonScheme(self, asDictAO = None):
1060 "Permet de lancer le calcul d'assimilation"
1061 Operator.CM.clearCache()
1063 if not isinstance(asDictAO, dict):
1064 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1065 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1066 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1067 else: self.__Xb = None
1068 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1069 else: self.__Y = asDictAO["Observation"]
1070 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1071 else: self.__U = asDictAO["ControlInput"]
1072 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1073 else: self.__HO = asDictAO["ObservationOperator"]
1074 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1075 else: self.__EM = asDictAO["EvolutionModel"]
1076 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1077 else: self.__CM = asDictAO["ControlModel"]
1078 self.__B = asDictAO["BackgroundError"]
1079 self.__R = asDictAO["ObservationError"]
1080 self.__Q = asDictAO["EvolutionError"]
1082 self.__shape_validate()
1084 self.__algorithm.run(
1094 Parameters = self.__P,
1098 def executeYACSScheme(self, FileName=None):
1099 "Permet de lancer le calcul d'assimilation"
1100 if FileName is None or not os.path.exists(FileName):
1101 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1103 __file = os.path.abspath(FileName)
1104 logging.debug("The YACS file name is \"%s\"."%__file)
1105 if not PlatformInfo.has_salome or \
1106 not PlatformInfo.has_yacs or \
1107 not PlatformInfo.has_adao:
1108 raise ImportError("\n\n"+\
1109 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1110 "Please load the right environnement before trying to use it.\n")
1113 import SALOMERuntime
1115 SALOMERuntime.RuntimeSALOME_setRuntime()
1117 r = pilot.getRuntime()
1118 xmlLoader = loader.YACSLoader()
1119 xmlLoader.registerProcCataLoader()
1121 catalogAd = r.loadCatalog("proc", __file)
1122 r.addCatalog(catalogAd)
1127 p = xmlLoader.load(__file)
1128 except IOError as ex:
1129 print("The YACS XML schema file can not be loaded: %s"%(ex,))
1131 logger = p.getLogger("parser")
1132 if not logger.isEmpty():
1133 print("The imported YACS XML schema has errors on parsing:")
1134 print(logger.getStr())
1137 print("The YACS XML schema is not valid and will not be executed:")
1138 print(p.getErrorReport())
1140 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1141 p.checkConsistency(info)
1142 if info.areWarningsOrErrors():
1143 print("The YACS XML schema is not coherent and will not be executed:")
1144 print(info.getGlobalRepr())
1146 e = pilot.ExecutorSwig()
1148 if p.getEffectiveState() != pilot.DONE:
1149 print(p.getErrorReport())
1153 def get(self, key = None):
1154 "Vérifie l'existence d'une clé de variable ou de paramètres"
1155 if key in self.__algorithm:
1156 return self.__algorithm.get( key )
1157 elif key in self.__P:
1158 return self.__P[key]
1160 allvariables = self.__P
1161 for k in self.__variable_names_not_public: allvariables.pop(k, None)
1164 def pop(self, k, d):
1165 "Necessaire pour le pickling"
1166 return self.__algorithm.pop(k, d)
1168 def getAlgorithmRequiredParameters(self, noDetails=True):
1169 "Renvoie la liste des paramètres requis selon l'algorithme"
1170 return self.__algorithm.getRequiredParameters(noDetails)
1172 def getAlgorithmInputArguments(self):
1173 "Renvoie la liste des entrées requises selon l'algorithme"
1174 return self.__algorithm.getInputArguments()
1176 def getAlgorithmAttributes(self):
1177 "Renvoie la liste des attributs selon l'algorithme"
1178 return self.__algorithm.setAttributes()
1180 def setObserver(self, __V, __O, __I, __S):
1181 if self.__algorithm is None \
1182 or isinstance(self.__algorithm, dict) \
1183 or not hasattr(self.__algorithm,"StoredVariables"):
1184 raise ValueError("No observer can be build before choosing an algorithm.")
1185 if __V not in self.__algorithm:
1186 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1188 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1191 HookParameters = __I,
1194 def removeObserver(self, __V, __O, __A = False):
1195 if self.__algorithm is None \
1196 or isinstance(self.__algorithm, dict) \
1197 or not hasattr(self.__algorithm,"StoredVariables"):
1198 raise ValueError("No observer can be removed before choosing an algorithm.")
1199 if __V not in self.__algorithm:
1200 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1202 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1207 def hasObserver(self, __V):
1208 if self.__algorithm is None \
1209 or isinstance(self.__algorithm, dict) \
1210 or not hasattr(self.__algorithm,"StoredVariables"):
1212 if __V not in self.__algorithm:
1214 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1217 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1218 for k in self.__variable_names_not_public:
1219 if k in __allvariables: __allvariables.remove(k)
1220 return __allvariables
1222 def __contains__(self, key=None):
1223 "D.__contains__(k) -> True if D has a key k, else False"
1224 return key in self.__algorithm or key in self.__P
1227 "x.__repr__() <==> repr(x)"
1228 return repr(self.__A)+", "+repr(self.__P)
1231 "x.__str__() <==> str(x)"
1232 return str(self.__A)+", "+str(self.__P)
1234 def __setAlgorithm(self, choice = None ):
1236 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1237 d'assimilation. L'argument est un champ caractère se rapportant au nom
1238 d'un algorithme réalisant l'opération sur les arguments fixes.
1241 raise ValueError("Error: algorithm choice has to be given")
1242 if self.__algorithmName is not None:
1243 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1244 daDirectory = "daAlgorithms"
1246 # Recherche explicitement le fichier complet
1247 # ------------------------------------------
1249 for directory in sys.path:
1250 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1251 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1252 if module_path is None:
1253 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1255 # Importe le fichier complet comme un module
1256 # ------------------------------------------
1258 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1259 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1260 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1261 raise ImportError("this module does not define a valid elementary algorithm.")
1262 self.__algorithmName = str(choice)
1263 sys.path = sys_path_tmp ; del sys_path_tmp
1264 except ImportError as e:
1265 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1267 # Instancie un objet du type élémentaire du fichier
1268 # -------------------------------------------------
1269 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1272 def __shape_validate(self):
1274 Validation de la correspondance correcte des tailles des variables et
1275 des matrices s'il y en a.
1277 if self.__Xb is None: __Xb_shape = (0,)
1278 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1279 elif hasattr(self.__Xb,"shape"):
1280 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1281 else: __Xb_shape = self.__Xb.shape()
1282 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1284 if self.__Y is None: __Y_shape = (0,)
1285 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1286 elif hasattr(self.__Y,"shape"):
1287 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1288 else: __Y_shape = self.__Y.shape()
1289 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1291 if self.__U is None: __U_shape = (0,)
1292 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1293 elif hasattr(self.__U,"shape"):
1294 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1295 else: __U_shape = self.__U.shape()
1296 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1298 if self.__B is None: __B_shape = (0,0)
1299 elif hasattr(self.__B,"shape"):
1300 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1301 else: __B_shape = self.__B.shape()
1302 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1304 if self.__R is None: __R_shape = (0,0)
1305 elif hasattr(self.__R,"shape"):
1306 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1307 else: __R_shape = self.__R.shape()
1308 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1310 if self.__Q is None: __Q_shape = (0,0)
1311 elif hasattr(self.__Q,"shape"):
1312 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1313 else: __Q_shape = self.__Q.shape()
1314 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1316 if len(self.__HO) == 0: __HO_shape = (0,0)
1317 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1318 elif hasattr(self.__HO["Direct"],"shape"):
1319 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1320 else: __HO_shape = self.__HO["Direct"].shape()
1321 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1323 if len(self.__EM) == 0: __EM_shape = (0,0)
1324 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1325 elif hasattr(self.__EM["Direct"],"shape"):
1326 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1327 else: __EM_shape = self.__EM["Direct"].shape()
1328 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1330 if len(self.__CM) == 0: __CM_shape = (0,0)
1331 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1332 elif hasattr(self.__CM["Direct"],"shape"):
1333 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1334 else: __CM_shape = self.__CM["Direct"].shape()
1335 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1337 # Vérification des conditions
1338 # ---------------------------
1339 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1340 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1341 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1342 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1344 if not( min(__B_shape) == max(__B_shape) ):
1345 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1346 if not( min(__R_shape) == max(__R_shape) ):
1347 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1348 if not( min(__Q_shape) == max(__Q_shape) ):
1349 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1350 if not( min(__EM_shape) == max(__EM_shape) ):
1351 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1353 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1354 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1355 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1356 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1357 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1358 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1359 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1360 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1362 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1363 if self.__algorithmName in ["EnsembleBlue",]:
1364 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1365 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1366 for member in asPersistentVector:
1367 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1368 __Xb_shape = min(__B_shape)
1370 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1372 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1373 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1375 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1376 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1378 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1379 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1381 if ("Bounds" in self.__P) \
1382 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1383 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1384 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1385 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1389 # ==============================================================================
class RegulationAndParameters(object):
    """
    General action interface class for the regulation and its parameters
    """
    def __init__(self,
                 name        = "GenericRegulation",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None,
                ):
        """
        Register the regulation algorithm name and its parameters, either
        given directly (asAlgorithm/asDict) or read from a script.
        """
        self.__name = str(name)
        self.__P    = {}
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Return the parameter value for the key, or all parameters if unknown"
        if key in self.__P:
            return self.__P[key]
        else:
            return self.__P
1428 # ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class
    """
    def __init__(self,
                 name        = "GenericObserver",
                 onVariable  = None,
                 asTemplate  = None,
                 asString    = None,
                 asScript    = None,
                 asObsObject = None,
                 withInfo    = None,
                 scheduledBy = None,
                 withAlgo    = None,
                ):
        """
        Attach an observer (callable, template, string or script) to one
        variable name or a list of variable names of the algorithm withAlgo.
        """
        self.__name = str(name)
        self.__V    = None
        self.__O    = None
        self.__I    = None
        #
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                # By default each variable is its own information tag
                self.__I = self.__V
            else:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            else:
                self.__I = (str(withInfo),)
        else:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        if asObsObject is not None:
            self.__O = asObsObject
        else:
            # Build the observer function from a template/string/script text
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            ename = self.__V[k]
            einfo = self.__I[k]
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            else:
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1491 # ==============================================================================
class UserScript(object):
    """
    General interface class for a user script given as text
    """
    def __init__(self,
                 name       = "GenericUserScript",
                 asTemplate = None,
                 asString   = None,
                 asScript   = None,
                ):
        """
        Store the script text, taken in priority order from: the direct
        string, a named template matching the object name, or a script file.
        """
        self.__name = str(name)
        #
        if asString is not None:
            self.__F = asString
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        else:
            self.__F = ""

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__F)
1525 # ==============================================================================
class ExternalParameters(object):
    """
    General interface class for externally supplied parameters
    """
    def __init__(self,
                 name     = "GenericExternalParameters",
                 asDict   = None,
                 asScript = None,
                ):
        """
        Store the external parameters, given directly as a dictionary or
        read from a script variable named "ExternalParameters".
        """
        self.__name = str(name)
        self.__P    = {}
        #
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
                 asDict   = None,
                 asScript = None,
                ):
        "Update the parameters, from a dictionary or from a script variable"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        "Return the value for the key, or the list of keys if unknown"
        if key in self.__P:
            return self.__P[key]
        else:
            return list(self.__P.keys())

    def keys(self):
        "D.keys() -> list of D's keys"
        return list(self.__P.keys())

    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        return self.__P.pop(k, d)

    def items(self):
        "D.items() -> view of D's (key, value) pairs"
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1574 # ==============================================================================
# NOTE(review): decimated dump — leaked line numbers, missing lines (several
# "else:" lines and part of the __init__ signature are absent).  Code kept
# byte-identical; comments only.
#
# State: generic state-vector interface.  Wraps either a single vector
# (self.__V as a numpy.matrix column) or a series of vectors (self.__V as a
# Persistence.OneVector), selected from asVector / asPersistentVector /
# asScript / asDataFile inputs.
1575 class State(object):
1577 Classe générale d'interface de type état
# (FR above: "General state-type interface class".)
1580 name = "GenericVector",
1582 asPersistentVector = None,
1588 toBeChecked = False,
# FR docstring below documents the constructor inputs: asVector (vector data
# or script flag), asPersistentVector (series of vectors or Persistence
# object), asScript (script file exposing a variable named "name"),
# asDataFile (column files, with colNames/colMajor layout options).
1591 Permet de définir un vecteur :
1592 - asVector : entrée des données, comme un vecteur compatible avec le
1593 constructeur de numpy.matrix, ou "True" si entrée par script.
1594 - asPersistentVector : entrée des données, comme une série de vecteurs
1595 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1596 type Persistence, ou "True" si entrée par script.
1597 - asScript : si un script valide est donné contenant une variable
1598 nommée "name", la variable est de type "asVector" (par défaut) ou
1599 "asPersistentVector" selon que l'une de ces variables est placée à
1601 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1602 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1603 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1604 nommée "name"), on récupère les colonnes et on les range ligne après
1605 ligne (colMajor=False, par défaut) ou colonne après colonne
1606 (colMajor=True). La variable résultante est de type "asVector" (par
1607 défaut) ou "asPersistentVector" selon que l'une de ces variables est
# Store name and SDP-check flag; reset the internal type discriminators.
1610 self.__name = str(name)
1611 self.__check = bool(toBeChecked)
1615 self.__is_vector = False
1616 self.__is_series = False
# Input resolution: script source takes precedence, then data file, then the
# raw asVector/asPersistentVector arguments (fall-through at 1645).
1618 if asScript is not None:
1619 __Vector, __Series = None, None
1620 if asPersistentVector:
1621 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1623 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1624 elif asDataFile is not None:
1625 __Vector, __Series = None, None
1626 if asPersistentVector:
1627 if colNames is not None:
1628 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1630 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Transpose when the requested colMajor layout disagrees with the file
# format's native layout (npz is handled with inverted logic on purpose).
1631 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1632 __Series = numpy.transpose(__Series)
1633 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1634 __Series = numpy.transpose(__Series)
1636 if colNames is not None:
1637 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1639 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Flatten column data Fortran- or C-order depending on colMajor (the
# "if colMajor:" / "else:" lines are missing from this view).
1641 __Vector = numpy.ravel(__Vector, order = "F")
1643 __Vector = numpy.ravel(__Vector, order = "C")
1645 __Vector, __Series = asVector, asPersistentVector
# Build internal storage as a column matrix.
# NOTE(review): "numpy.float" was removed in NumPy >= 1.24 and numpy.matrix
# is deprecated — this code requires an old NumPy; worth modernizing.
1647 if __Vector is not None:
1648 self.__is_vector = True
1649 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1650 self.shape = self.__V.shape
1651 self.size = self.__V.size
1652 elif __Series is not None:
1653 self.__is_series = True
1654 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1655 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# SECURITY(review): eval() of a user-supplied string — acceptable only for
# trusted script input; flag for review, do not feed untrusted data here.
1656 if isinstance(__Series, str): __Series = eval(__Series)
1657 for member in __Series:
1658 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
# shape may be a tuple/list attribute or a callable depending on the stored
# object; normalize 1-D shapes to column form (n,1).
1661 if isinstance(self.__V.shape, (tuple, list)):
1662 self.shape = self.__V.shape
1664 self.shape = self.__V.shape()
1665 if len(self.shape) == 1:
1666 self.shape = (self.shape[0],1)
1667 self.size = self.shape[0] * self.shape[1]
1669 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
# Optional scheduling information attached to the state.
1671 if scheduledBy is not None:
1672 self.__T = scheduledBy
# Accessors: getO returns the stored object, optionally with its scheduler.
1674 def getO(self, withScheduler=False):
1676 return self.__V, self.__T
1677 elif self.__T is None:
# Internal-type predicates (isvector()/isseries(); def headers missing here).
1683 "Vérification du type interne"
1684 return self.__is_vector
1687 "Vérification du type interne"
1688 return self.__is_series
# Standard repr/str delegate to the stored object.
1691 "x.__repr__() <==> repr(x)"
1692 return repr(self.__V)
1695 "x.__str__() <==> str(x)"
1696 return str(self.__V)
1698 # ==============================================================================
# NOTE(review): decimated dump — leaked line numbers, missing "def"/"else:"
# lines throughout.  Code kept byte-identical; comments only.
#
# Covariance: generic covariance-matrix interface with four internal storage
# modes, discriminated by self.__is_*: full matrix (numpy.matrix), diagonal
# vector, scalar multiple of identity, or an arbitrary user object exposing
# a matrix-like protocol (getT/getI/diag/trace/arithmetic dunders).
1699 class Covariance(object):
1701 Classe générale d'interface de type covariance
# (FR above: "General covariance-type interface class".)
1704 name = "GenericCovariance",
1705 asCovariance = None,
1706 asEyeByScalar = None,
1707 asEyeByVector = None,
1710 toBeChecked = False,
# FR docstring below: asCovariance = explicit matrix; asEyeByScalar = single
# variance scalar times identity; asEyeByVector = variance vector on the
# diagonal; asCovObject = python object with the mandatory methods listed;
# toBeChecked = whether the full matrix must be verified SDP.
1713 Permet de définir une covariance :
1714 - asCovariance : entrée des données, comme une matrice compatible avec
1715 le constructeur de numpy.matrix
1716 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1717 multiplicatif d'une matrice de corrélation identité, aucune matrice
1718 n'étant donc explicitement à donner
1719 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1720 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1721 n'étant donc explicitement à donner
1722 - asCovObject : entrée des données comme un objet python, qui a les
1723 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1724 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1725 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1726 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1727 pleine doit être vérifié
1729 self.__name = str(name)
1730 self.__check = bool(toBeChecked)
# Reset all type discriminators before input resolution.
1733 self.__is_scalar = False
1734 self.__is_vector = False
1735 self.__is_matrix = False
1736 self.__is_object = False
# Script input: each branch reads the same named variable; the selecting
# "if/elif" lines are missing from this view.
1738 if asScript is not None:
1739 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1741 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1743 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1745 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1747 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1749 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Storage-mode selection.  Scalar mode stores abs(scalar); vector mode stores
# abs of the flattened diagonal; matrix mode stores a numpy.matrix copy.
1751 if __Scalar is not None:
1752 if numpy.matrix(__Scalar).size != 1:
1753 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1754 self.__is_scalar = True
1755 self.__C = numpy.abs( float(__Scalar) )
1758 elif __Vector is not None:
1759 self.__is_vector = True
1760 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1761 self.shape = (self.__C.size,self.__C.size)
1762 self.size = self.__C.size**2
1763 elif __Matrix is not None:
1764 self.__is_matrix = True
1765 self.__C = numpy.matrix( __Matrix, float )
1766 self.shape = self.__C.shape
1767 self.size = self.__C.size
# Object mode: duck-type check for the mandatory matrix-like protocol.
1768 elif __Object is not None:
1769 self.__is_object = True
1771 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1772 if not hasattr(self.__C,at):
1773 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1774 if hasattr(self.__C,"shape"):
1775 self.shape = self.__C.shape
1778 if hasattr(self.__C,"size"):
1779 self.size = self.__C.size
# A stricter error was deliberately left commented out here.
1784 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Validation: squareness, positivity, and (optionally, or when the logging
# level is below WARNING) a Cholesky-based SDP check.
1788 def __validate(self):
1790 if self.__C is None:
1791 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1792 if self.ismatrix() and min(self.shape) != max(self.shape):
1793 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1794 if self.isobject() and min(self.shape) != max(self.shape):
1795 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1796 if self.isscalar() and self.__C <= 0:
1797 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1798 if self.isvector() and (self.__C <= 0).any():
1799 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1800 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1802 L = numpy.linalg.cholesky( self.__C )
1804 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1805 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1807 L = self.__C.cholesky()
1809 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Internal-type predicates (isscalar/isvector/ismatrix/isobject; the "def"
# header lines are missing from this view).
1812 "Vérification du type interne"
1813 return self.__is_scalar
1816 "Vérification du type interne"
1817 return self.__is_vector
1820 "Vérification du type interne"
1821 return self.__is_matrix
1824 "Vérification du type interne"
1825 return self.__is_object
# getI(): inverse, returned as a new Covariance in the matching mode.
1830 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1831 elif self.isvector():
1832 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1833 elif self.isscalar():
1834 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1835 elif self.isobject():
1836 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1838 return None # Indispensable
# getT(): transpose (identity operation for scalar/vector diagonal modes).
1843 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1844 elif self.isvector():
1845 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1846 elif self.isscalar():
1847 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1848 elif self.isobject():
1849 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
# cholesky(): Cholesky factor; sqrt suffices in diagonal/scalar modes.
1852 "Décomposition de Cholesky"
1854 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1855 elif self.isvector():
1856 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1857 elif self.isscalar():
1858 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1859 elif self.isobject() and hasattr(self.__C,"cholesky"):
1860 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
# choleskyI(): inverse of the Cholesky factor.
1862 def choleskyI(self):
1863 "Inversion de la décomposition de Cholesky"
1865 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1866 elif self.isvector():
1867 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1868 elif self.isscalar():
1869 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1870 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1871 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
# sqrtm(): matrix square root (scipy.linalg.sqrtm for full matrices).
1874 "Racine carrée matricielle"
1877 return Covariance(self.__name+"C", asCovariance = scipy.linalg.sqrtm(self.__C) )
1878 elif self.isvector():
1879 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1880 elif self.isscalar():
1881 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1882 elif self.isobject() and hasattr(self.__C,"sqrt"):
1883 return Covariance(self.__name+"C", asCovObject = self.__C.sqrt() )
# sqrtmI(): inverse of the matrix square root.
1886 "Inversion de la racine carrée matricielle"
1889 return Covariance(self.__name+"H", asCovariance = scipy.linalg.sqrtm(self.__C).I )
1890 elif self.isvector():
1891 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1892 elif self.isscalar():
1893 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1894 elif self.isobject() and hasattr(self.__C,"sqrtI"):
1895 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtI() )
# diag(): diagonal as a 1-D array; scalar mode needs the explicit size msize.
1897 def diag(self, msize=None):
1898 "Diagonale de la matrice"
1900 return numpy.diag(self.__C)
1901 elif self.isvector():
1903 elif self.isscalar():
1905 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1907 return self.__C * numpy.ones(int(msize))
1908 elif self.isobject():
1909 return self.__C.diag()
# asfullmatrix(): densify to an explicit numpy.matrix.
1911 def asfullmatrix(self, msize=None):
1915 elif self.isvector():
1916 return numpy.matrix( numpy.diag(self.__C), float )
1917 elif self.isscalar():
1919 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1921 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1922 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1923 return self.__C.asfullmatrix()
# trace(): scalar mode again needs msize (trace = scalar * size).
1925 def trace(self, msize=None):
1926 "Trace de la matrice"
1928 return numpy.trace(self.__C)
1929 elif self.isvector():
1930 return float(numpy.sum(self.__C))
1931 elif self.isscalar():
1933 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1935 return self.__C * int(msize)
1936 elif self.isobject():
1937 return self.__C.trace()
# repr/str delegate to the stored representation.
1943 "x.__repr__() <==> repr(x)"
1944 return repr(self.__C)
1947 "x.__str__() <==> str(x)"
1948 return str(self.__C)
# __add__: in diagonal/scalar modes, add self onto "other"'s diagonal in
# place via a strided view (step = row length + 1).
# NOTE(review): this mutates the caller-supplied array — confirm intended.
1950 def __add__(self, other):
1951 "x.__add__(y) <==> x+y"
1952 if self.ismatrix() or self.isobject():
1953 return self.__C + numpy.asmatrix(other)
1954 elif self.isvector() or self.isscalar():
1955 _A = numpy.asarray(other)
1956 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1957 return numpy.asmatrix(_A)
1959 def __radd__(self, other):
1960 "x.__radd__(y) <==> y+x"
1961 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1963 def __sub__(self, other):
1964 "x.__sub__(y) <==> x-y"
1965 if self.ismatrix() or self.isobject():
1966 return self.__C - numpy.asmatrix(other)
1967 elif self.isvector() or self.isscalar():
1968 _A = numpy.asarray(other)
1969 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1970 return numpy.asmatrix(_A)
1972 def __rsub__(self, other):
1973 "x.__rsub__(y) <==> y-x"
1974 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1977 "x.__neg__() <==> -x"
# __mul__ / __rmul__: dispatch on self's storage mode and other's type;
# vectors are raveled to columns, matrices checked for shape compatibility.
1980 def __mul__(self, other):
1981 "x.__mul__(y) <==> x*y"
1982 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1983 return self.__C * other
1984 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1985 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1986 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1987 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1988 return self.__C * numpy.asmatrix(other)
1990 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1991 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1992 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1993 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1994 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1995 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1997 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1998 elif self.isscalar() and isinstance(other,numpy.matrix):
1999 return self.__C * other
2000 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2001 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2002 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2004 return self.__C * numpy.asmatrix(other)
2005 elif self.isobject():
2006 return self.__C.__mul__(other)
2008 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2010 def __rmul__(self, other):
2011 "x.__rmul__(y) <==> y*x"
2012 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2013 return other * self.__C
2014 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2015 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2016 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2017 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2018 return numpy.asmatrix(other) * self.__C
2020 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2021 elif self.isvector() and isinstance(other,numpy.matrix):
2022 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2023 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2024 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2025 return numpy.asmatrix(numpy.array(other) * self.__C)
2027 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2028 elif self.isscalar() and isinstance(other,numpy.matrix):
2029 return other * self.__C
2030 elif self.isobject():
2031 return self.__C.__rmul__(other)
2033 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# __len__: number of rows of the (square) covariance.
2036 "x.__len__() <==> len(x)"
2037 return self.shape[0]
2039 # ==============================================================================
# NOTE(review): decimated dump — the body of func() (presumably an exec of
# self.__corps) and the getfunc() "def" line are missing from this view.
# Code kept byte-identical; comments only.
#
# Observer2Func: builds a callable observer function from its source text.
2040 class Observer2Func(object):
2042 Creation d'une fonction d'observateur a partir de son texte
# (FR above: "Creation of an observer function from its text".)
2044 def __init__(self, corps=""):
2045 self.__corps = corps
# func(var, info): the observation function itself ("Fonction d'observation").
2046 def func(self,var,info):
2047 "Fonction d'observation"
# getfunc(): returns the function pointer held by the object
# ("Restitution du pointeur de fonction dans l'objet").
2050 "Restitution du pointeur de fonction dans l'objet"
2053 # ==============================================================================
# NOTE(review): decimated dump — the dict-literal opening lines for
# self.__viewers / self.__loaders and several "def"/"else:" lines are
# missing from this view.  Code kept byte-identical; comments only.
#
# CaseLogger: records the sequence of commands that created a case, and can
# dump/load it through pluggable format viewers and loaders.
2054 class CaseLogger(object):
2056 Conservation des commandes de creation d'un cas
# (FR above: "Preservation of the commands creating a case".)
2058 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2059 self.__name = str(__name)
2060 self.__objname = str(__objname)
2061 self.__logSerie = []
2062 self.__switchoff = False
# Built-in output formats (viewers): TUI, SCD, YACS...
2064 "TUI" :Interfaces._TUIViewer,
2065 "SCD" :Interfaces._SCDViewer,
2066 "YACS":Interfaces._YACSViewer,
# ...and built-in input formats (loaders): TUI, COM.
2069 "TUI" :Interfaces._TUIViewer,
2070 "COM" :Interfaces._COMViewer,
# User-supplied formats extend the built-in registries.
2072 if __addViewers is not None:
2073 self.__viewers.update(dict(__addViewers))
2074 if __addLoaders is not None:
2075 self.__loaders.update(dict(__addLoaders))
# register(): append one command record unless logging is switched off;
# "self" is stripped from the recorded keys.  __switchoff toggles recording
# for subsequent calls (the surrounding if lines are missing here).
2077 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2078 "Enregistrement d'une commande individuelle"
2079 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2080 if "self" in __keys: __keys.remove("self")
2081 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2083 self.__switchoff = True
2085 self.__switchoff = False
# dump(): normalized restitution through the viewer registered for __format.
2087 def dump(self, __filename=None, __format="TUI", __upa=""):
2088 "Restitution normalisée des commandes"
2089 if __format in self.__viewers:
2090 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2092 raise ValueError("Dumping as \"%s\" is not available"%__format)
2093 return __formater.dump(__filename, __upa)
# load(): normalized loading through the loader registered for __format.
2095 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2096 "Chargement normalisé des commandes"
2097 if __format in self.__loaders:
2098 __formater = self.__loaders[__format]()
2100 raise ValueError("Loading as \"%s\" is not available"%__format)
2101 return __formater.load(__filename, __content, __object)
2103 # ==============================================================================
# NOTE(review): the "def MultiFonction(__xserie, ..." signature line itself is
# missing from this decimated view — what follows is the tail of the
# signature and the (partial) body.  Code kept byte-identical; comments only.
#
# MultiFonction: for an ordered series of input vectors, returns the
# corresponding series of values of _sFunction, optionally evaluated in
# parallel with multiprocessing (worker count _mpWorkers).
2106 _extraArguments = None,
2107 _sFunction = lambda x: x,
2112 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2113 correspondante de valeurs de la fonction en argument
# Input checks: the series must be iterable; normalize the worker count
# (None or < 1 presumably maps to a default — the else branch is missing).
2115 # Vérifications et définitions initiales
2116 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2117 if not PlatformInfo.isIterable( __xserie ):
2118 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
2120 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2123 __mpWorkers = int(_mpWorkers)
2125 import multiprocessing
# Multiprocessing path: build the job list (bare values, or value plus the
# extra positional arguments), then map over a Pool.
# NOTE(review): "unkown" typo lives in runtime error strings — left as-is.
2136 if _extraArguments is None:
2138 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2139 for __xvalue in __xserie:
2140 _jobs.append( [__xvalue, ] + list(_extraArguments) )
2142 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2143 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2144 import multiprocessing
2145 with multiprocessing.Pool(__mpWorkers) as pool:
2146 __multiHX = pool.map( _sFunction, _jobs )
2149 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential fallback: one call per value, with extra arguments passed
# positionally (list/tuple/map) or as keywords (dict).
2151 # logging.debug("MULTF Internal monoprocessing calculation begin")
2153 if _extraArguments is None:
2154 for __xvalue in __xserie:
2155 __multiHX.append( _sFunction( __xvalue ) )
2156 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2157 for __xvalue in __xserie:
2158 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2159 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2160 for __xvalue in __xserie:
2161 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2163 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2164 # logging.debug("MULTF Internal monoprocessing calculation end")
2166 # logging.debug("MULTF Internal multifonction calculations end")
2169 # ==============================================================================
# NOTE(review): decimated dump — several signature lines, "else:" branches
# and the return statements are missing from this view.  Code kept
# byte-identical; comments only.
#
# CostFunction3D: general cost function J = Jb + Jo for the static/3D
# algorithms (3DVAR, BLUE and derivatives, Kalman and derivatives,
# LeastSquares, SamplingTest, PSO, SA, Tabu, DFO, QuantileRegression).
# The quality measure _QM selects the Jo/Jb formulation; intermediate
# quantities are optionally stored into _SSV under the keys listed in _SSC.
2170 def CostFunction3D(_x,
2171 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2172 _HmX = None, # Simulation déjà faite de Hm(x)
2173 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2178 _SIV = False, # A résorber pour la 8.0
2179 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2180 _nPS = 0, # nbPreviousSteps
2181 _QM = "DA", # QualityMeasure
2182 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2183 _fRt = False, # Restitue ou pas la sortie étendue
2184 _sSc = True, # Stocke ou pas les SSC
# NOTE(review): mutable defaults _SSC=[] and _SSV={} are shared across calls;
# here _SSV is deliberately used as an in/out accumulator — confirm intended.
2187 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2188 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2189 DFO, QuantileRegression
# Alias "append" onto "store" so Persistence objects in _SSV can be used
# with list semantics below.
2195 for k in ["CostFunctionJ",
2201 "SimulatedObservationAtCurrentOptimum",
2202 "SimulatedObservationAtCurrentState",
2206 if hasattr(_SSV[k],"store"):
2207 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
# Normalize x to a column matrix and optionally record the current state.
2209 _X = numpy.asmatrix(numpy.ravel( _x )).T
2210 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2211 _SSV["CurrentState"].append( _X )
# Forward simulation: reuse the precomputed _HmX when given, otherwise the
# operator _Hm is mandatory and is applied to _X (with optional _arg tuple).
2213 if _HmX is not None:
2217 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2221 _HX = _Hm( _X, *_arg )
2222 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2224 if "SimulatedObservationAtCurrentState" in _SSC or \
2225 "SimulatedObservationAtCurrentOptimum" in _SSC:
2226 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# Cost computation.  NaN in the simulation propagates NaN costs; otherwise
# Jb/Jo follow the selected quality measure _QM.
2228 if numpy.any(numpy.isnan(_HX)):
2229 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2231 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
# "DA" / augmented weighted least squares: needs both B^-1 and R^-1.
2232 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2233 if _BI is None or _RI is None:
2234 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2235 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2236 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2237 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
# Weighted least squares: R^-1 only.
2238 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2240 raise ValueError("Observation error covariance matrix has to be properly defined!")
2242 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
# Plain L2, L1 and max-error measures (Jb lines missing from this view).
2243 elif _QM in ["LeastSquares", "LS", "L2"]:
2245 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2246 elif _QM in ["AbsoluteValue", "L1"]:
2248 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2249 elif _QM in ["MaximumError", "ME"]:
2251 Jo = numpy.max( numpy.abs(_Y - _HX) )
2252 elif _QM in ["QR", "Null"]:
2256 raise ValueError("Unknown asked quality measure!")
2258 J = float( Jb ) + float( Jo )
# Storage of the cost terms and of optimum-tracking quantities; IndexMin is
# the argmin of J over steps after the _nPS previously recorded ones.
2261 _SSV["CostFunctionJb"].append( Jb )
2262 _SSV["CostFunctionJo"].append( Jo )
2263 _SSV["CostFunctionJ" ].append( J )
2265 if "IndexOfOptimum" in _SSC or \
2266 "CurrentOptimum" in _SSC or \
2267 "SimulatedObservationAtCurrentOptimum" in _SSC:
2268 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2269 if "IndexOfOptimum" in _SSC:
2270 _SSV["IndexOfOptimum"].append( IndexMin )
2271 if "CurrentOptimum" in _SSC:
2272 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2273 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2274 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
# QuantileRegression gets a special return path (lines missing here).
2279 if _QM in ["QR"]: # Pour le QuantileRegression
2284 # ==============================================================================
# Self-test entry point: prints a banner when the module is run directly.
2285 if __name__ == "__main__":
2286 print('\n AUTODIAGNOSTIC\n')