1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
40 class CacheManager(object):
# NOTE(review): this extract is a line-sampled copy of the original file --
# the leading integers on each line are stale source line numbers, and many
# intermediate lines are elided, so the bodies below are fragmentary. Code
# text is kept byte-identical; only comments are added or translated.
#
# CacheManager: keeps a bounded FIFO cache of (x, H(x)) operator evaluations,
# keyed by operator name, so redundant evaluations can be skipped.
42 Classe générale de gestion d'un cache de calculs
# __init__ fragment: redundancy tolerance and cache length are tunable at
# creation. NOTE(review): "lenght" is the upstream spelling of this keyword;
# it is part of the public interface and must not be "fixed" here.
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 Les caractéristiques de tolérance peuvent être modifiées à la création.
51 self.__tolerBP = float(toleranceInRedundancy)
52 self.__lenghtOR = int(lenghtOfRedundancy)
53 self.__initlnOR = self.__lenghtOR
# wasCalculatedIn: scan the cache backwards (most recent entry first) for an
# entry matching (xValue, oName). The elided lines presumably hold the
# "continue" statements of the mismatch branches and the hit bookkeeping --
# confirm against the full source.
63 def wasCalculatedIn(self, xValue, oName="" ):
64 "Checks for the existence of a computation matching the value"
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
69 if not hasattr(xValue, 'size'):
71 elif (str(oName) != self.__listOPCV[i][3]):
73 elif (xValue.size != self.__listOPCV[i][0].size):
# Cheap first-component screening before the full norm comparison below.
75 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full test: relative distance below tolerance means a cache hit.
77 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
79 __HxV = self.__listOPCV[i][1]
# storeValueInX: store one (x, H(x)) pair. The cache capacity is sized
# lazily from the first stored vector and grown for each new operator name.
83 def storeValueInX(self, xValue, HxValue, oName="" ):
84 "Stores, for an operator o, a computation Hx matching the value x"
85 if self.__lenghtOR < 0:
86 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87 self.__initlnOR = self.__lenghtOR
88 self.__seenNames.append(str(oName))
89 if str(oName) not in self.__seenNames: # Extend the capacity for a new operator name
90 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
91 self.__initlnOR += self.__lenghtOR
92 self.__seenNames.append(str(oName))
# FIFO eviction: drop the oldest entries once over capacity.
93 while len(self.__listOPCV) > self.__lenghtOR:
94 self.__listOPCV.pop(0)
95 self.__listOPCV.append( (
96 copy.copy(numpy.ravel(xValue)), # 0 Previous point
97 copy.copy(HxValue), # 1 Previous value
98 numpy.linalg.norm(xValue), # 2 Norm
99 str(oName), # 3 Operator name
# disable()/enable() fragments: temporarily shrink, then restore, the cache
# length, toggling the enabled flag.
104 self.__initlnOR = self.__lenghtOR
106 self.__enabled = False
110 self.__lenghtOR = self.__initlnOR
111 self.__enabled = True
113 # ==============================================================================
114 class Operator(object):
# Operator: generic interface around a simple operator, given either as a
# Python (multi-)function or as a matrix. Bodies below are fragmentary --
# see the sampling note at the top of this extract.
116 Classe générale d'interface de type opérateur simple
# __init__ fragment (part of the signature is elided).
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 inputAsMultiFunction = False,
129 enableMultiProcess = False,
130 extraArguments = None,
# Docstring (French): the object is built from either a mono/multi-function
# (fromMethod) or a matrix (fromMatrix); the flags control redundancy
# avoidance, multi-function input and multiprocessing.
133 On construit un objet de ce type en fournissant, à l'aide de l'un des
134 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
137 - name : nom d'opérateur
138 - fromMethod : argument de type fonction Python
139 - fromMatrix : argument adapté au constructeur numpy.matrix
140 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
141 - inputAsMultiFunction : booléen indiquant une fonction explicitement
142 définie (ou pas) en multi-fonction
143 - extraArguments : arguments supplémentaires passés à la fonction de
144 base et ses dérivées (tuple ou dictionnaire)
146 self.__name = str(name)
# Per-instance evaluation counters (matrix form, function form, cache hits).
147 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
148 self.__AvoidRC = bool( avoidingRedundancy )
149 self.__inputAsMF = bool( inputAsMultiFunction )
150 self.__mpEnabled = bool( enableMultiProcess )
151 self.__extraArgs = extraArguments
152 if fromMethod is not None and self.__inputAsMF:
# The user function is already a multi-function: use it directly.
153 self.__Method = fromMethod # logtimer(fromMethod)
155 self.__Type = "Method"
156 elif fromMethod is not None and not self.__inputAsMF:
# Wrap a mono-function into the MultiFonction adapter.
157 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
159 self.__Type = "Method"
160 elif fromMatrix is not None:
# NOTE(review): numpy.float is a removed alias in NumPy >= 1.24 -- this
# line needs numpy.float_ or float when upgrading NumPy.
162 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
163 self.__Type = "Matrix"
# Class-level cache toggles, delegating to the shared Operator.CM cache.
169 def disableAvoidingRedundancy(self):
171 Operator.CM.disable()
173 def enableAvoidingRedundancy(self):
# NOTE(review): this visible line is presumably the fallback (else) branch
# of enableAvoidingRedundancy; the enable path is elided -- confirm against
# the full source.
178 Operator.CM.disable()
# appliedTo: apply the operator to a series of xValue arguments, honoring
# precomputed HValue results and the redundancy cache. Fragmentary body.
184 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
186 Permet de restituer le résultat de l'application de l'opérateur à une
187 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
188 argument devant a priori être du bon type.
190 - les arguments par série sont :
191 - xValue : argument adapté pour appliquer l'opérateur
192 - HValue : valeur précalculée de l'opérateur en ce point
193 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# Branch 1: precomputed values supplied -- reuse them and feed the cache.
200 if HValue is not None:
204 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
206 if _HValue is not None:
207 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
209 for i in range(len(_HValue)):
210 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
212 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Branch 2: evaluate each x, checking the redundancy cache first.
217 for i, xv in enumerate(_xValue):
219 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
221 __alreadyCalculated = False
223 if __alreadyCalculated:
224 self.__addOneCacheCall()
227 if self.__Matrix is not None:
# Linear case: direct matrix-vector product.
228 self.__addOneMatrixCall()
229 _xv = numpy.matrix(numpy.ravel(xv)).T
230 _hv = self.__Matrix * _xv
232 self.__addOneMethodCall()
236 _HxValue.append( _hv )
# Remaining points are evaluated in one multi-function call.
238 if len(_xserie)>0 and self.__Matrix is None:
239 if self.__extraArgs is None:
240 _hserie = self.__Method( _xserie ) # Multi-function evaluation
242 _hserie = self.__Method( _xserie, self.__extraArgs ) # Multi-function evaluation
243 if not hasattr(_hserie, "pop"):
244 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
250 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally stack the series of results as columns of one array.
252 if returnSerieAsArrayMatrix:
253 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
255 if argsAsSerie: return _HxValue
256 else: return _HxValue[-1]
# appliedControledFormTo: apply the operator to (xValue, uValue) pairs; when
# uValue is None the operator is applied to xValue alone. Fragmentary body.
258 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
260 Permet de restituer le résultat de l'application de l'opérateur à des
261 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
262 argument devant a priori être du bon type. Si la uValue est None,
263 on suppose que l'opérateur ne s'applique qu'à xValue.
265 - paires : les arguments par paire sont :
266 - xValue : argument X adapté pour appliquer l'opérateur
267 - uValue : argument U adapté pour appliquer l'opérateur
268 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
# Normalize the input to a series of pairs.
270 if argsAsSerie: _xuValue = paires
271 else: _xuValue = (paires,)
272 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Linear case: only X is used, U is ignored by the matrix form.
274 if self.__Matrix is not None:
276 for paire in _xuValue:
277 _xValue, _uValue = paire
278 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
279 self.__addOneMatrixCall()
280 _HxValue.append( self.__Matrix * _xValue )
# Function case: pass the pair when U is set, else X alone.
283 for paire in _xuValue:
284 _xValue, _uValue = paire
285 if _uValue is not None:
286 _xuArgs.append( paire )
288 _xuArgs.append( _xValue )
289 self.__addOneMethodCall( len(_xuArgs) )
290 if self.__extraArgs is None:
291 _HxValue = self.__Method( _xuArgs ) # Multi-function evaluation
293 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Multi-function evaluation
# Optionally stack the series of results as columns of one array.
295 if returnSerieAsArrayMatrix:
296 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
298 if argsAsSerie: return _HxValue
299 else: return _HxValue[-1]
# appliedInXTo: apply the operator, linearized at xNominal, to xValue. For a
# matrix operator the nominal point is irrelevant. Fragmentary body.
301 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
303 Permet de restituer le résultat de l'application de l'opérateur à une
304 série d'arguments xValue, sachant que l'opérateur est valable en
305 xNominal. Cette méthode se contente d'appliquer, son argument devant a
306 priori être du bon type. Si l'opérateur est linéaire car c'est une
307 matrice, alors il est valable en tout point nominal et xNominal peut
308 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
309 permet d'indiquer que l'argument est multi-paires.
311 - paires : les arguments par paire sont :
312 - xNominal : série d'arguments permettant de donner le point où
313 l'opérateur est construit pour être ensuite appliqué
314 - xValue : série d'arguments adaptés pour appliquer l'opérateur
315 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
# Normalize the input to a series of (xNominal, xValue) pairs.
317 if argsAsSerie: _nxValue = paires
318 else: _nxValue = (paires,)
319 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear case: ignore the nominal point and multiply directly.
321 if self.__Matrix is not None:
323 for paire in _nxValue:
324 _xNominal, _xValue = paire
325 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
326 self.__addOneMatrixCall()
327 _HxValue.append( self.__Matrix * _xValue )
# Function case: one multi-function call on the whole series of pairs.
329 self.__addOneMethodCall( len(_nxValue) )
330 if self.__extraArgs is None:
331 _HxValue = self.__Method( _nxValue ) # Multi-function evaluation
333 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Multi-function evaluation
# Optionally stack the series of results as columns of one array.
335 if returnSerieAsArrayMatrix:
336 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
338 if argsAsSerie: return _HxValue
339 else: return _HxValue[-1]
# asMatrix: return the operator in matrix form, evaluating the function at
# one (or a series of) operating point(s) when no matrix is stored.
341 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
343 Permet de renvoyer l'opérateur sous la forme d'une matrice
345 if self.__Matrix is not None:
346 self.__addOneMatrixCall()
347 mValue = [self.__Matrix,]
# An operating point was actually supplied ("UnknownVoidValue" is the
# sentinel for "not given" -- do not use "None" as the sentinel here).
348 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue":
351 self.__addOneMethodCall( len(ValueForMethodForm) )
352 for _vfmf in ValueForMethodForm:
353 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
355 self.__addOneMethodCall()
356 mValue = self.__Method(((ValueForMethodForm, None),))
358 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
360 if argsAsSerie: return mValue
361 else: return mValue[-1]
# shape fragment: the "def" line of this accessor is elided in this extract;
# it returns the numpy shape when the matrix form is available, else raises.
365 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
366 la forme d'une matrice
368 if self.__Matrix is not None:
369 return self.__Matrix.shape
371 raise ValueError("Matrix form of the operator is not available, nor the shape")
# nbcalls: report evaluation counters -- per-instance totals followed by the
# class-wide (all Operator instances) totals, as one tuple; "which" selects a
# single entry by index. Fragmentary body (the tuple opening is elided).
373 def nbcalls(self, which=None):
375 Renvoie les nombres d'évaluations de l'opérateur
378 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
379 self.__NbCallsAsMatrix,
380 self.__NbCallsAsMethod,
381 self.__NbCallsOfCached,
382 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
383 Operator.NbCallsAsMatrix,
384 Operator.NbCallsAsMethod,
385 Operator.NbCallsOfCached,
387 if which is None: return __nbcalls
388 else: return __nbcalls[which]
def __addOneMatrixCall(self):
    "Record one evaluation performed through the matrix form"
    self.__NbCallsAsMatrix += 1 # Per-instance counter
    Operator.NbCallsAsMatrix += 1 # Class-wide counter
def __addOneMethodCall(self, nb = 1):
    "Record nb evaluations performed through the function form"
    self.__NbCallsAsMethod += nb # Per-instance counter
    Operator.NbCallsAsMethod += nb # Class-wide counter
def __addOneCacheCall(self):
    "Record one evaluation answered from the redundancy cache"
    self.__NbCallsOfCached += 1 # Per-instance counter
    Operator.NbCallsOfCached += 1 # Class-wide counter
405 # ==============================================================================
406 class FullOperator(object):
# FullOperator: builds the full triple of operators (Direct, Tangent,
# Adjoint) from one of several user inputs: a matrix, one function (with
# approximated derivatives), three functions, or a script. Bodies are
# fragmentary -- see the sampling note at the top of this extract.
408 Classe générale d'interface de type opérateur complet
409 (Direct, Linéaire Tangent, Adjoint)
# __init__ fragment (part of the signature is elided).
412 name = "GenericFullOperator",
414 asOneFunction = None, # 1 Function
415 asThreeFunctions = None, # 3 Functions in a dictionary
416 asScript = None, # 1 or 3 Function(s) given by script
417 asDict = None, # Parameters
419 extraArguments = None,
421 inputAsMF = False,# Function(s) as Multi-Functions
426 self.__name = str(name)
427 self.__check = bool(toBeChecked)
428 self.__extraArgs = extraArguments
# Normalize the multiprocessing parameters.
433 if (asDict is not None) and isinstance(asDict, dict):
434 __Parameters.update( asDict )
435 # EnableMultiProcessingInDerivatives=True takes priority
436 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
437 __Parameters["EnableMultiProcessingInDerivatives"] = True
438 __Parameters["EnableMultiProcessingInEvaluation"] = False
439 if "EnableMultiProcessingInDerivatives" not in __Parameters:
440 __Parameters["EnableMultiProcessingInDerivatives"] = False
441 if __Parameters["EnableMultiProcessingInDerivatives"]:
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "EnableMultiProcessingInEvaluation" not in __Parameters:
444 __Parameters["EnableMultiProcessingInEvaluation"] = False
445 if "withIncrement" in __Parameters: # Temporary backward compatibility
446 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Input given as a script: import the matrix or the function(s) from it.
448 if asScript is not None:
449 __Matrix, __Function = None, None
451 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
453 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
454 __Function.update({"useApproximatedDerivatives":True})
455 __Function.update(__Parameters)
456 elif asThreeFunctions:
458 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
459 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
460 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
462 __Function.update(__Parameters)
# Input given directly as function object(s).
465 if asOneFunction is not None:
466 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
467 if asOneFunction["Direct"] is not None:
468 __Function = asOneFunction
470 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
472 __Function = { "Direct":asOneFunction }
473 __Function.update({"useApproximatedDerivatives":True})
474 __Function.update(__Parameters)
475 elif asThreeFunctions is not None:
# Full Tangent/Adjoint pair given and no approximation requested.
476 if isinstance(asThreeFunctions, dict) and \
477 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
478 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
479 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
480 __Function = asThreeFunctions
481 elif isinstance(asThreeFunctions, dict) and \
482 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
483 __Function = asThreeFunctions
484 __Function.update({"useApproximatedDerivatives":True})
486 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
487 if "Direct" not in asThreeFunctions:
488 __Function["Direct"] = asThreeFunctions["Tangent"]
489 __Function.update(__Parameters)
# Dead Python 2 compatibility check, kept commented out upstream.
493 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
494 # for k in ("Direct", "Tangent", "Adjoint"):
495 # if k in __Function and hasattr(__Function[k],"__class__"):
496 # if type(__Function[k]) is type(self.__init__):
497 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the operating-point input to a dictionary.
499 if appliedInX is not None and isinstance(appliedInX, dict):
500 __appliedInX = appliedInX
501 elif appliedInX is not None:
502 __appliedInX = {"HXb":appliedInX}
506 if scheduledBy is not None:
507 self.__T = scheduledBy
# Case 1: a Direct function with approximated derivatives (FDApproximation).
509 if isinstance(__Function, dict) and \
510 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
511 ("Direct" in __Function) and (__Function["Direct"] is not None):
512 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
513 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
514 if "withdX" not in __Function: __Function["withdX"] = None
515 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
516 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
517 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
518 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
519 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
520 from daCore import NumericObjects
521 FDA = NumericObjects.FDApproximation(
523 Function = __Function["Direct"],
524 centeredDF = __Function["CenteredFiniteDifference"],
525 increment = __Function["DifferentialIncrement"],
526 dX = __Function["withdX"],
527 extraArguments = self.__extraArgs,
528 avoidingRedundancy = __Function["withAvoidingRedundancy"],
529 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
530 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
531 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
532 mpWorkers = __Function["NumberOfProcesses"],
533 mfEnabled = __Function["withmfEnabled"],
535 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
536 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
537 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: all three functions explicitly given.
538 elif isinstance(__Function, dict) and \
539 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
540 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
541 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
542 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
543 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: a matrix -- the adjoint is its transpose.
# NOTE(review): numpy.float is a removed alias in NumPy >= 1.24.
544 elif asMatrix is not None:
545 __matrice = numpy.matrix( __Matrix, numpy.float )
546 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
547 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
548 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
551 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Store the operating point(s), normalized to column matrices.
553 if __appliedInX is not None:
554 self.__FO["AppliedInX"] = {}
555 for key in list(__appliedInX.keys()):
556 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
557 # Case of a true matrix
558 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
559 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
560 # Case of a vector represented as a 2-dimensional array
561 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
563 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
565 self.__FO["AppliedInX"] = None
# Standard representations delegate to the internal operator dictionary.
571 "x.__repr__() <==> repr(x)"
572 return repr(self.__FO)
575 "x.__str__() <==> str(x)"
576 return str(self.__FO)
578 # ==============================================================================
579 class Algorithm(object):
# Algorithm: base class for elementary assimilation algorithms. Provides
# initialized Persistence storage variables and accessors; subclasses must
# implement "run". Bodies are fragmentary -- see the sampling note at the
# top of this extract.
581 Classe générale d'interface de type algorithme
583 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
584 d'assimilation, en fournissant un container (dictionnaire) de variables
585 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
587 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
589 def __init__(self, name):
# The docstring below (French, its quotes are elided in this extract) lists
# every storable variable exposed through the "get" accessor.
591 L'initialisation présente permet de fabriquer des variables de stockage
592 disponibles de manière générique dans les algorithmes élémentaires. Ces
593 variables de stockage sont ensuite conservées dans un dictionnaire
594 interne à l'objet, mais auquel on accède par la méthode "get".
596 Les variables prévues sont :
597 - APosterioriCorrelations : matrice de corrélations de la matrice A
598 - APosterioriCovariance : matrice de covariances a posteriori : A
599 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
600 - APosterioriVariances : vecteur des variances de la matrice A
601 - Analysis : vecteur d'analyse : Xa
602 - BMA : Background moins Analysis : Xa - Xb
603 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
604 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
605 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
606 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
607 - CostFunctionJo : partie observations de la fonction-coût : Jo
608 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
609 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
610 - CurrentOptimum : état optimal courant lors d'itérations
611 - CurrentState : état courant lors d'itérations
612 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
613 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
614 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
615 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
616 - Innovation : l'innovation : d = Y - H(X)
617 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
618 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
619 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
620 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
621 - KalmanGainAtOptimum : gain de Kalman à l'optimum
622 - MahalanobisConsistency : indicateur de consistance des covariances
623 - OMA : Observation moins Analyse : Y - Xa
624 - OMB : Observation moins Background : Y - Xb
625 - ForecastState : état prédit courant lors d'itérations
626 - Residu : dans le cas des algorithmes de vérification
627 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
628 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
629 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
630 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
631 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
632 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
633 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
634 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
635 On peut rajouter des variables à stocker dans l'initialisation de
636 l'algorithme élémentaire qui va hériter de cette classe
# Internal bookkeeping: logger, memory probe, parameter containers.
638 logging.debug("%s Initialisation", str(name))
639 self._m = PlatformInfo.SystemUsage()
641 self._name = str( name )
642 self._parameters = {"StoreSupplementaryCalculations":[]}
643 self.__required_parameters = {}
644 self.__required_inputs = {
645 "RequiredInputValues":{"mandatory":(), "optional":()},
646 "ClassificationTags":[],
648 self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
649 self.__canonical_parameter_name = {} # Mapping "lower" -> "correct"
650 self.__canonical_stored_name = {} # Mapping "lower" -> "correct"
# One Persistence container per storable variable listed above.
652 self.StoredVariables = {}
653 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
654 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
655 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
656 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
657 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
658 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
659 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
660 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
661 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
662 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
663 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
664 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
665 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
666 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
667 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
668 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
669 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
670 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
671 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
672 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
673 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
674 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
675 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
676 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
677 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
678 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
679 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
680 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
681 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
682 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
683 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
684 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
685 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
686 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
687 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
688 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
689 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
690 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
691 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
692 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
693 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Build the case-insensitive lookup tables for stored and parameter names.
695 for k in self.StoredVariables:
696 self.__canonical_stored_name[k.lower()] = k
698 for k, v in self.__variable_names_not_public.items():
699 self.__canonical_parameter_name[k.lower()] = k
700 self.__canonical_parameter_name["algorithm"] = "Algorithm"
701 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
703 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
705 logging.debug("%s Lancement", self._name)
706 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
707 self._getTimeState(reset=True)
709 # Mise a jour des paramètres internes avec le contenu de Parameters, en
710 # reprenant les valeurs par défauts pour toutes celles non définies
711 self.__setParameters(Parameters, reset=True)
712 for k, v in self.__variable_names_not_public.items():
713 if k not in self._parameters: self.__setParameters( {k:v} )
715 # Corrections et compléments des vecteurs
716 def __test_vvalue(argument, variable, argname, symbol=None):
717 if symbol is None: symbol = variable
719 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
720 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
721 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
722 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
724 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
726 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
728 __test_vvalue( Xb, "Xb", "Background or initial state" )
729 __test_vvalue( Y, "Y", "Observation" )
730 __test_vvalue( U, "U", "Control" )
732 # Corrections et compléments des covariances
733 def __test_cvalue(argument, variable, argname, symbol=None):
734 if symbol is None: symbol = variable
736 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
737 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
738 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
739 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
741 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
743 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
745 __test_cvalue( B, "B", "Background" )
746 __test_cvalue( R, "R", "Observation" )
747 __test_cvalue( Q, "Q", "Evolution" )
749 # Corrections et compléments des opérateurs
750 def __test_ovalue(argument, variable, argname, symbol=None):
751 if symbol is None: symbol = variable
752 if argument is None or (isinstance(argument,dict) and len(argument)==0):
753 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
754 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
755 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
756 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
758 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
760 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
762 __test_ovalue( HO, "HO", "Observation", "H" )
763 __test_ovalue( EM, "EM", "Evolution", "M" )
764 __test_ovalue( CM, "CM", "Control Model", "C" )
766 # Corrections et compléments des bornes
767 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
768 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
770 self._parameters["Bounds"] = None
772 # Corrections et compléments de l'initialisation en X
773 if "InitializationPoint" in self._parameters:
775 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
776 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
777 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
778 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
779 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
781 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
783 if self._parameters["InitializationPoint"] is None:
784 raise ValueError("Forced initial point can not be set without any given Background or required value")
786 # Correction pour pallier a un bug de TNC sur le retour du Minimum
787 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
788 self.setParameterValue("StoreInternalVariables",True)
790 # Verbosité et logging
791 if logging.getLogger().level < logging.WARNING:
792 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
793 if PlatformInfo.has_scipy:
794 import scipy.optimize
795 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
797 self._parameters["optmessages"] = 15
799 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
800 if PlatformInfo.has_scipy:
801 import scipy.optimize
802 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
804 self._parameters["optmessages"] = 15
    def _post_run(self,_oH=None):
        "Post-processing of the algorithm run (derived quantities and debug accounting)"
        # When an a posteriori covariance was stored, derive the optional
        # variances / standard deviations / correlations requested through
        # the "StoreSupplementaryCalculations" parameter.
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Correlation = D^-1/2 * A * D^-1/2 with D the diagonal of A.
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        # Debug-level accounting of operator call/cache counters and resources.
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
829 def _toStore(self, key):
830 "True if in StoreSupplementaryCalculations, else False"
831 return key in self._parameters["StoreSupplementaryCalculations"]
833 def get(self, key=None):
835 Renvoie l'une des variables stockées identifiée par la clé, ou le
836 dictionnaire de l'ensemble des variables disponibles en l'absence de
837 clé. Ce sont directement les variables sous forme objet qui sont
838 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
839 des classes de persistance.
842 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
844 return self.StoredVariables
846 def __contains__(self, key=None):
847 "D.__contains__(k) -> True if D has a key k, else False"
848 if key is None or key.lower() not in self.__canonical_stored_name:
851 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        # NOTE(review): the 'def keys(self):' header appears to have been
        # dropped by extraction just above this docstring — confirm upstream.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        # NOTE(review): an 'else' fallback and the 'def pop(self, k, d):'
        # header appear to have been dropped here.
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
        # NOTE(review): the intermediate fallback branch between the return
        # above and this raise appears to be missing.
        raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"
875 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
877 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
878 sa forme mathématique la plus naturelle possible.
880 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
882 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
884 Permet de définir dans l'algorithme des paramètres requis et leurs
885 caractéristiques par défaut.
888 raise ValueError("A name is mandatory to define a required parameter.")
890 self.__required_parameters[name] = {
892 "typecast" : typecast,
899 self.__canonical_parameter_name[name.lower()] = name
900 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
902 def getRequiredParameters(self, noDetails=True):
904 Renvoie la liste des noms de paramètres requis ou directement le
905 dictionnaire des paramètres requis.
908 return sorted(self.__required_parameters.keys())
910 return self.__required_parameters
912 def setParameterValue(self, name=None, value=None):
914 Renvoie la valeur d'un paramètre requis de manière contrôlée
916 __k = self.__canonical_parameter_name[name.lower()]
917 default = self.__required_parameters[__k]["default"]
918 typecast = self.__required_parameters[__k]["typecast"]
919 minval = self.__required_parameters[__k]["minval"]
920 maxval = self.__required_parameters[__k]["maxval"]
921 listval = self.__required_parameters[__k]["listval"]
922 listadv = self.__required_parameters[__k]["listadv"]
924 if value is None and default is None:
926 elif value is None and default is not None:
927 if typecast is None: __val = default
928 else: __val = typecast( default )
930 if typecast is None: __val = value
933 __val = typecast( value )
935 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
937 if minval is not None and (numpy.array(__val, float) < minval).any():
938 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
939 if maxval is not None and (numpy.array(__val, float) > maxval).any():
940 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
941 if listval is not None or listadv is not None:
942 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
944 if listval is not None and v in listval: continue
945 elif listadv is not None and v in listadv: continue
947 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
948 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
949 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
953 def requireInputArguments(self, mandatory=(), optional=()):
955 Permet d'imposer des arguments de calcul requis en entrée.
957 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
958 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
960 def getInputArguments(self):
962 Permet d'obtenir les listes des arguments de calcul requis en entrée.
964 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
966 def setAttributes(self, tags=()):
968 Permet d'adjoindre des attributs comme les tags de classification.
969 Renvoie la liste actuelle dans tous les cas.
971 self.__required_inputs["ClassificationTags"].extend( tags )
972 return self.__required_inputs["ClassificationTags"]
    def __setParameters(self, fromDico={}, reset=False):
        # NOTE(review): mutable default argument is tolerable here only while
        # fromDico stays read-only in this method — consider 'fromDico=None'.
        # NOTE(review): docstring delimiters appear lost in extraction.
        Permet de stocker les paramètres reçus dans le dictionnaire interne.
        self._parameters.update( fromDico )
        # Map canonical (lower-cased) parameter names back to the exact keys
        # used by the caller so values can be fetched case-insensitively.
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
            # NOTE(review): an 'elif reset:' (or similar) branch introducing the
            # next assignment appears to have been dropped by extraction.
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
994 def _getTimeState(self, reset=False):
996 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
999 self.__initial_cpu_time = time.process_time()
1000 self.__initial_elapsed_time = time.perf_counter()
1003 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1004 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1005 return self.__cpu_time, self.__elapsed_time
1007 def _StopOnTimeLimit(self, X=None, withReason=False):
1008 "Stop criteria on time limit: True/False [+ Reason]"
1009 c, e = self._getTimeState()
1010 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1011 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1012 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1013 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1015 __SC, __SR = False, ""
1021 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action-interface class for the algorithm and its parameters.
    """
    # NOTE(review): the 'def __init__(self,' header and most of the keyword
    # parameter lines (asAlgorithm, asDict, asScript, ...) appear to have been
    # dropped by extraction — confirm against upstream.
                 name                = "GenericAlgorithm",
        self.__name       = str(name)
        # NOTE(review): the initialisation of self.__P appears to be missing.
        self.__algorithm         = {}
        self.__algorithmFile     = None
        self.__algorithmName     = None
        self.updateParameters( asDict, asScript )
        # Resolve the algorithm either from the given script or directly.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): the 'else:' header for this branch appears to be missing.
            __Algo = asAlgorithm
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
    # NOTE(review): the remaining signature lines (asDict=None, asScript=None)
    # appear to have been dropped by extraction.
    def updateParameters(self,
        "Update of the parameters"
        # Either read the parameter dictionary from the given script...
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # NOTE(review): the 'else:' branch assigning __Dict = asDict is missing.
        # ...then merge it into the internal parameter store.
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Runs the assimilation calculation"
        # Clear the operator cache before a fresh run.
        Operator.CM.clearCache()
        # All assimilation objects must arrive bundled in a single dictionary.
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each entry: objects exposing a getO() accessor are containers,
        # the others are used directly.
        if hasattr(asDictAO["Background"],"getO"):        self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"):   self.__Xb = asDictAO["CheckingPoint"].getO()
        else:                                             self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"):       self.__Y  = asDictAO["Observation"].getO()
        else:                                             self.__Y  = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"):      self.__U  = asDictAO["ControlInput"].getO()
        else:                                             self.__U  = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else:                                             self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"):    self.__EM = asDictAO["EvolutionModel"].getO()
        else:                                             self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"):      self.__CM = asDictAO["ControlModel"].getO()
        else:                                             self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # Validate all sizes/shapes before launching the algorithm.
        self.__shape_validate()
        # NOTE(review): the keyword-argument lines of this run() call (Xb, Y,
        # U, HO, EM, CM, R, B, Q, ...) and its closing parenthesis appear to
        # have been dropped by extraction.
        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Runs the assimilation calculation through a YACS scheme"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        # NOTE(review): an 'else:' header appears to be missing here.
            __file    = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        # The whole SALOME/YACS/ADAO environment is required for execution.
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        # NOTE(review): the 'try:' and the 'import pilot' / 'import loader'
        # lines appear to have been dropped by extraction around here.
        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        # Register the scheme file itself as a procedure catalog.
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
        p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        # Report parser errors collected while importing the schema.
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
        # NOTE(review): the validity guard ('if not p.isValid():') introducing
        # the next two lines appears to have been dropped.
        print("The YACS XML schema is not valid and will not be executed:")
        print(p.getErrorReport())
        # Consistency check of the links before execution.
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        # NOTE(review): the executor run call appears to be missing after this.
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Checks the existence of, and returns, a variable or parameter key"
        # Stored algorithm variables take precedence over raw parameters.
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
        # NOTE(review): the 'else:' header and the final 'return allvariables'
        # appear to have been dropped by extraction.
        allvariables = self.__P
        for k in self.__variable_names_not_public: allvariables.pop(k, None)
1175 def pop(self, k, d):
1176 "Necessaire pour le pickling"
1177 return self.__algorithm.pop(k, d)
1179 def getAlgorithmRequiredParameters(self, noDetails=True):
1180 "Renvoie la liste des paramètres requis selon l'algorithme"
1181 return self.__algorithm.getRequiredParameters(noDetails)
1183 def getAlgorithmInputArguments(self):
1184 "Renvoie la liste des entrées requises selon l'algorithme"
1185 return self.__algorithm.getInputArguments()
1187 def getAlgorithmAttributes(self):
1188 "Renvoie la liste des attributs selon l'algorithme"
1189 return self.__algorithm.setAttributes()
    def setObserver(self, __V, __O, __I, __S):
        # Attach observer __O (with info __I and scheduler __S) to the stored
        # variable named __V; only valid once an algorithm has been chosen.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        # NOTE(review): an 'else:' header, the remaining keyword arguments of
        # this call (hook function, scheduler) and its closing parenthesis
        # appear to have been dropped by extraction.
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach observer __O from stored variable __V (__A: remove all).
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        # NOTE(review): an 'else:' header, the arguments of this call and its
        # closing parenthesis appear to have been dropped by extraction.
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
    def hasObserver(self, __V):
        # True when stored variable __V currently has a data observer.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        # NOTE(review): the 'return False' bodies of this guard and of the
        # next one appear to have been dropped by extraction.
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # NOTE(review): the 'def keys(self):' header and its docstring appear
        # to have been dropped by extraction just above this body.
        # Merge variable and parameter names, hiding the non-public ones.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1233 def __contains__(self, key=None):
1234 "D.__contains__(k) -> True if D has a key k, else False"
1235 return key in self.__algorithm or key in self.__P
        # NOTE(review): the 'def __repr__(self):' header appears to have been
        # dropped by extraction.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)
        # NOTE(review): the 'def __str__(self):' header appears to have been
        # dropped by extraction.
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        # NOTE(review): docstring delimiters appear lost in extraction.
        Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
        d'assimilation. L'argument est un champ caractère se rapportant au nom
        d'un algorithme réalisant l'opération sur les arguments fixes.
        # NOTE(review): the 'if choice is None:' guard introducing this raise
        # appears to have been dropped by extraction.
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete algorithm file
        # -------------------------------------------------
        # NOTE(review): the 'module_path = None' initialisation appears to be
        # missing before this loop.
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------
        # NOTE(review): the 'try:' opening this block appears to be missing.
        sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the file's elementary algorithm type
        # -------------------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        # NOTE(review): docstring delimiters and several blank separator lines
        # appear to have been dropped by extraction; code kept verbatim.
        Validation de la correspondance correcte des tailles des variables et
        des matrices s'il y en a.
        # Normalize every input to a shape tuple, accepting scalars, objects
        # with a .size, .shape attributes, or callable shape() providers.
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        #
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        #
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        #
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        #
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        #
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        #
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        #
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        #
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Check of the conditions
        # -----------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        #
        # Covariance matrices and the evolution operator must be square.
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        #
        # Cross-compatibility between the observation operator and the others.
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        #
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            # Special case: EnsembleBlue accepts a background given as a
            # series of members, reshaped into a persistent vector here.
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    # NOTE(review): numpy.float is removed in NumPy >= 1.24 —
                    # this needs numpy.float64 upstream.
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
                __Xb_shape = min(__B_shape)
            # NOTE(review): an 'else:' introducing the next raise appears to
            # have been dropped by extraction.
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        #
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        #
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        #
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        #
        # Bounds, when given as a sequence, must pair one entry per state component.
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1400 # ==============================================================================
class RegulationAndParameters(object):
    # NOTE(review): docstring delimiters appear lost in extraction.
    Classe générale d'interface d'action pour la régulation et ses paramètres
    # NOTE(review): the 'def __init__(self,' header and several parameter
    # lines appear to have been dropped by extraction here.
                 name               = "GenericRegulation",
        self.__name       = str(name)
        # NOTE(review): the initialisation of self.__P appears to be missing.
        # Resolve the algorithm either from the given script or directly.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): the 'else:' header of this branch appears to be missing.
            __Algo = asAlgorithm
        # Resolve the parameter dictionary the same way.
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # NOTE(review): the 'else:' branch assigning __Dict = asDict is missing.
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )
    def get(self, key = None):
        "Checks the existence of, and returns, a variable or parameter key"
        # NOTE(review): the 'if key in self.__P:' guard and the no-key branch
        # appear to have been dropped by extraction.
        return self.__P[key]
1439 # ==============================================================================
class DataObserver(object):
    # NOTE(review): docstring delimiters appear lost in extraction.
    Classe générale d'interface de type observer
    # NOTE(review): the 'def __init__(self,' header and several parameter
    # lines appear to have been dropped by extraction here.
                 name        = "GenericObserver",
        self.__name       = str(name)
        # Normalize the observed variable name(s) and their info strings into
        # parallel tuples self.__V / self.__I.
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
            # NOTE(review): the body of this branch and its 'else:' appear to
            # have been dropped by extraction.
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            # NOTE(review): the 'else:' header of this branch is missing.
                self.__I = (str(withInfo),)
        # NOTE(review): the 'else:' header introducing this raise is missing.
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        # Build the observer callable, either given directly or compiled from
        # a user script / template.
        if asObsObject is not None:
            self.__O = asObsObject
        # NOTE(review): the 'else:' header of this branch is missing.
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        # Register the observer on every requested variable of the algorithm.
        for k in range(len(self.__V)):
        # NOTE(review): the 'ename = self.__V[k]' / 'einfo = self.__I[k]'
        # assignments appear to have been dropped by extraction.
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            # NOTE(review): the 'else:' header of this branch is missing.
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
    # NOTE(review): the 'def __repr__(self):' header is missing.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)
    # NOTE(review): the 'def __str__(self):' header is missing.
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1502 # ==============================================================================
class UserScript(object):
    # NOTE(review): docstring delimiters appear lost in extraction.
    Classe générale d'interface de type texte de script utilisateur
    # NOTE(review): the 'def __init__(self,' header and several parameter
    # lines appear to have been dropped by extraction here.
                 name       = "GenericUserScript",
        self.__name       = str(name)
        # Resolve the script text from, in order of precedence: a direct
        # string, a named template, or an external script file.
        if asString is not None:
        # NOTE(review): the body of this branch ('self.__F = asString')
        # appears to have been dropped by extraction.
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
    # NOTE(review): the final 'else:' branch and the 'def __repr__(self):'
    # header appear to have been dropped by extraction.
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)
    # NOTE(review): the 'def __str__(self):' header is missing.
        "x.__str__() <==> str(x)"
        return str(self.__F)
1536 # ==============================================================================
class ExternalParameters(object):
    # NOTE(review): docstring delimiters appear lost in extraction.
    Classe générale d'interface de type texte de script utilisateur
    # NOTE(review): the 'def __init__(self,' header and several parameter
    # lines appear to have been dropped by extraction here.
                 name        = "GenericExternalParameters",
        self.__name       = str(name)
        # NOTE(review): the initialisation of self.__P appears to be missing.
        self.updateParameters( asDict, asScript )
    # NOTE(review): the remaining signature lines (asDict=None, asScript=None)
    # appear to have been dropped by extraction.
    def updateParameters(self,
        "Update of the parameters"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        # NOTE(review): the 'else:' branch assigning __Dict = asDict is missing.
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def get(self, key = None):
        # NOTE(review): the 'if key in self.__P:' guard appears to be missing.
        return self.__P[key]
        # NOTE(review): an 'else:' fallback appears to be missing before this.
        return list(self.__P.keys())
    # NOTE(review): the 'def keys(self):' header appears to be missing.
        return list(self.__P.keys())
    def pop(self, k, d):
        # Required for dictionary-like deletion with a default.
        return self.__P.pop(k, d)
    # NOTE(review): the 'def items(self):' header appears to be missing.
        return self.__P.items()
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1585 # ==============================================================================
1586 class State(object):
1588 Classe générale d'interface de type état
1591 name = "GenericVector",
1593 asPersistentVector = None,
1599 toBeChecked = False,
1602 Permet de définir un vecteur :
1603 - asVector : entrée des données, comme un vecteur compatible avec le
1604 constructeur de numpy.matrix, ou "True" si entrée par script.
1605 - asPersistentVector : entrée des données, comme une série de vecteurs
1606 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1607 type Persistence, ou "True" si entrée par script.
1608 - asScript : si un script valide est donné contenant une variable
1609 nommée "name", la variable est de type "asVector" (par défaut) ou
1610 "asPersistentVector" selon que l'une de ces variables est placée à
1612 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1613 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1614 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1615 nommée "name"), on récupère les colonnes et on les range ligne après
1616 ligne (colMajor=False, par défaut) ou colonne après colonne
1617 (colMajor=True). La variable résultante est de type "asVector" (par
1618 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1621 self.__name = str(name)
1622 self.__check = bool(toBeChecked)
1626 self.__is_vector = False
1627 self.__is_series = False
1629 if asScript is not None:
1630 __Vector, __Series = None, None
1631 if asPersistentVector:
1632 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1634 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1635 elif asDataFile is not None:
1636 __Vector, __Series = None, None
1637 if asPersistentVector:
1638 if colNames is not None:
1639 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1641 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1642 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1643 __Series = numpy.transpose(__Series)
1644 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1645 __Series = numpy.transpose(__Series)
1647 if colNames is not None:
1648 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1650 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1652 __Vector = numpy.ravel(__Vector, order = "F")
1654 __Vector = numpy.ravel(__Vector, order = "C")
1656 __Vector, __Series = asVector, asPersistentVector
1658 if __Vector is not None:
1659 self.__is_vector = True
1660 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1661 self.shape = self.__V.shape
1662 self.size = self.__V.size
1663 elif __Series is not None:
1664 self.__is_series = True
1665 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1666 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1667 if isinstance(__Series, str): __Series = eval(__Series)
1668 for member in __Series:
1669 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1672 if isinstance(self.__V.shape, (tuple, list)):
1673 self.shape = self.__V.shape
1675 self.shape = self.__V.shape()
1676 if len(self.shape) == 1:
1677 self.shape = (self.shape[0],1)
1678 self.size = self.shape[0] * self.shape[1]
1680 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1682 if scheduledBy is not None:
1683 self.__T = scheduledBy
1685 def getO(self, withScheduler=False):
1687 return self.__V, self.__T
1688 elif self.__T is None:
1694 "Vérification du type interne"
1695 return self.__is_vector
1698 "Vérification du type interne"
1699 return self.__is_series
1702 "x.__repr__() <==> repr(x)"
1703 return repr(self.__V)
1706 "x.__str__() <==> str(x)"
1707 return str(self.__V)
1709 # ==============================================================================
1710 class Covariance(object):
1712 Classe générale d'interface de type covariance
1715 name = "GenericCovariance",
1716 asCovariance = None,
1717 asEyeByScalar = None,
1718 asEyeByVector = None,
1721 toBeChecked = False,
1724 Permet de définir une covariance :
1725 - asCovariance : entrée des données, comme une matrice compatible avec
1726 le constructeur de numpy.matrix
1727 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1728 multiplicatif d'une matrice de corrélation identité, aucune matrice
1729 n'étant donc explicitement à donner
1730 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1731 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1732 n'étant donc explicitement à donner
1733 - asCovObject : entrée des données comme un objet python, qui a les
1734 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1735 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1736 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1737 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1738 pleine doit être vérifié
1740 self.__name = str(name)
1741 self.__check = bool(toBeChecked)
1744 self.__is_scalar = False
1745 self.__is_vector = False
1746 self.__is_matrix = False
1747 self.__is_object = False
1749 if asScript is not None:
1750 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1752 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1754 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1756 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1758 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1760 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1762 if __Scalar is not None:
1763 if isinstance(__Scalar, str):
1764 __Scalar = __Scalar.replace(";"," ").replace(","," ").split()
1765 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1766 if numpy.array(__Scalar).size != 1:
1767 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
1768 self.__is_scalar = True
1769 self.__C = numpy.abs( float(__Scalar) )
1772 elif __Vector is not None:
1773 if isinstance(__Vector, str):
1774 __Vector = __Vector.replace(";"," ").replace(","," ").split()
1775 self.__is_vector = True
1776 self.__C = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) )
1777 self.shape = (self.__C.size,self.__C.size)
1778 self.size = self.__C.size**2
1779 elif __Matrix is not None:
1780 self.__is_matrix = True
1781 self.__C = numpy.matrix( __Matrix, float )
1782 self.shape = self.__C.shape
1783 self.size = self.__C.size
1784 elif __Object is not None:
1785 self.__is_object = True
1787 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1788 if not hasattr(self.__C,at):
1789 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1790 if hasattr(self.__C,"shape"):
1791 self.shape = self.__C.shape
1794 if hasattr(self.__C,"size"):
1795 self.size = self.__C.size
1800 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1804 def __validate(self):
1806 if self.__C is None:
1807 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1808 if self.ismatrix() and min(self.shape) != max(self.shape):
1809 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1810 if self.isobject() and min(self.shape) != max(self.shape):
1811 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1812 if self.isscalar() and self.__C <= 0:
1813 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1814 if self.isvector() and (self.__C <= 0).any():
1815 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1816 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1818 L = numpy.linalg.cholesky( self.__C )
1820 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1821 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1823 L = self.__C.cholesky()
1825 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1828 "Vérification du type interne"
1829 return self.__is_scalar
1832 "Vérification du type interne"
1833 return self.__is_vector
1836 "Vérification du type interne"
1837 return self.__is_matrix
1840 "Vérification du type interne"
1841 return self.__is_object
1846 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
1847 elif self.isvector():
1848 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1849 elif self.isscalar():
1850 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1851 elif self.isobject() and hasattr(self.__C,"getI"):
1852 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1854 return None # Indispensable
1859 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1860 elif self.isvector():
1861 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1862 elif self.isscalar():
1863 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1864 elif self.isobject() and hasattr(self.__C,"getT"):
1865 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1867 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
1870 "Décomposition de Cholesky"
1872 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1873 elif self.isvector():
1874 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1875 elif self.isscalar():
1876 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1877 elif self.isobject() and hasattr(self.__C,"cholesky"):
1878 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1880 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
1882 def choleskyI(self):
1883 "Inversion de la décomposition de Cholesky"
1885 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1886 elif self.isvector():
1887 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1888 elif self.isscalar():
1889 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1890 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1891 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1893 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
1896 "Racine carrée matricielle"
1899 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
1900 elif self.isvector():
1901 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1902 elif self.isscalar():
1903 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1904 elif self.isobject() and hasattr(self.__C,"sqrtm"):
1905 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
1907 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
1910 "Inversion de la racine carrée matricielle"
1913 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
1914 elif self.isvector():
1915 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1916 elif self.isscalar():
1917 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1918 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
1919 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
1921 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
1923 def diag(self, msize=None):
1924 "Diagonale de la matrice"
1926 return numpy.diag(self.__C)
1927 elif self.isvector():
1929 elif self.isscalar():
1931 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1933 return self.__C * numpy.ones(int(msize))
1934 elif self.isobject() and hasattr(self.__C,"diag"):
1935 return self.__C.diag()
1937 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
1939 def trace(self, msize=None):
1940 "Trace de la matrice"
1942 return numpy.trace(self.__C)
1943 elif self.isvector():
1944 return float(numpy.sum(self.__C))
1945 elif self.isscalar():
1947 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1949 return self.__C * int(msize)
1950 elif self.isobject():
1951 return self.__C.trace()
1953 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
1955 def asfullmatrix(self, msize=None):
1958 return numpy.asarray(self.__C)
1959 elif self.isvector():
1960 return numpy.asarray( numpy.diag(self.__C), float )
1961 elif self.isscalar():
1963 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1965 return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
1966 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1967 return self.__C.asfullmatrix()
1969 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
1971 def assparsematrix(self):
1979 "x.__repr__() <==> repr(x)"
1980 return repr(self.__C)
1983 "x.__str__() <==> str(x)"
1984 return str(self.__C)
1986 def __add__(self, other):
1987 "x.__add__(y) <==> x+y"
1988 if self.ismatrix() or self.isobject():
1989 return self.__C + numpy.asmatrix(other)
1990 elif self.isvector() or self.isscalar():
1991 _A = numpy.asarray(other)
1992 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1993 return numpy.asmatrix(_A)
1995 def __radd__(self, other):
1996 "x.__radd__(y) <==> y+x"
1997 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1999 def __sub__(self, other):
2000 "x.__sub__(y) <==> x-y"
2001 if self.ismatrix() or self.isobject():
2002 return self.__C - numpy.asmatrix(other)
2003 elif self.isvector() or self.isscalar():
2004 _A = numpy.asarray(other)
2005 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2006 return numpy.asmatrix(_A)
2008 def __rsub__(self, other):
2009 "x.__rsub__(y) <==> y-x"
2010 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2013 "x.__neg__() <==> -x"
2016 def __matmul__(self, other):
2017 "x.__mul__(y) <==> x@y"
2018 if self.ismatrix() and isinstance(other, (int, float)):
2019 return numpy.asarray(self.__C) * other
2020 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2021 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2022 return numpy.ravel(self.__C @ numpy.ravel(other))
2023 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2024 return numpy.asarray(self.__C) @ numpy.asarray(other)
2026 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2027 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2028 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2029 return numpy.ravel(self.__C) * numpy.ravel(other)
2030 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2031 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2033 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2034 elif self.isscalar() and isinstance(other,numpy.matrix):
2035 return numpy.asarray(self.__C * other)
2036 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2037 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2038 return self.__C * numpy.ravel(other)
2040 return self.__C * numpy.asarray(other)
2041 elif self.isobject():
2042 return self.__C.__matmul__(other)
2044 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2046 def __mul__(self, other):
2047 "x.__mul__(y) <==> x*y"
2048 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2049 return self.__C * other
2050 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2051 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2052 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2053 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2054 return self.__C * numpy.asmatrix(other)
2056 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2057 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2058 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2059 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2060 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2061 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2063 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2064 elif self.isscalar() and isinstance(other,numpy.matrix):
2065 return self.__C * other
2066 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2067 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2068 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2070 return self.__C * numpy.asmatrix(other)
2071 elif self.isobject():
2072 return self.__C.__mul__(other)
2074 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2076 def __rmatmul__(self, other):
2077 "x.__rmul__(y) <==> y@x"
2078 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2079 return other * self.__C
2080 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2081 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2082 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2083 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2084 return numpy.asmatrix(other) * self.__C
2086 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2087 elif self.isvector() and isinstance(other,numpy.matrix):
2088 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2089 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2090 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2091 return numpy.asmatrix(numpy.array(other) * self.__C)
2093 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2094 elif self.isscalar() and isinstance(other,numpy.matrix):
2095 return other * self.__C
2096 elif self.isobject():
2097 return self.__C.__rmatmul__(other)
2099 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2101 def __rmul__(self, other):
2102 "x.__rmul__(y) <==> y*x"
2103 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2104 return other * self.__C
2105 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2106 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2107 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2108 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2109 return numpy.asmatrix(other) * self.__C
2111 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2112 elif self.isvector() and isinstance(other,numpy.matrix):
2113 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2114 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2115 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2116 return numpy.asmatrix(numpy.array(other) * self.__C)
2118 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2119 elif self.isscalar() and isinstance(other,numpy.matrix):
2120 return other * self.__C
2121 elif self.isobject():
2122 return self.__C.__rmul__(other)
2124 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2127 "x.__len__() <==> len(x)"
2128 return self.shape[0]
2130 # ==============================================================================
2131 class Observer2Func(object):
2133 Creation d'une fonction d'observateur a partir de son texte
2135 def __init__(self, corps=""):
2136 self.__corps = corps
2137 def func(self,var,info):
2138 "Fonction d'observation"
2141 "Restitution du pointeur de fonction dans l'objet"
2144 # ==============================================================================
2145 class CaseLogger(object):
2147 Conservation des commandes de creation d'un cas
2149 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2150 self.__name = str(__name)
2151 self.__objname = str(__objname)
2152 self.__logSerie = []
2153 self.__switchoff = False
2155 "TUI" :Interfaces._TUIViewer,
2156 "SCD" :Interfaces._SCDViewer,
2157 "YACS":Interfaces._YACSViewer,
2160 "TUI" :Interfaces._TUIViewer,
2161 "COM" :Interfaces._COMViewer,
2163 if __addViewers is not None:
2164 self.__viewers.update(dict(__addViewers))
2165 if __addLoaders is not None:
2166 self.__loaders.update(dict(__addLoaders))
2168 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2169 "Enregistrement d'une commande individuelle"
2170 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2171 if "self" in __keys: __keys.remove("self")
2172 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2174 self.__switchoff = True
2176 self.__switchoff = False
2178 def dump(self, __filename=None, __format="TUI", __upa=""):
2179 "Restitution normalisée des commandes"
2180 if __format in self.__viewers:
2181 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2183 raise ValueError("Dumping as \"%s\" is not available"%__format)
2184 return __formater.dump(__filename, __upa)
2186 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2187 "Chargement normalisé des commandes"
2188 if __format in self.__loaders:
2189 __formater = self.__loaders[__format]()
2191 raise ValueError("Loading as \"%s\" is not available"%__format)
2192 return __formater.load(__filename, __content, __object)
2194 # ==============================================================================
2197 _extraArguments = None,
2198 _sFunction = lambda x: x,
2203 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2204 correspondante de valeurs de la fonction en argument
2206 # Vérifications et définitions initiales
2207 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2208 if not PlatformInfo.isIterable( __xserie ):
2209 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
2211 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2214 __mpWorkers = int(_mpWorkers)
2216 import multiprocessing
2227 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2228 import multiprocessing
2229 with multiprocessing.Pool(__mpWorkers) as pool:
2230 __multiHX = pool.map( _sFunction, _jobs )
2233 # logging.debug("MULTF Internal multiprocessing calculation end")
2235 # logging.debug("MULTF Internal monoprocessing calculation begin")
2237 if _extraArguments is None:
2238 for __xvalue in __xserie:
2239 __multiHX.append( _sFunction( __xvalue ) )
2240 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2241 for __xvalue in __xserie:
2242 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2243 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2244 for __xvalue in __xserie:
2245 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2247 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2248 # logging.debug("MULTF Internal monoprocessing calculation end")
2250 # logging.debug("MULTF Internal multifonction calculations end")
2253 # ==============================================================================
2254 def CostFunction3D(_x,
2255 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2256 _HmX = None, # Simulation déjà faite de Hm(x)
2257 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2262 _SIV = False, # A résorber pour la 8.0
2263 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2264 _nPS = 0, # nbPreviousSteps
2265 _QM = "DA", # QualityMeasure
2266 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2267 _fRt = False, # Restitue ou pas la sortie étendue
2268 _sSc = True, # Stocke ou pas les SSC
2271 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2272 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2273 DFO, QuantileRegression
2279 for k in ["CostFunctionJ",
2285 "SimulatedObservationAtCurrentOptimum",
2286 "SimulatedObservationAtCurrentState",
2290 if hasattr(_SSV[k],"store"):
2291 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
2293 _X = numpy.asmatrix(numpy.ravel( _x )).T
2294 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2295 _SSV["CurrentState"].append( _X )
2297 if _HmX is not None:
2301 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2305 _HX = _Hm( _X, *_arg )
2306 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2308 if "SimulatedObservationAtCurrentState" in _SSC or \
2309 "SimulatedObservationAtCurrentOptimum" in _SSC:
2310 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
2312 if numpy.any(numpy.isnan(_HX)):
2313 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2315 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2316 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2317 if _BI is None or _RI is None:
2318 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2319 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2320 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2321 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2322 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2324 raise ValueError("Observation error covariance matrix has to be properly defined!")
2326 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2327 elif _QM in ["LeastSquares", "LS", "L2"]:
2329 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2330 elif _QM in ["AbsoluteValue", "L1"]:
2332 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2333 elif _QM in ["MaximumError", "ME"]:
2335 Jo = numpy.max( numpy.abs(_Y - _HX) )
2336 elif _QM in ["QR", "Null"]:
2340 raise ValueError("Unknown asked quality measure!")
2342 J = float( Jb ) + float( Jo )
2345 _SSV["CostFunctionJb"].append( Jb )
2346 _SSV["CostFunctionJo"].append( Jo )
2347 _SSV["CostFunctionJ" ].append( J )
2349 if "IndexOfOptimum" in _SSC or \
2350 "CurrentOptimum" in _SSC or \
2351 "SimulatedObservationAtCurrentOptimum" in _SSC:
2352 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2353 if "IndexOfOptimum" in _SSC:
2354 _SSV["IndexOfOptimum"].append( IndexMin )
2355 if "CurrentOptimum" in _SSC:
2356 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2357 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2358 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2363 if _QM in ["QR"]: # Pour le QuantileRegression
2368 # ==============================================================================
# Self-test entry point: only announces the autodiagnostic run when the module
# is executed directly (no effect on import).
if __name__ == "__main__":
    print("\n AUTODIAGNOSTIC\n")