1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
# NOTE(review): the next two lines are surviving fragments of the module
# docstring and module metadata; the enclosing """ delimiters were lost
# when lines were elided from this extract.
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
# General management class for a cache of operator evaluations (x -> H(x)).
# NOTE(review): interior lines of this extract were elided (embedded line
# numbers jump 40, 42, 45, ...), so the constructor signature and parts of
# its body are missing here; do not treat this fragment as complete.
40 class CacheManager(object):
42 Classe générale de gestion d'un cache de calculs
# __init__ fragment: tolerance used for redundancy detection, and the
# maximum cache length ("lenght" spelling is the project's public API
# spelling — keep it for compatibility). -1 means "size the cache later
# from the first stored vector" (see storeValueInX below).
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 Les caractéristiques de tolérance peuvent être modifiées à la création.
51 self.__tolerBP = float(toleranceInRedundancy)
52 self.__lenghtOR = int(lenghtOfRedundancy)
# __initlnOR keeps the initial length so enable() can restore it later.
53 self.__initlnOR = self.__lenghtOR
# Look up a previous evaluation of operator oName at point xValue.
# Scans the cache from newest to oldest; an entry is a tuple
# (x vector, H(x) value, norm of x, operator name) — layout grounded by
# storeValueInX below. NOTE(review): the bodies of the filter branches
# (presumably "continue"/result handling) were elided from this extract.
63 def wasCalculatedIn(self, xValue, oName="" ):
64 "Vérifie l'existence d'un calcul correspondant à la valeur"
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# Quick rejections: argument without .size, different operator name,
# different vector size.
69 if not hasattr(xValue, 'size'):
71 elif (str(oName) != self.__listOPCV[i][3]):
73 elif (xValue.size != self.__listOPCV[i][0].size):
# First-component cheap filter before the full norm comparison.
# NOTE(review): no abs() on the difference — presumably relies on the
# norm test below to catch the remaining cases; confirm intent.
75 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full test: relative distance below tolerance => cache hit.
77 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
79 __HxV = self.__listOPCV[i][1]
# Store, for operator oName, the evaluation HxValue computed at xValue.
# NOTE(review): the closing parentheses of the appended tuple (original
# line ~100) were elided from this extract.
83 def storeValueInX(self, xValue, HxValue, oName="" ):
84 "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
# First store ever (-1 sentinel): size the cache from the vector size,
# capped so very large states do not make the cache unbounded.
85 if self.__lenghtOR < 0:
86 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87 self.__initlnOR = self.__lenghtOR
88 self.__seenNames.append(str(oName))
89 if str(oName) not in self.__seenNames: # Extend the list if the operator name is new
90 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
# NOTE(review): "+=" here vs "=" in the first branch looks asymmetric
# (initlnOR grows by the already-extended lenghtOR) — confirm intended.
91 self.__initlnOR += self.__lenghtOR
92 self.__seenNames.append(str(oName))
# Evict oldest entries (FIFO) once the cache exceeds its length.
93 while len(self.__listOPCV) > self.__lenghtOR:
94 self.__listOPCV.pop(0)
95 self.__listOPCV.append( (
96 copy.copy(numpy.ravel(xValue)), # 0 Previous point
97 copy.copy(HxValue), # 1 Previous value
98 numpy.linalg.norm(xValue), # 2 Norm
99 str(oName), # 3 Operator name
# NOTE(review): tails of two methods whose "def" lines were elided —
# presumably disable() (freeze cache, remember current length) and
# enable() (restore length, reactivate). Confirm against full source.
104 self.__initlnOR = self.__lenghtOR
106 self.__enabled = False
110 self.__lenghtOR = self.__initlnOR
111 self.__enabled = True
113 # ==============================================================================
# General interface class for a simple operator, backed either by a
# Python (multi-)function or by a matrix.
# NOTE(review): interior lines elided (embedded numbers jump); the full
# __init__ signature and several branch bodies are missing here.
114 class Operator(object):
116 Classe générale d'interface de type opérateur simple
# __init__ fragment — keyword parameters:
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 inputAsMultiFunction = False,
129 enableMultiProcess = False,
130 extraArguments = None,
133 On construit un objet de ce type en fournissant, à l'aide de l'un des
134 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
137 - name : nom d'opérateur
138 - fromMethod : argument de type fonction Python
139 - fromMatrix : argument adapté au constructeur numpy.matrix
140 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
141 - inputAsMultiFunction : booléen indiquant une fonction explicitement
142 définie (ou pas) en multi-fonction
143 - extraArguments : arguments supplémentaires passés à la fonction de
144 base et ses dérivées (tuple ou dictionnaire)
146 self.__name = str(name)
# Per-instance call counters (matrix form, function form, cache hits).
147 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
148 self.__AvoidRC = bool( avoidingRedundancy )
149 self.__inputAsMF = bool( inputAsMultiFunction )
150 self.__mpEnabled = bool( enableMultiProcess )
151 self.__extraArgs = extraArguments
# Dispatch: a user multi-function is used as-is; a mono-function is
# wrapped by MultiFonction so every call path is multi-valued.
152 if fromMethod is not None and self.__inputAsMF:
153 self.__Method = fromMethod # logtimer(fromMethod)
155 self.__Type = "Method"
156 elif fromMethod is not None and not self.__inputAsMF:
157 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
159 self.__Type = "Method"
160 elif fromMatrix is not None:
# NOTE(review): numpy.matrix is deprecated and the numpy.float alias was
# removed in NumPy 1.24 — this line breaks on modern NumPy; upstream
# later migrated to numpy.asarray(..., dtype=float). Flagged, not fixed,
# because surrounding elided lines prevent a safe rewrite here.
162 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
163 self.__Type = "Matrix"
# Class-level cache toggles, delegating to the shared Operator.CM cache.
169 def disableAvoidingRedundancy(self):
171 Operator.CM.disable()
173 def enableAvoidingRedundancy(self):
# NOTE(review): only the disable() call of this method survived the
# extract — presumably the elided lines enable the cache when the
# instance was built with avoidingRedundancy=True; confirm.
178 Operator.CM.disable()
# Apply the operator to one value or a series of values, with optional
# precomputed results (HValue) and redundancy-cache reuse.
# NOTE(review): several lines elided (series setup, else-branches); the
# visible logic is: store precomputed values, else per-argument cache
# lookup, matrix product for matrix operators, batched multi-function
# call for function operators, then optional column-stacking.
184 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
186 Permet de restituer le résultat de l'application de l'opérateur à une
187 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
188 argument devant a priori être du bon type.
190 - les arguments par série sont :
191 - xValue : argument adapté pour appliquer l'opérateur
192 - HValue : valeur précalculée de l'opérateur en ce point
193 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# Case 1: results were precomputed — just normalize and (cache) store.
200 if HValue is not None:
204 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
206 if _HValue is not None:
207 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
209 for i in range(len(_HValue)):
210 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
212 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Case 2: compute — try the cache first for each argument.
217 for i, xv in enumerate(_xValue):
219 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
221 __alreadyCalculated = False
223 if __alreadyCalculated:
224 self.__addOneCacheCall()
# Matrix operator: direct product; function operator: queued for one
# batched multi-function call below.
227 if self.__Matrix is not None:
228 self.__addOneMatrixCall()
229 _xv = numpy.matrix(numpy.ravel(xv)).T
230 _hv = self.__Matrix * _xv
232 self.__addOneMethodCall()
236 _HxValue.append( _hv )
# Single batched call for all not-yet-computed points.
238 if len(_xserie)>0 and self.__Matrix is None:
239 if self.__extraArgs is None:
240 _hserie = self.__Method( _xserie ) # MF computation
242 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
243 if not hasattr(_hserie, "pop"):
244 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
250 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally return the series as one array with results as columns.
252 if returnSerieAsArrayMatrix:
253 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
255 if argsAsSerie: return _HxValue
256 else: return _HxValue[-1]
# Apply the operator to (xValue, uValue) pairs; when uValue is None the
# operator is applied to xValue alone. No redundancy cache on this path.
258 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
260 Permet de restituer le résultat de l'application de l'opérateur à des
261 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
262 argument devant a priori être du bon type. Si la uValue est None,
263 on suppose que l'opérateur ne s'applique qu'à xValue.
265 - paires : les arguments par paire sont :
266 - xValue : argument X adapté pour appliquer l'opérateur
267 - uValue : argument U adapté pour appliquer l'opérateur
268 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
270 if argsAsSerie: _xuValue = paires
271 else: _xuValue = (paires,)
272 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix operator: the control u is ignored, only x is multiplied.
274 if self.__Matrix is not None:
276 for paire in _xuValue:
277 _xValue, _uValue = paire
278 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
279 self.__addOneMatrixCall()
280 _HxValue.append( self.__Matrix * _xValue )
# Function operator: pass the full pair when u is set, else x alone,
# in one batched multi-function call.
283 for paire in _xuValue:
284 _xValue, _uValue = paire
285 if _uValue is not None:
286 _xuArgs.append( paire )
288 _xuArgs.append( _xValue )
289 self.__addOneMethodCall( len(_xuArgs) )
290 if self.__extraArgs is None:
291 _HxValue = self.__Method( _xuArgs ) # MF computation
293 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
295 if returnSerieAsArrayMatrix:
296 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
298 if argsAsSerie: return _HxValue
299 else: return _HxValue[-1]
# Apply the operator, valid at point xNominal, to xValue — i.e. evaluate
# a (tangent-like) operator built at xNominal. For a matrix operator the
# nominal point is irrelevant (linear case).
301 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
303 Permet de restituer le résultat de l'application de l'opérateur à une
304 série d'arguments xValue, sachant que l'opérateur est valable en
305 xNominal. Cette méthode se contente d'appliquer, son argument devant a
306 priori être du bon type. Si l'opérateur est linéaire car c'est une
307 matrice, alors il est valable en tout point nominal et xNominal peut
308 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
309 permet d'indiquer que l'argument est multi-paires.
311 - paires : les arguments par paire sont :
312 - xNominal : série d'arguments permettant de donner le point où
313 l'opérateur est construit pour être ensuite appliqué
314 - xValue : série d'arguments adaptés pour appliquer l'opérateur
315 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
317 if argsAsSerie: _nxValue = paires
318 else: _nxValue = (paires,)
319 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Matrix (linear) operator: xNominal is ignored.
321 if self.__Matrix is not None:
323 for paire in _nxValue:
324 _xNominal, _xValue = paire
325 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
326 self.__addOneMatrixCall()
327 _HxValue.append( self.__Matrix * _xValue )
# Function operator: single batched multi-function call on all pairs.
329 self.__addOneMethodCall( len(_nxValue) )
330 if self.__extraArgs is None:
331 _HxValue = self.__Method( _nxValue ) # MF computation
333 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
335 if returnSerieAsArrayMatrix:
336 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
338 if argsAsSerie: return _HxValue
339 else: return _HxValue[-1]
# Return the operator in matrix form; a function-based operator needs an
# operating point (the "UnknownVoidValue" string is the not-given
# sentinel, deliberately not None since None may be a legal point).
341 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
343 Permet de renvoyer l'opérateur sous la forme d'une matrice
345 if self.__Matrix is not None:
346 self.__addOneMatrixCall()
347 mValue = [self.__Matrix,]
348 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
# Series case: evaluate the method at each given point.
351 self.__addOneMethodCall( len(ValueForMethodForm) )
352 for _vfmf in ValueForMethodForm:
353 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
355 self.__addOneMethodCall()
356 mValue = self.__Method(((ValueForMethodForm, None),))
358 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
360 if argsAsSerie: return mValue
361 else: return mValue[-1]
# NOTE(review): the "def shape(self):" line (original ~line 363/364) was
# elided from this extract; these are the docstring and body of a shape
# accessor, only meaningful for matrix-form operators.
365 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
366 la forme d'une matrice
368 if self.__Matrix is not None:
369 return self.__Matrix.shape
371 raise ValueError("Matrix form of the operator is not available, nor the shape")
# Return the evaluation counters: a tuple of 8 counts — instance totals
# (all, matrix, method, cached) then class-wide totals in the same
# order; which=None returns the whole tuple, otherwise one entry.
# NOTE(review): the "__nbcalls = (" opening and ")" closing lines of the
# tuple were elided from this extract.
373 def nbcalls(self, which=None):
375 Renvoie les nombres d'évaluations de l'opérateur
378 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
379 self.__NbCallsAsMatrix,
380 self.__NbCallsAsMethod,
381 self.__NbCallsOfCached,
382 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
383 Operator.NbCallsAsMatrix,
384 Operator.NbCallsAsMethod,
385 Operator.NbCallsOfCached,
387 if which is None: return __nbcalls
388 else: return __nbcalls[which]
390 def __addOneMatrixCall(self):
391 "Comptabilise un appel"
392 self.__NbCallsAsMatrix += 1 # Decompte local
393 Operator.NbCallsAsMatrix += 1 # Decompte global
395 def __addOneMethodCall(self, nb = 1):
396 "Comptabilise un appel"
397 self.__NbCallsAsMethod += nb # Decompte local
398 Operator.NbCallsAsMethod += nb # Decompte global
400 def __addOneCacheCall(self):
401 "Comptabilise un appel"
402 self.__NbCallsOfCached += 1 # Decompte local
403 Operator.NbCallsOfCached += 1 # Decompte global
405 # ==============================================================================
# General interface class for a full operator triple (Direct, Tangent,
# Adjoint), built from a matrix, one function, three functions, or a
# script, plus a parameter dictionary.
# NOTE(review): interior lines elided; the full __init__ signature
# (asMatrix, appliedInX, avoidRC, scheduledBy, toBeChecked, ...) is
# partly missing from this extract.
406 class FullOperator(object):
408 Classe générale d'interface de type opérateur complet
409 (Direct, Linéaire Tangent, Adjoint)
412 name = "GenericFullOperator",
414 asOneFunction = None, # 1 function
415 asThreeFunctions = None, # 3 functions in a dictionary
416 asScript = None, # 1 or 3 function(s) given by script
417 asDict = None, # Parameters
419 extraArguments = None,
421 inputAsMF = False,# function(s) given as multi-functions
426 self.__name = str(name)
427 self.__check = bool(toBeChecked)
428 self.__extraArgs = extraArguments
# Normalize the multiprocessing parameters so that exactly one of the
# derivative/evaluation flags ends up active.
433 if (asDict is not None) and isinstance(asDict, dict):
434 __Parameters.update( asDict )
435 # Priority to EnableMultiProcessingInDerivatives=True
436 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
437 __Parameters["EnableMultiProcessingInDerivatives"] = True
438 __Parameters["EnableMultiProcessingInEvaluation"] = False
439 if "EnableMultiProcessingInDerivatives" not in __Parameters:
440 __Parameters["EnableMultiProcessingInDerivatives"] = False
441 if __Parameters["EnableMultiProcessingInDerivatives"]:
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "EnableMultiProcessingInEvaluation" not in __Parameters:
444 __Parameters["EnableMultiProcessingInEvaluation"] = False
445 if "withIncrement" in __Parameters: # Temporary
446 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Resolve the user input into __Matrix and/or __Function: from a script
# (imported by name), one Direct function (derivatives approximated), or
# a Tangent/Adjoint(/Direct) triple.
# NOTE(review): several branch/condition lines elided in this extract.
448 if asScript is not None:
449 __Matrix, __Function = None, None
451 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
453 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
454 __Function.update({"useApproximatedDerivatives":True})
455 __Function.update(__Parameters)
456 elif asThreeFunctions:
458 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
459 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
460 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
462 __Function.update(__Parameters)
# Direct inputs (not via script):
465 if asOneFunction is not None:
466 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
467 if asOneFunction["Direct"] is not None:
468 __Function = asOneFunction
470 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
472 __Function = { "Direct":asOneFunction }
473 __Function.update({"useApproximatedDerivatives":True})
474 __Function.update(__Parameters)
475 elif asThreeFunctions is not None:
# Full triple given explicitly (Tangent+Adjoint set, no approximation).
476 if isinstance(asThreeFunctions, dict) and \
477 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
478 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
479 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
480 __Function = asThreeFunctions
481 elif isinstance(asThreeFunctions, dict) and \
482 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
483 __Function = asThreeFunctions
484 __Function.update({"useApproximatedDerivatives":True})
486 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Missing Direct falls back to the Tangent function.
487 if "Direct" not in asThreeFunctions:
488 __Function["Direct"] = asThreeFunctions["Tangent"]
489 __Function.update(__Parameters)
# Dead Python-2 guard kept commented out by upstream:
493 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
494 # for k in ("Direct", "Tangent", "Adjoint"):
495 # if k in __Function and hasattr(__Function[k],"__class__"):
496 # if type(__Function[k]) is type(self.__init__):
497 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Build the three Operator objects: via finite-difference approximation
# (FDApproximation) when only Direct is given, directly from the three
# functions, or from a matrix (Adjoint = transpose).
499 if appliedInX is not None and isinstance(appliedInX, dict):
500 __appliedInX = appliedInX
501 elif appliedInX is not None:
# A bare value is wrapped as the "HXb" entry.
502 __appliedInX = {"HXb":appliedInX}
506 if scheduledBy is not None:
507 self.__T = scheduledBy
# Case 1: approximated derivatives around a Direct function — fill the
# finite-difference defaults, then delegate to FDApproximation.
509 if isinstance(__Function, dict) and \
510 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
511 ("Direct" in __Function) and (__Function["Direct"] is not None):
512 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
513 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
514 if "withdX" not in __Function: __Function["withdX"] = None
515 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
516 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
517 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
518 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
519 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
520 from daCore import NumericObjects
521 FDA = NumericObjects.FDApproximation(
523 Function = __Function["Direct"],
524 centeredDF = __Function["CenteredFiniteDifference"],
525 increment = __Function["DifferentialIncrement"],
526 dX = __Function["withdX"],
527 avoidingRedundancy = __Function["withAvoidingRedundancy"],
528 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
529 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
530 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
531 mpWorkers = __Function["NumberOfProcesses"],
532 mfEnabled = __Function["withmfEnabled"],
534 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
535 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
536 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: all three functions given explicitly.
537 elif isinstance(__Function, dict) and \
538 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
539 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
540 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
541 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
542 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: linear operator given as a matrix; Adjoint is the transpose.
# NOTE(review): numpy.float was removed in NumPy 1.24 — see note on the
# Operator class above.
543 elif asMatrix is not None:
544 __matrice = numpy.matrix( __Matrix, numpy.float )
545 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
546 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
547 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
550 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Normalize every AppliedInX entry to a column numpy.matrix.
552 if __appliedInX is not None:
553 self.__FO["AppliedInX"] = {}
554 for key in list(__appliedInX.keys()):
555 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
556 # Case of a true matrix
557 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
558 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
559 # Case of a vector represented as a 2-dimensional array
560 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
562 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
564 self.__FO["AppliedInX"] = None
# NOTE(review): the "def __repr__/__str__" lines were elided from this
# extract; these are their docstrings and bodies, delegating to the
# internal __FO dictionary.
570 "x.__repr__() <==> repr(x)"
571 return repr(self.__FO)
574 "x.__str__() <==> str(x)"
575 return str(self.__FO)
577 # ==============================================================================
# Base class for elementary assimilation algorithms: provides a
# dictionary of initialized persistent storage variables and accessors;
# subclasses must implement "run".
# NOTE(review): interior docstring lines elided from this extract.
578 class Algorithm(object):
580 Classe générale d'interface de type algorithme
582 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
583 d'assimilation, en fournissant un container (dictionnaire) de variables
584 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
586 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
# Build the generic storage variables available to every elementary
# algorithm; the long docstring lists each stored variable's meaning.
# NOTE(review): interior lines elided (docstring delimiters, some
# initializations such as self.__required_inputs closing brace).
588 def __init__(self, name):
590 L'initialisation présente permet de fabriquer des variables de stockage
591 disponibles de manière générique dans les algorithmes élémentaires. Ces
592 variables de stockage sont ensuite conservées dans un dictionnaire
593 interne à l'objet, mais auquel on accède par la méthode "get".
595 Les variables prévues sont :
596 - APosterioriCorrelations : matrice de corrélations de la matrice A
597 - APosterioriCovariance : matrice de covariances a posteriori : A
598 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
599 - APosterioriVariances : vecteur des variances de la matrice A
600 - Analysis : vecteur d'analyse : Xa
601 - BMA : Background moins Analysis : Xa - Xb
602 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
603 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
604 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
605 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
606 - CostFunctionJo : partie observations de la fonction-coût : Jo
607 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
608 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
609 - CurrentOptimum : état optimal courant lors d'itérations
610 - CurrentState : état courant lors d'itérations
611 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
612 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
613 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
614 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
615 - Innovation : l'innovation : d = Y - H(X)
616 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
617 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
618 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
619 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
620 - KalmanGainAtOptimum : gain de Kalman à l'optimum
621 - MahalanobisConsistency : indicateur de consistance des covariances
622 - OMA : Observation moins Analyse : Y - Xa
623 - OMB : Observation moins Background : Y - Xb
624 - ForecastState : état prédit courant lors d'itérations
625 - Residu : dans le cas des algorithmes de vérification
626 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
627 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
628 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
629 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
630 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
631 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
632 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
633 On peut rajouter des variables à stocker dans l'initialisation de
634 l'algorithme élémentaire qui va hériter de cette classe
# Internal bookkeeping: parameters, required inputs, name mappings.
636 logging.debug("%s Initialisation", str(name))
637 self._m = PlatformInfo.SystemUsage()
639 self._name = str( name )
640 self._parameters = {"StoreSupplementaryCalculations":[]}
641 self.__required_parameters = {}
642 self.__required_inputs = {
643 "RequiredInputValues":{"mandatory":(), "optional":()},
644 "ClassificationTags":[],
646 self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
647 self.__canonical_parameter_name = {} # Mapping "lower" -> "correct"
648 self.__canonical_stored_name = {} # Mapping "lower" -> "correct"
# One Persistence container per storable variable, keyed by its name.
650 self.StoredVariables = {}
651 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
652 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
653 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
654 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
655 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
656 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
657 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
658 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
659 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
660 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
661 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
662 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
663 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
664 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
665 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
666 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
667 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
668 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
669 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
670 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
671 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
672 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
673 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
674 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
675 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
676 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
677 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
678 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
679 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
680 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
681 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
682 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
683 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
684 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
685 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
686 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
687 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
688 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
689 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Case-insensitive lookup tables for stored-variable and parameter names.
691 for k in self.StoredVariables:
692 self.__canonical_stored_name[k.lower()] = k
694 for k, v in self.__variable_names_not_public.items():
695 self.__canonical_parameter_name[k.lower()] = k
696 self.__canonical_parameter_name["algorithm"] = "Algorithm"
697 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
699 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
701 logging.debug("%s Lancement", self._name)
702 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
703 self._getTimeState(reset=True)
705 # Mise a jour des paramètres internes avec le contenu de Parameters, en
706 # reprenant les valeurs par défauts pour toutes celles non définies
707 self.__setParameters(Parameters, reset=True)
708 for k, v in self.__variable_names_not_public.items():
709 if k not in self._parameters: self.__setParameters( {k:v} )
711 # Corrections et compléments des vecteurs
712 def __test_vvalue(argument, variable, argname, symbol=None):
713 if symbol is None: symbol = variable
715 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
716 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
717 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
718 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
720 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
722 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
724 __test_vvalue( Xb, "Xb", "Background or initial state" )
725 __test_vvalue( Y, "Y", "Observation" )
726 __test_vvalue( U, "U", "Control" )
728 # Corrections et compléments des covariances
729 def __test_cvalue(argument, variable, argname, symbol=None):
730 if symbol is None: symbol = variable
732 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
733 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
734 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
735 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
737 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
739 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
741 __test_cvalue( B, "B", "Background" )
742 __test_cvalue( R, "R", "Observation" )
743 __test_cvalue( Q, "Q", "Evolution" )
745 # Corrections et compléments des opérateurs
746 def __test_ovalue(argument, variable, argname, symbol=None):
747 if symbol is None: symbol = variable
748 if argument is None or (isinstance(argument,dict) and len(argument)==0):
749 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
750 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
751 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
752 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
754 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
756 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
758 __test_ovalue( HO, "HO", "Observation", "H" )
759 __test_ovalue( EM, "EM", "Evolution", "M" )
760 __test_ovalue( CM, "CM", "Control Model", "C" )
762 # Corrections et compléments des bornes
763 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
764 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
766 self._parameters["Bounds"] = None
768 # Corrections et compléments de l'initialisation en X
769 if "InitializationPoint" in self._parameters:
771 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
772 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
773 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
774 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
775 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
777 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
779 if self._parameters["InitializationPoint"] is None:
780 raise ValueError("Forced initial point can not be set without any given Background or required value")
782 # Correction pour pallier a un bug de TNC sur le retour du Minimum
783 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
784 self.setParameterValue("StoreInternalVariables",True)
786 # Verbosité et logging
787 if logging.getLogger().level < logging.WARNING:
788 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
789 if PlatformInfo.has_scipy:
790 import scipy.optimize
791 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
793 self._parameters["optmessages"] = 15
795 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
796 if PlatformInfo.has_scipy:
797 import scipy.optimize
798 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
800 self._parameters["optmessages"] = 15
    def _post_run(self,_oH=None):
        # Post-processing hook executed after a run: derives variance, standard
        # deviation and correlation outputs from each stored a posteriori
        # covariance matrix, then logs operator-call and resource statistics.
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Normalize the covariance by its diagonal: C = D^(-1/2) A D^(-1/2).
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            # NOTE(review): nbcalls(0) appears to count direct evaluations and
            # nbcalls(3) cache hits, judging from the log text — confirm in Operator.
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
825 def _toStore(self, key):
826 "True if in StoreSupplementaryCalculations, else False"
827 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Return one stored variable identified by its (case-insensitive) key, or
        the dictionary of all available variables when no key is given. The
        Persistence objects themselves are returned, so the individual access
        methods are those of the persistence classes.
        """
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        # Keys are looked up through their canonical lower-cased spelling.
        if key is None or key.lower() not in self.__canonical_stored_name:
            return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        "D.keys() -> list of D's keys"
        # Delegates to the StoredVariables container when it exists.
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        # Stored variables are addressed through their canonical lower-case name.
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
                raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Must implement the elementary assimilation calculation in its most
        natural mathematical form. Concrete algorithms override this method.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
        """
        Declare a required algorithm parameter together with its default
        characteristics (default value, typecast, bounds, allowed values).
        """
            raise ValueError("A name is mandatory to define a required parameter.")
        self.__required_parameters[name] = {
            "typecast" : typecast,
        # Keep a canonical lower-case alias so later lookups are case-insensitive.
        self.__canonical_parameter_name[name.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of required parameter names, or directly the
        full dictionary of required parameters.
        """
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Return the value of a required parameter in a controlled way: the given
        value (or the declared default) is typecast, then checked against the
        declared bounds and allowed-value lists.
        """
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
                __val = typecast( value )
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Bound checks accept scalars as well as array-like values.
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        if listval is not None or listadv is not None:
            # For sequence values, every element must belong to the public
            # (listval) or advanced (listadv) allowed lists.
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                if listval is not None and v in listval: continue
                elif listadv is not None and v in listadv: continue
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    def requireInputArguments(self, mandatory=(), optional=()):
        """
        Impose the required (mandatory/optional) calculation input arguments.
        """
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    def getInputArguments(self):
        """
        Return the tuples of mandatory and optional input argument names.
        """
        return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
    def setAttributes(self, tags=()):
        """
        Append classification tags to the algorithm attributes; the current
        full list is returned in every case (also when tags is empty).
        """
        self.__required_inputs["ClassificationTags"].extend( tags )
        return self.__required_inputs["ClassificationTags"]
    def __setParameters(self, fromDico={}, reset=False):
        """
        Store the received parameters in the internal dictionary.
        """
        # NOTE(review): mutable default argument fromDico={} — it is only read
        # here, but a None default would be safer practice.
        self._parameters.update( fromDico )
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            # Map canonical parameter names back to the caller's exact spelling.
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
    def _getTimeState(self, reset=False):
        """
        Initialize or return the calculation time (cpu/elapsed) in seconds.
        """
            self.__initial_cpu_time = time.process_time()
            self.__initial_elapsed_time = time.perf_counter()
            self.__cpu_time = time.process_time() - self.__initial_cpu_time
            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
            return self.__cpu_time, self.__elapsed_time
    def _StopOnTimeLimit(self, X=None, withReason=False):
        "Stop criteria on time limit: True/False [+ Reason]"
        # X is accepted for signature compatibility with other stop criteria.
        c, e = self._getTimeState()
        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
            __SC, __SR = False, ""
1017 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action-interface class for the algorithm and its parameters.
    """
                 name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        self.updateParameters( asDict, asScript )
        if asAlgorithm is None and asScript is not None:
            # The algorithm name may be read from a user script variable "Algorithm".
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep":False} # duplicated in Algorithm
    def updateParameters(self,
        # Update the stored parameters from a dict or from a user script.
        "Update of the parameters"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Run the assimilation calculation (pure Python scheme)"
        # Start from a clean operator cache so no result leaks between runs.
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Each input either wraps its value behind a .getO() accessor, or is
        # used directly as given.
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # Validate shape compatibility of all pieces before running.
        self.__shape_validate()
        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Run the assimilation calculation through a YACS XML scheme"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        # YACS execution requires the full SALOME/YACS/ADAO environment.
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        # Report parsing and validity problems without raising.
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Check the existence of, and return, a variable or parameter key"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
            # Without a key: every parameter except the non-public ones.
            # NOTE(review): allvariables aliases self.__P (no copy), so the
            # pop() below mutates the stored parameters — confirm this is intended.
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
1171 def pop(self, k, d):
1172 "Necessaire pour le pickling"
1173 return self.__algorithm.pop(k, d)
1175 def getAlgorithmRequiredParameters(self, noDetails=True):
1176 "Renvoie la liste des paramètres requis selon l'algorithme"
1177 return self.__algorithm.getRequiredParameters(noDetails)
1179 def getAlgorithmInputArguments(self):
1180 "Renvoie la liste des entrées requises selon l'algorithme"
1181 return self.__algorithm.getInputArguments()
1183 def getAlgorithmAttributes(self):
1184 "Renvoie la liste des attributs selon l'algorithme"
1185 return self.__algorithm.setAttributes()
    def setObserver(self, __V, __O, __I, __S):
        # Attach observer __O (with hook parameters __I and scheduler __S) to
        # the stored variable named __V.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
            self.__algorithm.StoredVariables[ __V ].setDataObserver(
                HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach observer __O from the stored variable named __V.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
            return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
    def hasObserver(self, __V):
        # Tell whether the stored variable named __V has a data observer attached.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # Body of keys(): union of algorithm variable names and parameter
        # names, minus the non-public internal names.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1229 def __contains__(self, key=None):
1230 "D.__contains__(k) -> True if D has a key k, else False"
1231 return key in self.__algorithm or key in self.__P
        # __repr__ body: combined representation of the algorithm name and parameters.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)
        # __str__ body: same content as __repr__, in readable form.
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm used to carry out the assimilation study. The
        argument is a character string naming an algorithm that performs the
        operation on the fixed arguments.
        """
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly look for the full module file on the search path
        # ------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the full file as a module
        # ------------------------------------------
            # Temporarily prepend the algorithms directory to sys.path.
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the elementary type defined in the file
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        """
        Validate the correct correspondence of the sizes of the variables and
        matrices, when there are any.
        """
        # Each input is reduced to a shape tuple; missing inputs get size 0.
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Checking the conditions
        # ---------------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        # Covariance and evolution matrices must be square.
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        # Cross-compatibility between the operator and the state/observation sizes.
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            # Special case: EnsembleBlue accepts a background given as a series
            # of vectors, which is repacked into a persistence object here.
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    # NOTE(review): numpy.float was removed in NumPy 1.24 — plain
                    # float (or numpy.float64) should be used; numpy.matrix is
                    # also deprecated upstream.
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
                # NOTE(review): a scalar is assigned here while every other
                # *_shape value is a tuple — confirm downstream usage.
                __Xb_shape = min(__B_shape)
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        # Bounds, when given as a sequence, must provide one pair per state component.
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1396 # ==============================================================================
class RegulationAndParameters(object):
    """
    General action-interface class for the regulation and its parameters.
    """
                 name = "GenericRegulation",
        self.__name = str(name)
        # The algorithm name and the parameter dict may come from a user script.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )
    def get(self, key = None):
        "Check the existence of, and return, a variable or parameter key"
            return self.__P[key]
1435 # ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class.
    """
                 name = "GenericObserver",
        self.__name = str(name)
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            # A sequence of variable names: observe each of them, with one info
            # string per variable.
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
                self.__I = (str(withInfo),)
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        if asObsObject is not None:
            self.__O = asObsObject
            # Otherwise build the observer callable from a template, a string
            # or a script, through the UserScript/Observer2Func machinery.
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        # Register the observer on every requested variable of the algorithm.
        for k in range(len(self.__V)):
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
        # __repr__ body: variables then observer object.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)
        # __str__ body: same content in readable form.
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1498 # ==============================================================================
class UserScript(object):
    """
    General interface class for user script text.
    """
                 name = "GenericUserScript",
        self.__name = str(name)
        # The script text comes, in priority order, from an explicit string,
        # a named template, or an importable script file.
        if asString is not None:
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        # __repr__ body: the stored script text.
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)
        # __str__ body: the stored script text as a plain string.
        "x.__str__() <==> str(x)"
        return str(self.__F)
1532 # ==============================================================================
class ExternalParameters(object):
    """
    General interface class for user-defined external parameters.
    """
                 name = "GenericExternalParameters",
        self.__name = str(name)
        self.updateParameters( asDict, asScript )
    def updateParameters(self,
        # Update the stored parameters from a dict or from a user script.
        "Update of the parameters"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def get(self, key = None):
            return self.__P[key]
            return list(self.__P.keys())
        # NOTE(review): the return below appears to belong to a keys() method
        # whose def line is not visible in this extract — confirm.
        return list(self.__P.keys())
1571 def pop(self, k, d):
1572 return self.__P.pop(k, d)
        # Returns a view over the (key, value) pairs of the internal parameter store.
        return self.__P.items()
1577 def __contains__(self, key=None):
1578 "D.__contains__(k) -> True if D has a key k, else False"
1579 return key in self.__P
1581 # ==============================================================================
1582 class State(object):
1584 Classe générale d'interface de type état
1587 name = "GenericVector",
1589 asPersistentVector = None,
1595 toBeChecked = False,
1598 Permet de définir un vecteur :
1599 - asVector : entrée des données, comme un vecteur compatible avec le
1600 constructeur de numpy.matrix, ou "True" si entrée par script.
1601 - asPersistentVector : entrée des données, comme une série de vecteurs
1602 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1603 type Persistence, ou "True" si entrée par script.
1604 - asScript : si un script valide est donné contenant une variable
1605 nommée "name", la variable est de type "asVector" (par défaut) ou
1606 "asPersistentVector" selon que l'une de ces variables est placée à
1608 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1609 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1610 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1611 nommée "name"), on récupère les colonnes et on les range ligne après
1612 ligne (colMajor=False, par défaut) ou colonne après colonne
1613 (colMajor=True). La variable résultante est de type "asVector" (par
1614 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1617 self.__name = str(name)
1618 self.__check = bool(toBeChecked)
1622 self.__is_vector = False
1623 self.__is_series = False
1625 if asScript is not None:
1626 __Vector, __Series = None, None
1627 if asPersistentVector:
1628 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1630 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1631 elif asDataFile is not None:
1632 __Vector, __Series = None, None
1633 if asPersistentVector:
1634 if colNames is not None:
1635 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1637 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1638 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1639 __Series = numpy.transpose(__Series)
1640 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1641 __Series = numpy.transpose(__Series)
1643 if colNames is not None:
1644 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1646 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1648 __Vector = numpy.ravel(__Vector, order = "F")
1650 __Vector = numpy.ravel(__Vector, order = "C")
1652 __Vector, __Series = asVector, asPersistentVector
1654 if __Vector is not None:
1655 self.__is_vector = True
1656 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1657 self.shape = self.__V.shape
1658 self.size = self.__V.size
1659 elif __Series is not None:
1660 self.__is_series = True
1661 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1662 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1663 if isinstance(__Series, str): __Series = eval(__Series)
1664 for member in __Series:
1665 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1668 if isinstance(self.__V.shape, (tuple, list)):
1669 self.shape = self.__V.shape
1671 self.shape = self.__V.shape()
1672 if len(self.shape) == 1:
1673 self.shape = (self.shape[0],1)
1674 self.size = self.shape[0] * self.shape[1]
1676 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1678 if scheduledBy is not None:
1679 self.__T = scheduledBy
1681 def getO(self, withScheduler=False):
1683 return self.__V, self.__T
1684 elif self.__T is None:
1690 "Vérification du type interne"
1691 return self.__is_vector
1694 "Vérification du type interne"
1695 return self.__is_series
1698 "x.__repr__() <==> repr(x)"
1699 return repr(self.__V)
1702 "x.__str__() <==> str(x)"
1703 return str(self.__V)
1705 # ==============================================================================
1706 class Covariance(object):
1708 Classe générale d'interface de type covariance
1711 name = "GenericCovariance",
1712 asCovariance = None,
1713 asEyeByScalar = None,
1714 asEyeByVector = None,
1717 toBeChecked = False,
1720 Permet de définir une covariance :
1721 - asCovariance : entrée des données, comme une matrice compatible avec
1722 le constructeur de numpy.matrix
1723 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1724 multiplicatif d'une matrice de corrélation identité, aucune matrice
1725 n'étant donc explicitement à donner
1726 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1727 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1728 n'étant donc explicitement à donner
1729 - asCovObject : entrée des données comme un objet python, qui a les
1730 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1731 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1732 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1733 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1734 pleine doit être vérifié
1736 self.__name = str(name)
1737 self.__check = bool(toBeChecked)
1740 self.__is_scalar = False
1741 self.__is_vector = False
1742 self.__is_matrix = False
1743 self.__is_object = False
1745 if asScript is not None:
1746 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1748 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1750 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1752 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1754 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1756 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1758 if __Scalar is not None:
1759 if numpy.matrix(__Scalar).size != 1:
1760 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1761 self.__is_scalar = True
1762 self.__C = numpy.abs( float(__Scalar) )
1765 elif __Vector is not None:
1766 self.__is_vector = True
1767 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1768 self.shape = (self.__C.size,self.__C.size)
1769 self.size = self.__C.size**2
1770 elif __Matrix is not None:
1771 self.__is_matrix = True
1772 self.__C = numpy.matrix( __Matrix, float )
1773 self.shape = self.__C.shape
1774 self.size = self.__C.size
1775 elif __Object is not None:
1776 self.__is_object = True
1778 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1779 if not hasattr(self.__C,at):
1780 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1781 if hasattr(self.__C,"shape"):
1782 self.shape = self.__C.shape
1785 if hasattr(self.__C,"size"):
1786 self.size = self.__C.size
1791 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1795 def __validate(self):
1797 if self.__C is None:
1798 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1799 if self.ismatrix() and min(self.shape) != max(self.shape):
1800 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1801 if self.isobject() and min(self.shape) != max(self.shape):
1802 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1803 if self.isscalar() and self.__C <= 0:
1804 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1805 if self.isvector() and (self.__C <= 0).any():
1806 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1807 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1809 L = numpy.linalg.cholesky( self.__C )
1811 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1812 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1814 L = self.__C.cholesky()
1816 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1819 "Vérification du type interne"
1820 return self.__is_scalar
1823 "Vérification du type interne"
1824 return self.__is_vector
1827 "Vérification du type interne"
1828 return self.__is_matrix
1831 "Vérification du type interne"
1832 return self.__is_object
1837 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1838 elif self.isvector():
1839 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1840 elif self.isscalar():
1841 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1842 elif self.isobject():
1843 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1845 return None # Indispensable
1850 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1851 elif self.isvector():
1852 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1853 elif self.isscalar():
1854 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1855 elif self.isobject():
1856 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1859 "Décomposition de Cholesky"
1861 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1862 elif self.isvector():
1863 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1864 elif self.isscalar():
1865 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1866 elif self.isobject() and hasattr(self.__C,"cholesky"):
1867 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1869 def choleskyI(self):
1870 "Inversion de la décomposition de Cholesky"
1872 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1873 elif self.isvector():
1874 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1875 elif self.isscalar():
1876 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1877 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1878 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1881 "Racine carrée matricielle"
1884 return Covariance(self.__name+"C", asCovariance = scipy.linalg.sqrtm(self.__C) )
1885 elif self.isvector():
1886 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1887 elif self.isscalar():
1888 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1889 elif self.isobject() and hasattr(self.__C,"sqrt"):
1890 return Covariance(self.__name+"C", asCovObject = self.__C.sqrt() )
1893 "Inversion de la racine carrée matricielle"
1896 return Covariance(self.__name+"H", asCovariance = scipy.linalg.sqrtm(self.__C).I )
1897 elif self.isvector():
1898 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1899 elif self.isscalar():
1900 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1901 elif self.isobject() and hasattr(self.__C,"sqrtI"):
1902 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtI() )
1904 def diag(self, msize=None):
1905 "Diagonale de la matrice"
1907 return numpy.diag(self.__C)
1908 elif self.isvector():
1910 elif self.isscalar():
1912 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1914 return self.__C * numpy.ones(int(msize))
1915 elif self.isobject():
1916 return self.__C.diag()
1918 def asfullmatrix(self, msize=None):
1921 return numpy.asarray(self.__C)
1922 elif self.isvector():
1923 return numpy.asarray( numpy.diag(self.__C), float )
1924 elif self.isscalar():
1926 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1928 return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
1929 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1930 return self.__C.asfullmatrix()
1932 def trace(self, msize=None):
1933 "Trace de la matrice"
1935 return numpy.trace(self.__C)
1936 elif self.isvector():
1937 return float(numpy.sum(self.__C))
1938 elif self.isscalar():
1940 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1942 return self.__C * int(msize)
1943 elif self.isobject():
1944 return self.__C.trace()
1950 "x.__repr__() <==> repr(x)"
1951 return repr(self.__C)
1954 "x.__str__() <==> str(x)"
1955 return str(self.__C)
1957 def __add__(self, other):
1958 "x.__add__(y) <==> x+y"
1959 if self.ismatrix() or self.isobject():
1960 return self.__C + numpy.asmatrix(other)
1961 elif self.isvector() or self.isscalar():
1962 _A = numpy.asarray(other)
1963 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1964 return numpy.asmatrix(_A)
1966 def __radd__(self, other):
1967 "x.__radd__(y) <==> y+x"
1968 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1970 def __sub__(self, other):
1971 "x.__sub__(y) <==> x-y"
1972 if self.ismatrix() or self.isobject():
1973 return self.__C - numpy.asmatrix(other)
1974 elif self.isvector() or self.isscalar():
1975 _A = numpy.asarray(other)
1976 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1977 return numpy.asmatrix(_A)
1979 def __rsub__(self, other):
1980 "x.__rsub__(y) <==> y-x"
1981 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1984 "x.__neg__() <==> -x"
1987 def __matmul__(self, other):
1988 "x.__mul__(y) <==> x@y"
1989 if self.ismatrix() and isinstance(other, (int, float)):
1990 return numpy.asarray(self.__C) * other
1991 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1992 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1993 return numpy.ravel(self.__C @ numpy.ravel(other))
1994 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
1995 return numpy.asarray(self.__C) @ numpy.asarray(other)
1997 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
1998 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1999 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2000 return numpy.ravel(self.__C) * numpy.ravel(other)
2001 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2002 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2004 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2005 elif self.isscalar() and isinstance(other,numpy.matrix):
2006 return numpy.asarray(self.__C * other)
2007 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2008 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2009 return self.__C * numpy.ravel(other)
2011 return self.__C * numpy.asarray(other)
2012 elif self.isobject():
2013 return self.__C.__matmul__(other)
2015 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2017 def __mul__(self, other):
2018 "x.__mul__(y) <==> x*y"
2019 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2020 return self.__C * other
2021 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2022 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2023 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2024 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2025 return self.__C * numpy.asmatrix(other)
2027 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2028 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2029 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2030 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2031 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2032 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2034 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2035 elif self.isscalar() and isinstance(other,numpy.matrix):
2036 return self.__C * other
2037 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2038 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2039 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2041 return self.__C * numpy.asmatrix(other)
2042 elif self.isobject():
2043 return self.__C.__mul__(other)
2045 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2047 def __rmatmul__(self, other):
2048 "x.__rmul__(y) <==> y@x"
2049 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2050 return other * self.__C
2051 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2052 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2053 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2054 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2055 return numpy.asmatrix(other) * self.__C
2057 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2058 elif self.isvector() and isinstance(other,numpy.matrix):
2059 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2060 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2061 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2062 return numpy.asmatrix(numpy.array(other) * self.__C)
2064 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2065 elif self.isscalar() and isinstance(other,numpy.matrix):
2066 return other * self.__C
2067 elif self.isobject():
2068 return self.__C.__rmatmul__(other)
2070 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2072 def __rmul__(self, other):
2073 "x.__rmul__(y) <==> y*x"
2074 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2075 return other * self.__C
2076 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2077 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2078 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2079 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2080 return numpy.asmatrix(other) * self.__C
2082 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2083 elif self.isvector() and isinstance(other,numpy.matrix):
2084 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2085 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2086 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2087 return numpy.asmatrix(numpy.array(other) * self.__C)
2089 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2090 elif self.isscalar() and isinstance(other,numpy.matrix):
2091 return other * self.__C
2092 elif self.isobject():
2093 return self.__C.__rmul__(other)
2095 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2098 "x.__len__() <==> len(x)"
2099 return self.shape[0]
2101 # ==============================================================================
2102 class Observer2Func(object):
2104 Creation d'une fonction d'observateur a partir de son texte
    def __init__(self, corps=""):
        # corps: source text of the observer; kept as-is for later use by the
        # func/getfunc methods of this class (their bodies are defined below).
        self.__corps = corps
2108 def func(self,var,info):
2109 "Fonction d'observation"
2112 "Restitution du pointeur de fonction dans l'objet"
2115 # ==============================================================================
2116 class CaseLogger(object):
2118 Conservation des commandes de creation d'un cas
2120 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2121 self.__name = str(__name)
2122 self.__objname = str(__objname)
2123 self.__logSerie = []
2124 self.__switchoff = False
2126 "TUI" :Interfaces._TUIViewer,
2127 "SCD" :Interfaces._SCDViewer,
2128 "YACS":Interfaces._YACSViewer,
2131 "TUI" :Interfaces._TUIViewer,
2132 "COM" :Interfaces._COMViewer,
2134 if __addViewers is not None:
2135 self.__viewers.update(dict(__addViewers))
2136 if __addLoaders is not None:
2137 self.__loaders.update(dict(__addLoaders))
2139 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2140 "Enregistrement d'une commande individuelle"
2141 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2142 if "self" in __keys: __keys.remove("self")
2143 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2145 self.__switchoff = True
2147 self.__switchoff = False
2149 def dump(self, __filename=None, __format="TUI", __upa=""):
2150 "Restitution normalisée des commandes"
2151 if __format in self.__viewers:
2152 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2154 raise ValueError("Dumping as \"%s\" is not available"%__format)
2155 return __formater.dump(__filename, __upa)
2157 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2158 "Chargement normalisé des commandes"
2159 if __format in self.__loaders:
2160 __formater = self.__loaders[__format]()
2162 raise ValueError("Loading as \"%s\" is not available"%__format)
2163 return __formater.load(__filename, __content, __object)
2165 # ==============================================================================
2168 _extraArguments = None,
2169 _sFunction = lambda x: x,
2174 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2175 correspondante de valeurs de la fonction en argument
2177 # Vérifications et définitions initiales
2178 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2179 if not PlatformInfo.isIterable( __xserie ):
2180 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
2182 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2185 __mpWorkers = int(_mpWorkers)
2187 import multiprocessing
2198 if _extraArguments is None:
2200 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2201 for __xvalue in __xserie:
2202 _jobs.append( [__xvalue, ] + list(_extraArguments) )
2204 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2205 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2206 import multiprocessing
2207 with multiprocessing.Pool(__mpWorkers) as pool:
2208 __multiHX = pool.map( _sFunction, _jobs )
2211 # logging.debug("MULTF Internal multiprocessing calculation end")
2213 # logging.debug("MULTF Internal monoprocessing calculation begin")
2215 if _extraArguments is None:
2216 for __xvalue in __xserie:
2217 __multiHX.append( _sFunction( __xvalue ) )
2218 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2219 for __xvalue in __xserie:
2220 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2221 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2222 for __xvalue in __xserie:
2223 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2225 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2226 # logging.debug("MULTF Internal monoprocessing calculation end")
2228 # logging.debug("MULTF Internal multifonction calculations end")
2231 # ==============================================================================
2232 def CostFunction3D(_x,
2233 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2234 _HmX = None, # Simulation déjà faite de Hm(x)
2235 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2240 _SIV = False, # A résorber pour la 8.0
2241 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2242 _nPS = 0, # nbPreviousSteps
2243 _QM = "DA", # QualityMeasure
2244 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2245 _fRt = False, # Restitue ou pas la sortie étendue
2246 _sSc = True, # Stocke ou pas les SSC
2249 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2250 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2251 DFO, QuantileRegression
2257 for k in ["CostFunctionJ",
2263 "SimulatedObservationAtCurrentOptimum",
2264 "SimulatedObservationAtCurrentState",
2268 if hasattr(_SSV[k],"store"):
2269 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
2271 _X = numpy.asmatrix(numpy.ravel( _x )).T
2272 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2273 _SSV["CurrentState"].append( _X )
2275 if _HmX is not None:
2279 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2283 _HX = _Hm( _X, *_arg )
2284 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2286 if "SimulatedObservationAtCurrentState" in _SSC or \
2287 "SimulatedObservationAtCurrentOptimum" in _SSC:
2288 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
2290 if numpy.any(numpy.isnan(_HX)):
2291 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2293 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2294 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2295 if _BI is None or _RI is None:
2296 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2297 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2298 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2299 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2300 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2302 raise ValueError("Observation error covariance matrix has to be properly defined!")
2304 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2305 elif _QM in ["LeastSquares", "LS", "L2"]:
2307 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2308 elif _QM in ["AbsoluteValue", "L1"]:
2310 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2311 elif _QM in ["MaximumError", "ME"]:
2313 Jo = numpy.max( numpy.abs(_Y - _HX) )
2314 elif _QM in ["QR", "Null"]:
2318 raise ValueError("Unknown asked quality measure!")
2320 J = float( Jb ) + float( Jo )
2323 _SSV["CostFunctionJb"].append( Jb )
2324 _SSV["CostFunctionJo"].append( Jo )
2325 _SSV["CostFunctionJ" ].append( J )
2327 if "IndexOfOptimum" in _SSC or \
2328 "CurrentOptimum" in _SSC or \
2329 "SimulatedObservationAtCurrentOptimum" in _SSC:
2330 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2331 if "IndexOfOptimum" in _SSC:
2332 _SSV["IndexOfOptimum"].append( IndexMin )
2333 if "CurrentOptimum" in _SSC:
2334 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2335 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2336 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2341 if _QM in ["QR"]: # Pour le QuantileRegression
2346 # ==============================================================================
2347 if __name__ == "__main__":
2348 print('\n AUTODIAGNOSTIC\n')