1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
40 class CacheManager(object):
# Purpose: cache of previously computed (point, operator value) pairs, used to
# skip redundant operator evaluations; a lookup matches when the new point lies
# within a relative tolerance of a cached point for the same operator name.
# NOTE(review): this chunk is a sampled, line-numbered listing — several
# original lines (e.g. the '__init__' header, branch bodies, returns) are
# missing; the comments below annotate only the code that is visible.
42 General class for the management of a computation cache
# Constructor keyword defaults (visible fragment). 'lenght' is a historical
# misspelling of 'length' kept throughout the public API — do not rename.
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 The tolerance characteristics can be modified at creation time.
# __tolerBP : relative tolerance used for redundancy detection
51 self.__tolerBP = float(toleranceInRedundancy)
# __lenghtOR : maximum cache length (-1 means auto-sized on first store)
52 self.__lenghtOR = int(lenghtOfRedundancy)
# __initlnOR : initial length, kept so that disable/enable can restore it
53 self.__initlnOR = self.__lenghtOR
63 def wasCalculatedIn(self, xValue, oName="" ):
64 "Checks whether a computation matching the given value already exists"
# Scan the cache backwards (most recent entries first), bounded by the
# allowed cache length; each entry is (point, value, norm, operator name).
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
69 if not hasattr(xValue, 'size'):
71 elif (str(oName) != self.__listOPCV[i][3]):
73 elif (xValue.size != self.__listOPCV[i][0].size):
# Cheap first-component screening test before the full norm comparison.
75 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full match: distance to the cached point below tolerance * cached norm.
77 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
79 __HxV = self.__listOPCV[i][1]
83 def storeValueInX(self, xValue, HxValue, oName="" ):
84 "Stores, for an operator o, a computation Hx matching the value x"
# Auto-size the cache on first use: capacity 2*min(size,50)+2 entries.
85 if self.__lenghtOR < 0:
86 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87 self.__initlnOR = self.__lenghtOR
88 self.__seenNames.append(str(oName))
89 if str(oName) not in self.__seenNames: # Extend the list if the name is new
90 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
91 self.__initlnOR += self.__lenghtOR
92 self.__seenNames.append(str(oName))
# Evict the oldest entries (FIFO) until the cache fits the allowed length.
93 while len(self.__listOPCV) > self.__lenghtOR:
94 self.__listOPCV.pop(0)
95 self.__listOPCV.append( (
96 copy.copy(numpy.ravel(xValue)), # 0 Previous point
97 copy.copy(HxValue), # 1 Previous value
98 numpy.linalg.norm(xValue), # 2 Norm
99 str(oName), # 3 Operator name
# disable(): remember the current length and turn caching off
# (visible fragment — the method header line is missing from this sample).
104 self.__initlnOR = self.__lenghtOR
106 self.__enabled = False
# enable(): restore the remembered length and turn caching back on.
110 self.__lenghtOR = self.__initlnOR
111 self.__enabled = True
113 # ==============================================================================
114 class Operator(object):
# Purpose: wraps either a user-supplied function (mono- or multi-function) or
# a matrix as a generic "operator" object, with per-instance and class-level
# call counting and optional redundancy caching via the shared 'Operator.CM'
# CacheManager.
# NOTE(review): this chunk is a sampled, line-numbered listing — method
# headers and several statements are missing; comments annotate visible code.
116 General interface class for a simple operator type
# Constructor keyword defaults (visible fragment).
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 inputAsMultiFunction = False,
129 enableMultiProcess = False,
130 extraArguments = None,
133 An object of this type is built by supplying, through one of the
134 two keywords, either a Python function or multi-function, or a
137 - name : operator name
138 - fromMethod : Python function type argument
139 - fromMatrix : argument suitable for the numpy.matrix constructor
140 - avoidingRedundancy : boolean to avoid (or not) redundant computations
141 - inputAsMultiFunction : boolean indicating a function explicitly
142 defined (or not) as a multi-function
143 - extraArguments : extra arguments passed to the base function and
144 its derivatives (tuple or dictionary)
146 self.__name = str(name)
# Per-instance call counters: matrix applications, method calls, cache hits.
147 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
148 self.__AvoidRC = bool( avoidingRedundancy )
149 self.__inputAsMF = bool( inputAsMultiFunction )
150 self.__mpEnabled = bool( enableMultiProcess )
151 self.__extraArgs = extraArguments
# Dispatch on construction mode: native multi-function, mono-function
# adapted into a multi-function, or explicit matrix.
152 if fromMethod is not None and self.__inputAsMF:
153 self.__Method = fromMethod # logtimer(fromMethod)
155 self.__Type = "Method"
156 elif fromMethod is not None and not self.__inputAsMF:
# A mono-function is wrapped by 'MultiFonction' to follow the
# multi-function calling convention (series in, series out).
157 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
159 self.__Type = "Method"
160 elif fromMatrix is not None:
# NOTE(review): 'numpy.float' is a deprecated alias (NumPy >= 1.20) removed
# in NumPy 1.24; this line raises AttributeError on recent NumPy — TODO
# confirm the supported NumPy range and migrate to builtin 'float'.
162 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
163 self.__Type = "Matrix"
# Enable/disable redundancy caching on the shared class-level cache.
169 def disableAvoidingRedundancy(self):
171 Operator.CM.disable()
173 def enableAvoidingRedundancy(self):
# NOTE(review): a disable() call also appears inside the "enable" method —
# presumably the 'else' branch taken when self.__AvoidRC is False; the
# surrounding lines are missing from this sample — verify against the file.
178 Operator.CM.disable()
184 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
186 Returns the result of applying the operator to a
187 series of arguments xValue. This method simply applies, each
188 argument being expected a priori to be of the right type.
190 - the arguments given as a series are:
191 - xValue : argument suitable for applying the operator
192 - HValue : precomputed value of the operator at this point
193 - argsAsSerie : indicates whether the arguments are mono- or multi-valued
# Case 1: precomputed values supplied — store them in the cache directly.
200 if HValue is not None:
204 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
206 if _HValue is not None:
207 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
209 for i in range(len(_HValue)):
210 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
212 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Case 2: compute each value, trying the cache first when enabled.
217 for i, xv in enumerate(_xValue):
219 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
221 __alreadyCalculated = False
223 if __alreadyCalculated:
224 self.__addOneCacheCall()
227 if self.__Matrix is not None:
# Linear case: direct matrix-vector product.
228 self.__addOneMatrixCall()
229 _xv = numpy.matrix(numpy.ravel(xv)).T
230 _hv = self.__Matrix * _xv
232 self.__addOneMethodCall()
236 _HxValue.append( _hv )
# Function case: evaluate all pending points in one multi-function call.
238 if len(_xserie)>0 and self.__Matrix is None:
239 if self.__extraArgs is None:
240 _hserie = self.__Method( _xserie ) # MF computation
242 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
243 if not hasattr(_hserie, "pop"):
244 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
250 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally pack the series as one array, one result per column.
252 if returnSerieAsArrayMatrix:
253 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
255 if argsAsSerie: return _HxValue
256 else: return _HxValue[-1]
258 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
260 Returns the result of applying the operator to
261 (xValue, uValue) pairs. This method simply applies, its
262 argument being expected a priori to be of the right type. If uValue is None,
263 the operator is assumed to apply only to xValue.
265 - paires : the paired arguments are:
266 - xValue : X argument suitable for applying the operator
267 - uValue : U argument suitable for applying the operator
268 - argsAsSerie : indicates whether the argument is mono- or multi-valued
270 if argsAsSerie: _xuValue = paires
271 else: _xuValue = (paires,)
272 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Linear case: the control uValue is ignored, only the matrix is applied.
274 if self.__Matrix is not None:
276 for paire in _xuValue:
277 _xValue, _uValue = paire
278 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
279 self.__addOneMatrixCall()
280 _HxValue.append( self.__Matrix * _xValue )
# Function case: pass the full pair when a control is given, else X alone.
283 for paire in _xuValue:
284 _xValue, _uValue = paire
285 if _uValue is not None:
286 _xuArgs.append( paire )
288 _xuArgs.append( _xValue )
289 self.__addOneMethodCall( len(_xuArgs) )
290 if self.__extraArgs is None:
291 _HxValue = self.__Method( _xuArgs ) # MF computation
293 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
295 if returnSerieAsArrayMatrix:
296 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
298 if argsAsSerie: return _HxValue
299 else: return _HxValue[-1]
301 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
303 Returns the result of applying the operator to a
304 series of arguments xValue, knowing that the operator is valid at
305 xNominal. This method simply applies, its argument being expected a
306 priori to be of the right type. If the operator is linear because it is a
307 matrix, then it is valid at any nominal point and xNominal can
308 be arbitrary. There is only one pair by default, and argsAsSerie
309 allows indicating that the argument is multi-pair.
311 - paires : the paired arguments are:
312 - xNominal : series of arguments giving the point where
313 the operator is built so as to be applied afterwards
314 - xValue : series of arguments suitable for applying the operator
315 - argsAsSerie : indicates whether the argument is mono- or multi-valued
317 if argsAsSerie: _nxValue = paires
318 else: _nxValue = (paires,)
319 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear case: the nominal point is irrelevant, apply the matrix directly.
321 if self.__Matrix is not None:
323 for paire in _nxValue:
324 _xNominal, _xValue = paire
325 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
326 self.__addOneMatrixCall()
327 _HxValue.append( self.__Matrix * _xValue )
# Function case: forward the (xNominal, xValue) pairs to the method.
329 self.__addOneMethodCall( len(_nxValue) )
330 if self.__extraArgs is None:
331 _HxValue = self.__Method( _nxValue ) # MF computation
333 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
335 if returnSerieAsArrayMatrix:
336 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
338 if argsAsSerie: return _HxValue
339 else: return _HxValue[-1]
341 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
343 Returns the operator in matrix form
345 if self.__Matrix is not None:
346 self.__addOneMatrixCall()
347 mValue = [self.__Matrix,]
# For a function-based operator, an operating point is mandatory; the
# string sentinel below marks "no point given".
348 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
351 self.__addOneMethodCall( len(ValueForMethodForm) )
352 for _vfmf in ValueForMethodForm:
353 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
355 self.__addOneMethodCall()
356 mValue = self.__Method(((ValueForMethodForm, None),))
358 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
360 if argsAsSerie: return mValue
361 else: return mValue[-1]
# shape property body (visible fragment — the 'def' line is missing here).
365 Returns the size in numpy form if the operator is available as
366 a matrix
368 if self.__Matrix is not None:
369 return self.__Matrix.shape
371 raise ValueError("Matrix form of the operator is not available, nor the shape")
373 def nbcalls(self, which=None):
375 Returns the numbers of operator evaluations
# Tuple layout: (local total, local matrix, local method, local cached,
# global total, global matrix, global method, global cached).
378 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
379 self.__NbCallsAsMatrix,
380 self.__NbCallsAsMethod,
381 self.__NbCallsOfCached,
382 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
383 Operator.NbCallsAsMatrix,
384 Operator.NbCallsAsMethod,
385 Operator.NbCallsOfCached,
387 if which is None: return __nbcalls
388 else: return __nbcalls[which]
# Internal counter helpers: each updates the instance AND the class totals.
390 def __addOneMatrixCall(self):
391 "Counts one call"
392 self.__NbCallsAsMatrix += 1 # Local count
393 Operator.NbCallsAsMatrix += 1 # Global count
395 def __addOneMethodCall(self, nb = 1):
396 "Counts one call"
397 self.__NbCallsAsMethod += nb # Local count
398 Operator.NbCallsAsMethod += nb # Global count
400 def __addOneCacheCall(self):
401 "Counts one call"
402 self.__NbCallsOfCached += 1 # Local count
403 Operator.NbCallsOfCached += 1 # Global count
405 # ==============================================================================
406 class FullOperator(object):
# Purpose: builds the dictionary self.__FO of the three Operator instances
# ("Direct", "Tangent", "Adjoint") from any of the accepted user inputs:
# a single function (derivatives approximated by finite differences), three
# functions, a script, or a matrix; plus the optional "AppliedInX" values.
# NOTE(review): this chunk is a sampled, line-numbered listing — method
# headers and several statements are missing; comments annotate visible code.
408 General interface class for a full operator type
409 (Direct, Tangent Linear, Adjoint)
# Constructor keyword defaults (visible fragment).
412 name = "GenericFullOperator",
414 asOneFunction = None, # 1 Function
415 asThreeFunctions = None, # 3 Functions in a dictionary
416 asScript = None, # 1 or 3 Function(s) by script
417 asDict = None, # Parameters
419 extraArguments = None,
421 inputAsMF = False,# Function(s) as Multi-Functions
426 self.__name = str(name)
427 self.__check = bool(toBeChecked)
428 self.__extraArgs = extraArguments
# Normalize the multiprocessing-related parameters: derivatives and plain
# evaluation are mutually exclusive, derivatives take precedence.
433 if (asDict is not None) and isinstance(asDict, dict):
434 __Parameters.update( asDict )
435 # Priority given to EnableMultiProcessingInDerivatives=True
436 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
437 __Parameters["EnableMultiProcessingInDerivatives"] = True
438 __Parameters["EnableMultiProcessingInEvaluation"] = False
439 if "EnableMultiProcessingInDerivatives" not in __Parameters:
440 __Parameters["EnableMultiProcessingInDerivatives"] = False
441 if __Parameters["EnableMultiProcessingInDerivatives"]:
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "EnableMultiProcessingInEvaluation" not in __Parameters:
444 __Parameters["EnableMultiProcessingInEvaluation"] = False
# Legacy alias kept for backward compatibility.
445 if "withIncrement" in __Parameters: # Temporary
446 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Input mode 1: operator(s) provided through an external script.
448 if asScript is not None:
449 __Matrix, __Function = None, None
451 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
453 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
454 __Function.update({"useApproximatedDerivatives":True})
455 __Function.update(__Parameters)
456 elif asThreeFunctions:
458 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
459 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
460 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
462 __Function.update(__Parameters)
# Input mode 2: a single Direct function; derivatives will be approximated.
465 if asOneFunction is not None:
466 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
467 if asOneFunction["Direct"] is not None:
468 __Function = asOneFunction
470 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
472 __Function = { "Direct":asOneFunction }
473 __Function.update({"useApproximatedDerivatives":True})
474 __Function.update(__Parameters)
# Input mode 3: explicit Tangent/Adjoint (Direct optional, defaults to Tangent).
475 elif asThreeFunctions is not None:
476 if isinstance(asThreeFunctions, dict) and \
477 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
478 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
479 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
480 __Function = asThreeFunctions
481 elif isinstance(asThreeFunctions, dict) and \
482 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
483 __Function = asThreeFunctions
484 __Function.update({"useApproximatedDerivatives":True})
486 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
487 if "Direct" not in asThreeFunctions:
488 __Function["Direct"] = asThreeFunctions["Tangent"]
489 __Function.update(__Parameters)
# Obsolete Python 2 guard, kept commented out in the original file.
493 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
494 # for k in ("Direct", "Tangent", "Adjoint"):
495 # if k in __Function and hasattr(__Function[k],"__class__"):
496 # if type(__Function[k]) is type(self.__init__):
497 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the optional operating-point values to a dictionary.
499 if appliedInX is not None and isinstance(appliedInX, dict):
500 __appliedInX = appliedInX
501 elif appliedInX is not None:
502 __appliedInX = {"HXb":appliedInX}
506 if scheduledBy is not None:
507 self.__T = scheduledBy
# Build case A: Direct only + approximated derivatives (finite differences).
509 if isinstance(__Function, dict) and \
510 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
511 ("Direct" in __Function) and (__Function["Direct"] is not None):
512 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
513 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
514 if "withdX" not in __Function: __Function["withdX"] = None
515 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
516 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
517 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
518 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
519 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Local import: avoids a circular import between daCore modules at load time.
520 from daCore import NumericObjects
521 FDA = NumericObjects.FDApproximation(
523 Function = __Function["Direct"],
524 centeredDF = __Function["CenteredFiniteDifference"],
525 increment = __Function["DifferentialIncrement"],
526 dX = __Function["withdX"],
527 extraArguments = self.__extraArgs,
528 avoidingRedundancy = __Function["withAvoidingRedundancy"],
529 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
530 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
531 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
532 mpWorkers = __Function["NumberOfProcesses"],
533 mfEnabled = __Function["withmfEnabled"],
535 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
536 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
537 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Build case B: the three functions are given explicitly.
538 elif isinstance(__Function, dict) and \
539 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
540 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
541 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
542 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
543 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Build case C: linear operator from a matrix; Adjoint is its transpose.
544 elif asMatrix is not None:
# NOTE(review): 'numpy.float' is deprecated and removed in NumPy >= 1.24
# (here and on the AppliedInX lines below) — TODO confirm NumPy range.
545 __matrice = numpy.matrix( __Matrix, numpy.float )
546 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
547 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
548 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
551 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Store the operating-point values, normalized to column matrices.
553 if __appliedInX is not None:
554 self.__FO["AppliedInX"] = {}
555 for key in list(__appliedInX.keys()):
556 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
557 # Case of a true matrix
558 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
559 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
560 # Case of a vector represented as an array with 2 dimensions
561 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
563 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
565 self.__FO["AppliedInX"] = None
# Standard object representations delegate to the internal dictionary.
571 "x.__repr__() <==> repr(x)"
572 return repr(self.__FO)
575 "x.__str__() <==> str(x)"
576 return str(self.__FO)
578 # ==============================================================================
579 class Algorithm(object):
581 Classe générale d'interface de type algorithme
583 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
584 d'assimilation, en fournissant un container (dictionnaire) de variables
585 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
587 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
589 def __init__(self, name):
591 L'initialisation présente permet de fabriquer des variables de stockage
592 disponibles de manière générique dans les algorithmes élémentaires. Ces
593 variables de stockage sont ensuite conservées dans un dictionnaire
594 interne à l'objet, mais auquel on accède par la méthode "get".
596 Les variables prévues sont :
597 - APosterioriCorrelations : matrice de corrélations de la matrice A
598 - APosterioriCovariance : matrice de covariances a posteriori : A
599 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
600 - APosterioriVariances : vecteur des variances de la matrice A
601 - Analysis : vecteur d'analyse : Xa
602 - BMA : Background moins Analysis : Xa - Xb
603 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
604 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
605 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
606 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
607 - CostFunctionJo : partie observations de la fonction-coût : Jo
608 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
609 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
610 - CurrentOptimum : état optimal courant lors d'itérations
611 - CurrentState : état courant lors d'itérations
612 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
613 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
614 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
615 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
616 - Innovation : l'innovation : d = Y - H(X)
617 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
618 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
619 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
620 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
621 - KalmanGainAtOptimum : gain de Kalman à l'optimum
622 - MahalanobisConsistency : indicateur de consistance des covariances
623 - OMA : Observation moins Analyse : Y - Xa
624 - OMB : Observation moins Background : Y - Xb
625 - ForecastState : état prédit courant lors d'itérations
626 - Residu : dans le cas des algorithmes de vérification
627 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
628 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
629 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
630 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
631 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
632 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
633 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
634 On peut rajouter des variables à stocker dans l'initialisation de
635 l'algorithme élémentaire qui va hériter de cette classe
637 logging.debug("%s Initialisation", str(name))
638 self._m = PlatformInfo.SystemUsage()
640 self._name = str( name )
641 self._parameters = {"StoreSupplementaryCalculations":[]}
642 self.__required_parameters = {}
643 self.__required_inputs = {
644 "RequiredInputValues":{"mandatory":(), "optional":()},
645 "ClassificationTags":[],
647 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
648 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
649 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
651 self.StoredVariables = {}
652 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
653 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
654 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
655 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
656 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
657 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
658 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
659 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
660 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
661 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
662 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
663 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
664 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
665 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
666 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
667 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
668 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
669 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
670 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
671 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
672 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
673 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
674 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
675 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
676 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
677 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
678 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
679 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
680 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
681 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
682 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
683 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
684 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
685 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
686 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
687 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
688 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
689 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
690 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
692 for k in self.StoredVariables:
693 self.__canonical_stored_name[k.lower()] = k
695 for k, v in self.__variable_names_not_public.items():
696 self.__canonical_parameter_name[k.lower()] = k
697 self.__canonical_parameter_name["algorithm"] = "Algorithm"
698 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
700 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
702 logging.debug("%s Lancement", self._name)
703 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
704 self._getTimeState(reset=True)
706 # Mise a jour des paramètres internes avec le contenu de Parameters, en
707 # reprenant les valeurs par défauts pour toutes celles non définies
708 self.__setParameters(Parameters, reset=True)
709 for k, v in self.__variable_names_not_public.items():
710 if k not in self._parameters: self.__setParameters( {k:v} )
712 # Corrections et compléments des vecteurs
713 def __test_vvalue(argument, variable, argname, symbol=None):
714 if symbol is None: symbol = variable
716 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
717 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
718 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
719 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
721 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
723 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
725 __test_vvalue( Xb, "Xb", "Background or initial state" )
726 __test_vvalue( Y, "Y", "Observation" )
727 __test_vvalue( U, "U", "Control" )
729 # Corrections et compléments des covariances
730 def __test_cvalue(argument, variable, argname, symbol=None):
731 if symbol is None: symbol = variable
733 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
734 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
735 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
736 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
738 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
740 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
742 __test_cvalue( B, "B", "Background" )
743 __test_cvalue( R, "R", "Observation" )
744 __test_cvalue( Q, "Q", "Evolution" )
746 # Corrections et compléments des opérateurs
747 def __test_ovalue(argument, variable, argname, symbol=None):
748 if symbol is None: symbol = variable
749 if argument is None or (isinstance(argument,dict) and len(argument)==0):
750 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
751 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
752 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
753 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
755 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
757 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
759 __test_ovalue( HO, "HO", "Observation", "H" )
760 __test_ovalue( EM, "EM", "Evolution", "M" )
761 __test_ovalue( CM, "CM", "Control Model", "C" )
763 # Corrections et compléments des bornes
764 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
765 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
767 self._parameters["Bounds"] = None
769 # Corrections et compléments de l'initialisation en X
770 if "InitializationPoint" in self._parameters:
772 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
773 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
774 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
775 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
776 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
778 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
780 if self._parameters["InitializationPoint"] is None:
781 raise ValueError("Forced initial point can not be set without any given Background or required value")
783 # Correction pour pallier a un bug de TNC sur le retour du Minimum
784 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
785 self.setParameterValue("StoreInternalVariables",True)
787 # Verbosité et logging
788 if logging.getLogger().level < logging.WARNING:
789 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
790 if PlatformInfo.has_scipy:
791 import scipy.optimize
792 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
794 self._parameters["optmessages"] = 15
796 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
797 if PlatformInfo.has_scipy:
798 import scipy.optimize
799 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
801 self._parameters["optmessages"] = 15
    def _post_run(self,_oH=None):
        "Post-processing of the computation"
        # If requested, derive supplementary a posteriori quantities from each
        # stored a posteriori covariance matrix _A.
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    # Variances are the diagonal of the covariance matrix
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Standard deviations = square roots of the variances
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Correlation matrix C = D^(-1/2) . A . D^(-1/2), D = diag(A)
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        # Log observation operator evaluation and cache-hit counters when the
        # operator dictionary is provided
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
826 def _toStore(self, key):
827 "True if in StoreSupplementaryCalculations, else False"
828 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Return one of the stored variables identified by the key, or the
        dictionary of all the available variables when no key is given. The
        variables are returned directly as objects, so the individual access
        methods are the ones of the persistence classes.
        """
        # NOTE(review): the enclosing "if key is not None:"/"else:" lines are
        # not visible in this excerpt — confirm against the full file.
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        # NOTE(review): the body of this branch (presumably "return False") is
        # not visible in this excerpt.
        if key is None or key.lower() not in self.__canonical_stored_name:
        # Lookup is case-insensitive through the canonical name mapping
        return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        # NOTE(review): the enclosing "def keys(self):" header is not visible
        # in this excerpt.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        # NOTE(review): the enclosing "def pop(self, k, d):" header and the
        # fallback branch lines are not visible in this excerpt.
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
                raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Must implement the elementary assimilation computation in its most
        natural mathematical form. Abstract here: concrete algorithms
        override this method.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
        """
        Define a required parameter of the algorithm and its default
        characteristics (default value, typecast, bounds, allowed values).
        """
        # Raised when no name is given (guard "if name is None:" not visible
        # in this excerpt)
        raise ValueError("A name is mandatory to define a required parameter.")
        # Register the parameter descriptor (other dict entries elided here)
        self.__required_parameters[name] = {
            "typecast" : typecast,
        # Case-insensitive canonical name mapping
        self.__canonical_parameter_name[name.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of the names of the required parameters, or
        directly the dictionary of the required parameters when noDetails is
        False.
        """
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Return the value of a required parameter in a controlled way:
        typecast, min/max bound checks and allowed-value list checks are
        applied; raises ValueError on any violation.
        """
        # Resolve the case-insensitive canonical parameter name
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        # Choose the effective value: given value, else the declared default
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
                __val = typecast( value )
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Bound checks (element-wise for sequence values)
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        # Allowed-value checks: listval is the public list, listadv the
        # advanced (undocumented) one
        if listval is not None or listadv is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                if listval is not None and v in listval: continue
                elif listadv is not None and v in listadv: continue
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    def requireInputArguments(self, mandatory=(), optional=()):
        """
        Impose the required (mandatory/optional) input computation arguments.
        """
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    def getInputArguments(self):
        """
        Return the lists (mandatory, optional) of the required input
        computation arguments.
        """
        return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
    def setAttributes(self, tags=()):
        """
        Append classification tag attributes.
        Return the current list in all cases.
        """
        self.__required_inputs["ClassificationTags"].extend( tags )
        return self.__required_inputs["ClassificationTags"]
    def __setParameters(self, fromDico={}, reset=False):
        """
        Store the received parameters into the internal dictionary.
        """
        # NOTE(review): mutable default argument fromDico={}; it appears only
        # to be read here, but confirm no caller mutates it.
        self._parameters.update( fromDico )
        # Map canonical (case-normalized) names back to the user-given keys
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        # Set every declared parameter: user value when given, default otherwise
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
    def _getTimeState(self, reset=False):
        """
        Initialize or return the computation time (cpu/elapsed) in seconds.
        """
        # Reset branch: record the reference instants (enclosing "if reset:"
        # line not visible in this excerpt)
            self.__initial_cpu_time = time.process_time()
            self.__initial_elapsed_time = time.perf_counter()
        # Otherwise return the elapsed durations since the reference instants
            self.__cpu_time = time.process_time() - self.__initial_cpu_time
            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
            return self.__cpu_time, self.__elapsed_time
    def _StopOnTimeLimit(self, X=None, withReason=False):
        "Stop criteria on time limit: True/False [+ Reason]"
        # c = cpu time, e = elapsed time, both in seconds
        c, e = self._getTimeState()
        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
            # Default: no stop condition reached ("else:" line not visible in
            # this excerpt)
            __SC, __SR = False, ""
1018 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action interface class for an algorithm and its parameters.
    """
    # NOTE(review): the "def __init__(self," header and part of its signature
    # are not visible in this excerpt.
                 name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        self.updateParameters( asDict, asScript )
        # The algorithm choice comes either from the argument or from a script
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm

    def updateParameters(self,
        "Parameters update"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def executePythonScheme(self, asDictAO = None):
        "Runs the assimilation computation"
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each input: use its getO() accessor when available, otherwise
        # take the object as-is
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # Check the size/shape consistency of all inputs before running
        self.__shape_validate()
        self.__algorithm.run(
            Parameters = self.__P,

    def executeYACSScheme(self, FileName=None):
        "Runs the assimilation computation through a YACS scheme"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        # Report parsing and validity diagnostics before execution
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())

    def get(self, key = None):
        "Checks the existence of a variable or parameter key"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
            # No key: return all public parameters (private names removed)
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)

    def pop(self, k, d):
        "Required for pickling"
        return self.__algorithm.pop(k, d)

    def getAlgorithmRequiredParameters(self, noDetails=True):
        "Returns the list of required parameters according to the algorithm"
        return self.__algorithm.getRequiredParameters(noDetails)

    def getAlgorithmInputArguments(self):
        "Returns the list of required input arguments according to the algorithm"
        return self.__algorithm.getInputArguments()

    def getAlgorithmAttributes(self):
        "Returns the list of attributes according to the algorithm"
        return self.__algorithm.setAttributes()

    def setObserver(self, __V, __O, __I, __S):
        # Attach observer __O (with info __I, scheduling __S) to variable __V
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            HookParameters = __I,

    def removeObserver(self, __V, __O, __A = False):
        # Detach observer __O from variable __V (__A: remove all when True)
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(

    def hasObserver(self, __V):
        # True if variable __V currently has a data observer attached
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()

        # NOTE(review): the enclosing "def keys(self):" header is not visible
        # in this excerpt.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__algorithm or key in self.__P

        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)

        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)

    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm to be used to carry out the assimilation study.
        The argument is a character string referring to the name of an
        algorithm performing the operation on the fixed arguments.
        """
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete file
        # ---------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------
            # Temporarily put the algorithm directory first on sys.path
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the elementary type of the file
        # --------------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()

    def __shape_validate(self):
        """
        Validate the correct correspondence of the sizes of the variables and
        of the matrices, if any.
        """
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Check the conditions
        # --------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        # Covariance matrices must be square
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        # Cross-compatibility between operators, states and covariances
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            # Special case: EnsembleBlue accepts a background given as a
            # series of members, reshaped into a persistent vector
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    # NOTE(review): numpy.float was removed in NumPy 1.24 and
                    # numpy.matrix is deprecated; this line needs updating for
                    # modern NumPy (use float / ndarray) — confirm before changing.
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
                __Xb_shape = min(__B_shape)
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        # One pair of bounds is required per state component
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1397 # ==============================================================================
1398 class RegulationAndParameters(object):
1400 Classe générale d'interface d'action pour la régulation et ses paramètres
1403 name = "GenericRegulation",
1410 self.__name = str(name)
1413 if asAlgorithm is None and asScript is not None:
1414 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1416 __Algo = asAlgorithm
1418 if asDict is None and asScript is not None:
1419 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1423 if __Dict is not None:
1424 self.__P.update( dict(__Dict) )
1426 if __Algo is not None:
1427 self.__P.update( {"Algorithm":str(__Algo)} )
1429 def get(self, key = None):
1430 "Vérifie l'existence d'une clé de variable ou de paramètres"
1432 return self.__P[key]
1436 # ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class.
    """
    # NOTE(review): the "def __init__(self," header and part of its signature
    # are not visible in this excerpt.
            name = "GenericObserver",
        self.__name = str(name)
        # Normalize the observed variable name(s) into tuple __V and the
        # associated info strings into tuple __I
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
                self.__I = (str(withInfo),)
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        # The observer callable comes either from the given object or is built
        # from a user script/template/string
        if asObsObject is not None:
            self.__O = asObsObject
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        # Register the observer on each requested variable of the algorithm
        for k in range(len(self.__V)):
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1499 # ==============================================================================
class UserScript(object):
    """
    General interface class for user script text.
    """
    # NOTE(review): the "def __init__(self," header and part of its signature
    # are not visible in this excerpt.
            name = "GenericUserScript",
        self.__name = str(name)
        # The script text __F comes, in order of priority, from: a direct
        # string, a named template, or an external script file
        if asString is not None:
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()

        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

        "x.__str__() <==> str(x)"
        return str(self.__F)
1533 # ==============================================================================
class ExternalParameters(object):
    """
    General interface class for user-defined external parameters.
    """
    # NOTE(review): the "def __init__(self," header and part of its signature
    # are not visible in this excerpt.
            name = "GenericExternalParameters",
        self.__name = str(name)
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
        "Parameters update"
        # The parameters come either from the argument or from a script
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
            return self.__P[key]
            return list(self.__P.keys())
        return list(self.__P.keys())

    def pop(self, k, d):
        return self.__P.pop(k, d)

        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1582 # ==============================================================================
1583 class State(object):
1585 Classe générale d'interface de type état
1588 name = "GenericVector",
1590 asPersistentVector = None,
1596 toBeChecked = False,
1599 Permet de définir un vecteur :
1600 - asVector : entrée des données, comme un vecteur compatible avec le
1601 constructeur de numpy.matrix, ou "True" si entrée par script.
1602 - asPersistentVector : entrée des données, comme une série de vecteurs
1603 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1604 type Persistence, ou "True" si entrée par script.
1605 - asScript : si un script valide est donné contenant une variable
1606 nommée "name", la variable est de type "asVector" (par défaut) ou
1607 "asPersistentVector" selon que l'une de ces variables est placée à
1609 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1610 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1611 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1612 nommée "name"), on récupère les colonnes et on les range ligne après
1613 ligne (colMajor=False, par défaut) ou colonne après colonne
1614 (colMajor=True). La variable résultante est de type "asVector" (par
1615 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1618 self.__name = str(name)
1619 self.__check = bool(toBeChecked)
1623 self.__is_vector = False
1624 self.__is_series = False
1626 if asScript is not None:
1627 __Vector, __Series = None, None
1628 if asPersistentVector:
1629 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1631 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1632 elif asDataFile is not None:
1633 __Vector, __Series = None, None
1634 if asPersistentVector:
1635 if colNames is not None:
1636 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1638 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1639 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1640 __Series = numpy.transpose(__Series)
1641 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1642 __Series = numpy.transpose(__Series)
1644 if colNames is not None:
1645 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1647 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1649 __Vector = numpy.ravel(__Vector, order = "F")
1651 __Vector = numpy.ravel(__Vector, order = "C")
1653 __Vector, __Series = asVector, asPersistentVector
1655 if __Vector is not None:
1656 self.__is_vector = True
1657 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1658 self.shape = self.__V.shape
1659 self.size = self.__V.size
1660 elif __Series is not None:
1661 self.__is_series = True
1662 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1663 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1664 if isinstance(__Series, str): __Series = eval(__Series)
1665 for member in __Series:
1666 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1669 if isinstance(self.__V.shape, (tuple, list)):
1670 self.shape = self.__V.shape
1672 self.shape = self.__V.shape()
1673 if len(self.shape) == 1:
1674 self.shape = (self.shape[0],1)
1675 self.size = self.shape[0] * self.shape[1]
1677 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1679 if scheduledBy is not None:
1680 self.__T = scheduledBy
1682 def getO(self, withScheduler=False):
1684 return self.__V, self.__T
1685 elif self.__T is None:
1691 "Vérification du type interne"
1692 return self.__is_vector
1695 "Vérification du type interne"
1696 return self.__is_series
1699 "x.__repr__() <==> repr(x)"
1700 return repr(self.__V)
1703 "x.__str__() <==> str(x)"
1704 return str(self.__V)
1706 # ==============================================================================
1707 class Covariance(object):
1709 Classe générale d'interface de type covariance
1712 name = "GenericCovariance",
1713 asCovariance = None,
1714 asEyeByScalar = None,
1715 asEyeByVector = None,
1718 toBeChecked = False,
1721 Permet de définir une covariance :
1722 - asCovariance : entrée des données, comme une matrice compatible avec
1723 le constructeur de numpy.matrix
1724 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1725 multiplicatif d'une matrice de corrélation identité, aucune matrice
1726 n'étant donc explicitement à donner
1727 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1728 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1729 n'étant donc explicitement à donner
1730 - asCovObject : entrée des données comme un objet python, qui a les
1731 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1732 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1733 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1734 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1735 pleine doit être vérifié
1737 self.__name = str(name)
1738 self.__check = bool(toBeChecked)
1741 self.__is_scalar = False
1742 self.__is_vector = False
1743 self.__is_matrix = False
1744 self.__is_object = False
1746 if asScript is not None:
1747 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1749 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1751 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1753 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1755 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1757 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1759 if __Scalar is not None:
1760 if numpy.matrix(__Scalar).size != 1:
1761 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1762 self.__is_scalar = True
1763 self.__C = numpy.abs( float(__Scalar) )
1766 elif __Vector is not None:
1767 self.__is_vector = True
1768 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1769 self.shape = (self.__C.size,self.__C.size)
1770 self.size = self.__C.size**2
1771 elif __Matrix is not None:
1772 self.__is_matrix = True
1773 self.__C = numpy.matrix( __Matrix, float )
1774 self.shape = self.__C.shape
1775 self.size = self.__C.size
1776 elif __Object is not None:
1777 self.__is_object = True
1779 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1780 if not hasattr(self.__C,at):
1781 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1782 if hasattr(self.__C,"shape"):
1783 self.shape = self.__C.shape
1786 if hasattr(self.__C,"size"):
1787 self.size = self.__C.size
1792 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1796 def __validate(self):
1798 if self.__C is None:
1799 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1800 if self.ismatrix() and min(self.shape) != max(self.shape):
1801 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1802 if self.isobject() and min(self.shape) != max(self.shape):
1803 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1804 if self.isscalar() and self.__C <= 0:
1805 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1806 if self.isvector() and (self.__C <= 0).any():
1807 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1808 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1810 L = numpy.linalg.cholesky( self.__C )
1812 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1813 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1815 L = self.__C.cholesky()
1817 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1820 "Vérification du type interne"
1821 return self.__is_scalar
1824 "Vérification du type interne"
1825 return self.__is_vector
1828 "Vérification du type interne"
1829 return self.__is_matrix
1832 "Vérification du type interne"
1833 return self.__is_object
1838 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1839 elif self.isvector():
1840 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1841 elif self.isscalar():
1842 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1843 elif self.isobject():
1844 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1846 return None # Indispensable
1851 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1852 elif self.isvector():
1853 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1854 elif self.isscalar():
1855 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1856 elif self.isobject():
1857 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1860 "Décomposition de Cholesky"
1862 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1863 elif self.isvector():
1864 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1865 elif self.isscalar():
1866 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1867 elif self.isobject() and hasattr(self.__C,"cholesky"):
1868 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1870 def choleskyI(self):
1871 "Inversion de la décomposition de Cholesky"
1873 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1874 elif self.isvector():
1875 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1876 elif self.isscalar():
1877 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1878 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1879 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1882 "Racine carrée matricielle"
1885 return Covariance(self.__name+"C", asCovariance = scipy.linalg.sqrtm(self.__C) )
1886 elif self.isvector():
1887 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1888 elif self.isscalar():
1889 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1890 elif self.isobject() and hasattr(self.__C,"sqrt"):
1891 return Covariance(self.__name+"C", asCovObject = self.__C.sqrt() )
1894 "Inversion de la racine carrée matricielle"
1897 return Covariance(self.__name+"H", asCovariance = scipy.linalg.sqrtm(self.__C).I )
1898 elif self.isvector():
1899 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1900 elif self.isscalar():
1901 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1902 elif self.isobject() and hasattr(self.__C,"sqrtI"):
1903 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtI() )
1905 def diag(self, msize=None):
1906 "Diagonale de la matrice"
1908 return numpy.diag(self.__C)
1909 elif self.isvector():
1911 elif self.isscalar():
1913 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1915 return self.__C * numpy.ones(int(msize))
1916 elif self.isobject():
1917 return self.__C.diag()
1919 def asfullmatrix(self, msize=None):
1922 return numpy.asarray(self.__C)
1923 elif self.isvector():
1924 return numpy.asarray( numpy.diag(self.__C), float )
1925 elif self.isscalar():
1927 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1929 return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
1930 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1931 return self.__C.asfullmatrix()
1933 def trace(self, msize=None):
1934 "Trace de la matrice"
1936 return numpy.trace(self.__C)
1937 elif self.isvector():
1938 return float(numpy.sum(self.__C))
1939 elif self.isscalar():
1941 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1943 return self.__C * int(msize)
1944 elif self.isobject():
1945 return self.__C.trace()
1951 "x.__repr__() <==> repr(x)"
1952 return repr(self.__C)
1955 "x.__str__() <==> str(x)"
1956 return str(self.__C)
1958 def __add__(self, other):
1959 "x.__add__(y) <==> x+y"
1960 if self.ismatrix() or self.isobject():
1961 return self.__C + numpy.asmatrix(other)
1962 elif self.isvector() or self.isscalar():
1963 _A = numpy.asarray(other)
1964 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1965 return numpy.asmatrix(_A)
    def __radd__(self, other):
        "x.__radd__(y) <==> y+x"
        # Reflected addition is not supported: it always raises, whatever the
        # type of the left operand.
        raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1971 def __sub__(self, other):
1972 "x.__sub__(y) <==> x-y"
1973 if self.ismatrix() or self.isobject():
1974 return self.__C - numpy.asmatrix(other)
1975 elif self.isvector() or self.isscalar():
1976 _A = numpy.asarray(other)
1977 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1978 return numpy.asmatrix(_A)
    def __rsub__(self, other):
        "x.__rsub__(y) <==> y-x"
        # Reflected subtraction is not supported: it always raises, whatever
        # the type of the left operand.
        raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1985 "x.__neg__() <==> -x"
1988 def __matmul__(self, other):
1989 "x.__mul__(y) <==> x@y"
1990 if self.ismatrix() and isinstance(other, (int, float)):
1991 return numpy.asarray(self.__C) * other
1992 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1993 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1994 return numpy.ravel(self.__C @ numpy.ravel(other))
1995 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
1996 return numpy.asarray(self.__C) @ numpy.asarray(other)
1998 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
1999 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2000 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2001 return numpy.ravel(self.__C) * numpy.ravel(other)
2002 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2003 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2005 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2006 elif self.isscalar() and isinstance(other,numpy.matrix):
2007 return numpy.asarray(self.__C * other)
2008 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2009 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2010 return self.__C * numpy.ravel(other)
2012 return self.__C * numpy.asarray(other)
2013 elif self.isobject():
2014 return self.__C.__matmul__(other)
2016 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2018 def __mul__(self, other):
2019 "x.__mul__(y) <==> x*y"
2020 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2021 return self.__C * other
2022 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2023 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2024 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2025 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2026 return self.__C * numpy.asmatrix(other)
2028 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2029 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2030 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2031 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2032 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2033 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2035 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2036 elif self.isscalar() and isinstance(other,numpy.matrix):
2037 return self.__C * other
2038 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2039 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2040 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2042 return self.__C * numpy.asmatrix(other)
2043 elif self.isobject():
2044 return self.__C.__mul__(other)
2046 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2048 def __rmatmul__(self, other):
2049 "x.__rmul__(y) <==> y@x"
2050 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2051 return other * self.__C
2052 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2053 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2054 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2055 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2056 return numpy.asmatrix(other) * self.__C
2058 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2059 elif self.isvector() and isinstance(other,numpy.matrix):
2060 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2061 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2062 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2063 return numpy.asmatrix(numpy.array(other) * self.__C)
2065 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2066 elif self.isscalar() and isinstance(other,numpy.matrix):
2067 return other * self.__C
2068 elif self.isobject():
2069 return self.__C.__rmatmul__(other)
2071 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2073 def __rmul__(self, other):
2074 "x.__rmul__(y) <==> y*x"
2075 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2076 return other * self.__C
2077 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2078 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2079 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2080 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2081 return numpy.asmatrix(other) * self.__C
2083 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2084 elif self.isvector() and isinstance(other,numpy.matrix):
2085 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2086 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2087 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2088 return numpy.asmatrix(numpy.array(other) * self.__C)
2090 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2091 elif self.isscalar() and isinstance(other,numpy.matrix):
2092 return other * self.__C
2093 elif self.isobject():
2094 return self.__C.__rmul__(other)
2096 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2099 "x.__len__() <==> len(x)"
2100 return self.shape[0]
2102 # ==============================================================================
2103 class Observer2Func(object):
2105 Creation d'une fonction d'observateur a partir de son texte
2107 def __init__(self, corps=""):
2108 self.__corps = corps
2109 def func(self,var,info):
2110 "Fonction d'observation"
2113 "Restitution du pointeur de fonction dans l'objet"
2116 # ==============================================================================
2117 class CaseLogger(object):
2119 Conservation des commandes de creation d'un cas
2121 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2122 self.__name = str(__name)
2123 self.__objname = str(__objname)
2124 self.__logSerie = []
2125 self.__switchoff = False
2127 "TUI" :Interfaces._TUIViewer,
2128 "SCD" :Interfaces._SCDViewer,
2129 "YACS":Interfaces._YACSViewer,
2132 "TUI" :Interfaces._TUIViewer,
2133 "COM" :Interfaces._COMViewer,
2135 if __addViewers is not None:
2136 self.__viewers.update(dict(__addViewers))
2137 if __addLoaders is not None:
2138 self.__loaders.update(dict(__addLoaders))
2140 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2141 "Enregistrement d'une commande individuelle"
2142 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2143 if "self" in __keys: __keys.remove("self")
2144 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2146 self.__switchoff = True
2148 self.__switchoff = False
2150 def dump(self, __filename=None, __format="TUI", __upa=""):
2151 "Restitution normalisée des commandes"
2152 if __format in self.__viewers:
2153 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2155 raise ValueError("Dumping as \"%s\" is not available"%__format)
2156 return __formater.dump(__filename, __upa)
2158 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2159 "Chargement normalisé des commandes"
2160 if __format in self.__loaders:
2161 __formater = self.__loaders[__format]()
2163 raise ValueError("Loading as \"%s\" is not available"%__format)
2164 return __formater.load(__filename, __content, __object)
2166 # ==============================================================================
2169 _extraArguments = None,
2170 _sFunction = lambda x: x,
2175 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2176 correspondante de valeurs de la fonction en argument
2178 # Vérifications et définitions initiales
2179 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2180 if not PlatformInfo.isIterable( __xserie ):
2181 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
2183 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2186 __mpWorkers = int(_mpWorkers)
2188 import multiprocessing
2199 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2200 import multiprocessing
2201 with multiprocessing.Pool(__mpWorkers) as pool:
2202 __multiHX = pool.map( _sFunction, _jobs )
2205 # logging.debug("MULTF Internal multiprocessing calculation end")
2207 # logging.debug("MULTF Internal monoprocessing calculation begin")
2209 if _extraArguments is None:
2210 for __xvalue in __xserie:
2211 __multiHX.append( _sFunction( __xvalue ) )
2212 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2213 for __xvalue in __xserie:
2214 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2215 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2216 for __xvalue in __xserie:
2217 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2219 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2220 # logging.debug("MULTF Internal monoprocessing calculation end")
2222 # logging.debug("MULTF Internal multifonction calculations end")
2225 # ==============================================================================
2226 def CostFunction3D(_x,
2227 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2228 _HmX = None, # Simulation déjà faite de Hm(x)
2229 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2234 _SIV = False, # A résorber pour la 8.0
2235 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2236 _nPS = 0, # nbPreviousSteps
2237 _QM = "DA", # QualityMeasure
2238 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2239 _fRt = False, # Restitue ou pas la sortie étendue
2240 _sSc = True, # Stocke ou pas les SSC
2243 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2244 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2245 DFO, QuantileRegression
2251 for k in ["CostFunctionJ",
2257 "SimulatedObservationAtCurrentOptimum",
2258 "SimulatedObservationAtCurrentState",
2262 if hasattr(_SSV[k],"store"):
2263 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
2265 _X = numpy.asmatrix(numpy.ravel( _x )).T
2266 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2267 _SSV["CurrentState"].append( _X )
2269 if _HmX is not None:
2273 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2277 _HX = _Hm( _X, *_arg )
2278 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2280 if "SimulatedObservationAtCurrentState" in _SSC or \
2281 "SimulatedObservationAtCurrentOptimum" in _SSC:
2282 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
2284 if numpy.any(numpy.isnan(_HX)):
2285 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2287 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2288 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2289 if _BI is None or _RI is None:
2290 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2291 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2292 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2293 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2294 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2296 raise ValueError("Observation error covariance matrix has to be properly defined!")
2298 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2299 elif _QM in ["LeastSquares", "LS", "L2"]:
2301 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2302 elif _QM in ["AbsoluteValue", "L1"]:
2304 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2305 elif _QM in ["MaximumError", "ME"]:
2307 Jo = numpy.max( numpy.abs(_Y - _HX) )
2308 elif _QM in ["QR", "Null"]:
2312 raise ValueError("Unknown asked quality measure!")
2314 J = float( Jb ) + float( Jo )
2317 _SSV["CostFunctionJb"].append( Jb )
2318 _SSV["CostFunctionJo"].append( Jo )
2319 _SSV["CostFunctionJ" ].append( J )
2321 if "IndexOfOptimum" in _SSC or \
2322 "CurrentOptimum" in _SSC or \
2323 "SimulatedObservationAtCurrentOptimum" in _SSC:
2324 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2325 if "IndexOfOptimum" in _SSC:
2326 _SSV["IndexOfOptimum"].append( IndexMin )
2327 if "CurrentOptimum" in _SSC:
2328 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2329 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2330 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2335 if _QM in ["QR"]: # Pour le QuantileRegression
2340 # ==============================================================================
# Script entry point: running this module directly only prints a banner.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')