1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
class CacheManager(object):
    """
    General class managing a cache of computations (operator evaluations).
    """
    # NOTE(review): this SOURCE view is truncated — the "def __init__(...)"
    # line is missing; the two lines below are the tail of its parameter list.
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy = -1,
        """
        The tolerance characteristics can be modified at creation time.
        """
        self.__tolerBP = float(toleranceInRedundancy)   # relative tolerance for redundancy detection
        self.__lenghtOR = int(lenghtOfRedundancy)       # max cache length (-1 => auto-sized at first store)
        self.__initlnOR = self.__lenghtOR               # initial length, kept so it can be restored later

    def wasCalculatedIn(self, xValue, oName="" ):
        "Checks whether a computation matching the given value already exists"
        # Scan the cache from the most recent entry backwards, over at most
        # __lenghtOR entries. Each cached entry is a tuple
        # (point, value, norm, operator name) — see storeValueInX below.
        # NOTE(review): several branch bodies are missing in this truncated view.
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if not hasattr(xValue, 'size'):
            elif (str(oName) != self.__listOPCV[i][3]):
            elif (xValue.size != self.__listOPCV[i][0].size):
            # Cheap first-component comparison rejects most candidates before
            # the full norm-based comparison below.
            elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
            # Cache hit: the stored point is within relative tolerance of xValue.
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                __HxV = self.__listOPCV[i][1]

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Stores, for an operator o, a computation Hx matching the value x"
        if self.__lenghtOR < 0:
            # First store ever: auto-size the cache from the state dimension.
            self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extends the list if the operator is new
            self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR += self.__lenghtOR
            self.__seenNames.append(str(oName))
        # Evict the oldest entries while the cache exceeds its maximum length.
        while len(self.__listOPCV) > self.__lenghtOR:
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)), # 0 Previous point
            copy.copy(HxValue),             # 1 Previous value
            numpy.linalg.norm(xValue),      # 2 Norm
            str(oName),                     # 3 Operator name
    # NOTE(review): truncated view — the four statements below are the visible
    # fragments of the cache disable()/enable() methods.
        self.__initlnOR = self.__lenghtOR
        self.__enabled = False
        self.__lenghtOR = self.__initlnOR
        self.__enabled = True
113 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator type.
    """
    # NOTE(review): this SOURCE view is truncated — the class-level counters
    # (NbCallsAsMatrix, ...), the shared CacheManager instance CM, and the
    # "def __init__(...)" line are missing; visible fragments kept verbatim.
            name = "GenericOperator",
            avoidingRedundancy = True,
            inputAsMultiFunction = False,
            enableMultiProcess = False,
            extraArguments = None,
        """
        An object of this type is built by supplying, through one of the two
        keywords, either a Python function or multi-function, or a
        matrix-like argument:
        - name : operator name
        - fromMethod : Python function argument
        - fromMatrix : argument suited to the numpy.matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant computations
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        """
        self.__name = str(name)
        # Per-instance call counters: as-matrix, as-method, served-from-cache.
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # The user function is already a multi-function: use it directly.
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap the mono-function into the MultiFonction adapter.
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type = "Method"
        elif fromMatrix is not None:
            # NOTE(review): numpy.float is deprecated since NumPy 1.20 and
            # removed in 1.24 — plain float (or numpy.float64) is expected here.
            self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
            self.__Type = "Matrix"

    def disableAvoidingRedundancy(self):
        # Disables the shared calculation cache.
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # NOTE(review): truncated — only the else-branch call is visible here;
        # enabling is presumably conditional on self.__AvoidRC (to confirm).
        Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to a series of arguments
        xValue. This method simply performs the application, each argument
        having a priori to be of the proper type.
        - the series arguments are:
          - xValue : argument suited for applying the operator
          - HValue : precomputed value of the operator at this point
          - argsAsSerie : indicates whether the arguments are mono- or multi-valued
        """
        # NOTE(review): truncated — the serie/mono normalisation of _xValue,
        # _HValue, _HxValue and _xserie initialisations are missing here.
        if HValue is not None:
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        if _HValue is not None:
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            # Store the precomputed values directly (as column vectors), and
            # feed them into the shared cache.
            for i in range(len(_HValue)):
                _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
                Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
            for i, xv in enumerate(_xValue):
                # Look the point up in the shared cache when redundancy
                # avoidance is enabled.
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                __alreadyCalculated = False
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                if self.__Matrix is not None:
                    # Linear case: apply the stored matrix directly.
                    self.__addOneMatrixCall()
                    _xv = numpy.matrix(numpy.ravel(xv)).T
                    _hv = self.__Matrix * _xv
                    self.__addOneMethodCall()
                _HxValue.append( _hv )
        # Evaluate all not-yet-computed points in one multi-function call.
        if len(_xserie)>0 and self.__Matrix is None:
            if self.__extraArgs is None:
                _hserie = self.__Method( _xserie ) # MF computation
            _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
            if not hasattr(_hserie, "pop"):
                raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
                Operator.CM.storeValueInX(_xv,_hv,self.__name)
        # Optionally pack the series of results into one 2D array, one column
        # per result.
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to pairs (xValue, uValue).
        This method simply performs the application, its argument having a
        priori to be of the proper type. If uValue is None, the operator is
        assumed to apply to xValue alone.
        - paires : the paired arguments are:
          - xValue : X argument suited for applying the operator
          - uValue : U argument suited for applying the operator
        - argsAsSerie : indicates whether the argument is mono- or multi-valued
        """
        if argsAsSerie: _xuValue = paires
        else: _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        if self.__Matrix is not None:
            # Linear case: the control part is ignored, the matrix applies to X.
            for paire in _xuValue:
                _xValue, _uValue = paire
                _xValue = numpy.matrix(numpy.ravel(_xValue)).T
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix * _xValue )
            # Functional case: pass the (X,U) pair when U is set, X alone otherwise.
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # MF computation
            _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
        # Optionally pack the series of results into one 2D array.
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to a series of arguments
        xValue, knowing that the operator is valid at xNominal. This method
        simply performs the application, its argument having a priori to be
        of the proper type. If the operator is linear, being a matrix, then
        it is valid at any nominal point and xNominal may be arbitrary. There
        is a single pair by default, and argsAsSerie indicates that the
        argument is multi-pair.
        - paires : the paired arguments are:
          - xNominal : series of arguments giving the point where the operator
            is built in order to be applied afterwards
          - xValue : series of arguments suited for applying the operator
        - argsAsSerie : indicates whether the argument is mono- or multi-valued
        """
        if argsAsSerie: _nxValue = paires
        else: _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        if self.__Matrix is not None:
            # Linear case: the nominal point is irrelevant, apply the matrix.
            for paire in _nxValue:
                _xNominal, _xValue = paire
                _xValue = numpy.matrix(numpy.ravel(_xValue)).T
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix * _xValue )
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # MF computation
            _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
        # Optionally pack the series of results into one 2D array.
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Returns the operator in matrix form.
        """
        if self.__Matrix is not None:
            # Already a matrix: return it directly.
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
            # A functional operator with an operating point: evaluate there.
            self.__addOneMethodCall( len(ValueForMethodForm) )
            for _vfmf in ValueForMethodForm:
                mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        if argsAsSerie: return mValue
        else: return mValue[-1]

    # NOTE(review): truncated — the "def shape(self):" line is missing above
    # this docstring fragment.
        """
        Returns the shape, numpy-style, when the operator is available in
        matrix form.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Returns the numbers of evaluations of the operator.
        """
        # Tuple layout: (local total, local matrix, local method, local cached,
        #                global total, global matrix, global method, global cached)
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls
        else: return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Counts one call"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self, nb = 1):
        "Counts one call"
        self.__NbCallsAsMethod += nb # Local count
        Operator.NbCallsAsMethod += nb # Global count

    def __addOneCacheCall(self):
        "Counts one call"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
405 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator type
    (Direct, Tangent Linear, Adjoint).
    """
    # NOTE(review): this SOURCE view is truncated — the "def __init__(...)"
    # line and several statements (defaults for __Parameters, avoidRC,
    # toBeChecked, asMatrix, appliedInX, scheduledBy, self.__FO) are missing;
    # the visible fragments are kept verbatim.
            name = "GenericFullOperator",
            asOneFunction = None, # 1 Function
            asThreeFunctions = None, # 3 Functions in a dictionary
            asScript = None, # 1 or 3 Function(s) by script
            asDict = None, # Parameters
            extraArguments = None,
            inputAsMF = False,# Function(s) as Multi-Functions
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments
        #
        # Merge the user parameters, then normalize multiprocessing switches.
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority given to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            # Derivatives-MP and evaluation-MP are mutually exclusive.
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        #
        if asScript is not None:
            # The operator is provided through a user script file.
            __Matrix, __Function = None, None
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                    "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    __Function = { "Direct":asOneFunction }
                # A single Direct function: derivatives will be approximated.
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                # Accept an explicit Tangent/Adjoint pair...
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                # ...or a Direct function only, whose derivatives are approximated.
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Default the Direct operator to the Tangent one.
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        #
        # Normalize the "applied in X" precomputed values into a dictionary.
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        # Case 1: Direct function with finite-difference approximated derivatives.
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Fill in the finite-difference approximation defaults.
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["CenteredFiniteDifference"],
                increment = __Function["DifferentialIncrement"],
                dX = __Function["withdX"],
                extraArguments = self.__extraArgs,
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
                mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers = __Function["NumberOfProcesses"],
                mfEnabled = __Function["withmfEnabled"],
            self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        # Case 2: the three functions are explicitly given.
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        # Case 3: linear operator given as a matrix — Tangent is the matrix
        # itself and Adjoint its transpose.
        elif asMatrix is not None:
            # NOTE(review): numpy.float is deprecated since NumPy 1.20 and
            # removed in 1.24 — plain float is expected here.
            __matrice = numpy.matrix( __Matrix, numpy.float )
            self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
        #
        # Normalize the precomputed "applied in X" values into column matrices.
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # For the case of a true matrix
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # For the case of a vector stored as a 2-dimensional array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
            self.__FO["AppliedInX"] = None

    # NOTE(review): truncated — the "def __repr__(self):" / "def __str__(self):"
    # lines are missing above their visible bodies.
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

        "x.__str__() <==> str(x)"
        return str(self.__FO)
578 # ==============================================================================
579 class Algorithm(object):
581 Classe générale d'interface de type algorithme
583 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
584 d'assimilation, en fournissant un container (dictionnaire) de variables
585 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
587 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
    def __init__(self, name):
        """
        The present initialization makes it possible to build generic storage
        variables available to the elementary algorithms. These storage
        variables are then kept in a dictionary internal to the object, and
        are accessed through the "get" method.

        The expected variables are:
        - APosterioriCorrelations : correlations matrix of the A matrix
        - APosterioriCovariance : a posteriori covariance matrix : A
        - APosterioriStandardDeviations : standard deviations vector of the A matrix
        - APosterioriVariances : variances vector of the A matrix
        - Analysis : analysis vector : Xa
        - BMA : Background minus Analysis : Xa - Xb
        - CostFunctionJ : global cost function, sum of the two following parts Jb and Jo
        - CostFunctionJAtCurrentOptimum : global cost function at the current optimal state during iterations
        - CostFunctionJb : background part of the cost function : Jb
        - CostFunctionJbAtCurrentOptimum : background part at the current optimal state during iterations
        - CostFunctionJo : observation part of the cost function : Jo
        - CostFunctionJoAtCurrentOptimum : observation part at the current optimal state during iterations
        - CurrentIterationNumber : current iteration number in iterative algorithms, starting from 0
        - CurrentOptimum : current optimal state during iterations
        - CurrentState : current state during iterations
        - GradientOfCostFunctionJ : gradient of the global cost function
        - GradientOfCostFunctionJb : gradient of the background part of the cost function
        - GradientOfCostFunctionJo : gradient of the observation part of the cost function
        - IndexOfOptimum : index of the current optimal state during iterations
        - Innovation : the innovation : d = Y - H(X)
        - InnovationAtCurrentState : the innovation at the current state : dn = Y - H(Xn)
        - JacobianMatrixAtBackground : Jacobian matrix at the background state
        - JacobianMatrixAtCurrentState : Jacobian matrix at the current state
        - JacobianMatrixAtOptimum : Jacobian matrix at the optimum
        - KalmanGainAtOptimum : Kalman gain at the optimum
        - MahalanobisConsistency : consistency indicator of the covariances
        - OMA : Observation minus Analysis : Y - Xa
        - OMB : Observation minus Background : Y - Xb
        - ForecastState : current forecast state during iterations
        - Residu : residue, for the checking algorithms
        - SigmaBck2 : indicator of optimal correction of the background errors
        - SigmaObs2 : indicator of optimal correction of the observation errors
        - SimulatedObservationAtBackground : the observed state H(Xb) at the background
        - SimulatedObservationAtCurrentOptimum : the observed state H(X) at the current optimal state
        - SimulatedObservationAtCurrentState : the observed state H(X) at the current state
        - SimulatedObservationAtOptimum : the observed state H(Xa) at the optimum
        - SimulationQuantiles : observed states H(X) for the requested quantiles
        Storage variables may be added in the initialization of the elementary
        algorithm that inherits from this class.
        """
        # NOTE(review): this SOURCE view is truncated — a few statements and
        # closing brackets are missing; visible lines are kept verbatim.
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()
        self._name = str( name )
        # Default parameters; completed later by __setParameters in _pre_run.
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__required_parameters = {}
        self.__required_inputs = {
            "RequiredInputValues":{"mandatory":(), "optional":()},
            "ClassificationTags":[],
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
        self.__canonical_parameter_name = {} # Mapping "lower"->"correct"
        self.__canonical_stored_name = {} # Mapping "lower"->"correct"
        #
        # One Persistence container per storable variable.
        self.StoredVariables = {}
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
        self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
        self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
        self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
        self.StoredVariables["LastEnsembleForecastState"] = Persistence.OneMatrix(name = "LastEnsembleForecastState")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        #
        # Case-insensitive lookup tables for stored-variable and parameter names.
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k
        for k, v in self.__variable_names_not_public.items():
            self.__canonical_parameter_name[k.lower()] = k
        self.__canonical_parameter_name["algorithm"] = "Algorithm"
        self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
702 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
704 logging.debug("%s Lancement", self._name)
705 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
706 self._getTimeState(reset=True)
708 # Mise a jour des paramètres internes avec le contenu de Parameters, en
709 # reprenant les valeurs par défauts pour toutes celles non définies
710 self.__setParameters(Parameters, reset=True)
711 for k, v in self.__variable_names_not_public.items():
712 if k not in self._parameters: self.__setParameters( {k:v} )
714 # Corrections et compléments des vecteurs
715 def __test_vvalue(argument, variable, argname, symbol=None):
716 if symbol is None: symbol = variable
718 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
719 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
720 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
721 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
723 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
725 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
727 __test_vvalue( Xb, "Xb", "Background or initial state" )
728 __test_vvalue( Y, "Y", "Observation" )
729 __test_vvalue( U, "U", "Control" )
731 # Corrections et compléments des covariances
732 def __test_cvalue(argument, variable, argname, symbol=None):
733 if symbol is None: symbol = variable
735 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
736 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
737 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
738 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
740 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
742 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
744 __test_cvalue( B, "B", "Background" )
745 __test_cvalue( R, "R", "Observation" )
746 __test_cvalue( Q, "Q", "Evolution" )
748 # Corrections et compléments des opérateurs
749 def __test_ovalue(argument, variable, argname, symbol=None):
750 if symbol is None: symbol = variable
751 if argument is None or (isinstance(argument,dict) and len(argument)==0):
752 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
753 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
754 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
755 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
757 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
759 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
761 __test_ovalue( HO, "HO", "Observation", "H" )
762 __test_ovalue( EM, "EM", "Evolution", "M" )
763 __test_ovalue( CM, "CM", "Control Model", "C" )
765 # Corrections et compléments des bornes
766 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
767 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
769 self._parameters["Bounds"] = None
771 # Corrections et compléments de l'initialisation en X
772 if "InitializationPoint" in self._parameters:
774 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
775 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
776 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
777 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
778 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
780 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
782 if self._parameters["InitializationPoint"] is None:
783 raise ValueError("Forced initial point can not be set without any given Background or required value")
785 # Correction pour pallier a un bug de TNC sur le retour du Minimum
786 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
787 self.setParameterValue("StoreInternalVariables",True)
789 # Verbosité et logging
790 if logging.getLogger().level < logging.WARNING:
791 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
792 if PlatformInfo.has_scipy:
793 import scipy.optimize
794 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
796 self._parameters["optmessages"] = 15
798 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
799 if PlatformInfo.has_scipy:
800 import scipy.optimize
801 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
803 self._parameters["optmessages"] = 15
807 def _post_run(self,_oH=None):
809 if ("StoreSupplementaryCalculations" in self._parameters) and \
810 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
811 for _A in self.StoredVariables["APosterioriCovariance"]:
812 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
813 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
814 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
815 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
816 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
817 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
818 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
819 self.StoredVariables["APosterioriCorrelations"].store( _C )
820 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
821 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
822 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
823 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
824 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
825 logging.debug("%s Terminé", self._name)
828 def _toStore(self, key):
829 "True if in StoreSupplementaryCalculations, else False"
830 return key in self._parameters["StoreSupplementaryCalculations"]
def get(self, key=None):
    """
    Return one of the stored variables identified by its (case-insensitive)
    key, or the dictionary of all available variables when no key is given.
    The variables are returned directly as persistence objects, so the
    individual access methods are those of the persistence classes.
    """
    if key is not None:
        # Keys are matched case-insensitively through the canonical name map
        return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
    else:
        return self.StoredVariables
845 def __contains__(self, key=None):
846 "D.__contains__(k) -> True if D has a key k, else False"
847 if key is None or key.lower() not in self.__canonical_stored_name:
850 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
def keys(self):
    "D.keys() -> list of D's keys"
    if hasattr(self, "StoredVariables"):
        return self.StoredVariables.keys()
    else:
        # Before any algorithm setup there are no stored variables
        return []
def pop(self, k, d):
    "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
    if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
        return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
    else:
        # If the key is not found, the default d is returned, mimicking
        # dict.pop(k, d) with an explicit default
        return d
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
    """
    Abstract entry point: concrete algorithms must implement the elementary
    assimilation calculation in its most natural mathematical form.

    Raises NotImplementedError when not overridden.
    """
    raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
    """
    Declare a required algorithm parameter and its default characteristics
    (default value, typecast, bounds, allowed values, log message).

    Raises ValueError when no name is given.
    """
    if name is None:
        raise ValueError("A name is mandatory to define a required parameter.")
    #
    self.__required_parameters[name] = {
        "default"  : default,
        "typecast" : typecast,
        "minval"   : minval,
        "maxval"   : maxval,
        "listval"  : listval,
        "listadv"  : listadv,
        "message"  : message,
        }
    # Case-insensitive lookup of the declared parameter name
    self.__canonical_parameter_name[name.lower()] = name
    logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
    """
    Return the sorted list of required parameter names, or directly the
    dictionary of required parameters when noDetails is False.
    """
    if noDetails:
        return sorted(self.__required_parameters.keys())
    else:
        return self.__required_parameters
def setParameterValue(self, name=None, value=None):
    """
    Return the value of a declared required parameter in a controlled way:
    apply the default when no value is given, apply the typecast, and check
    min/max bounds and allowed-value lists.

    Raises ValueError when the typecast fails or a constraint is violated.
    """
    __k = self.__canonical_parameter_name[name.lower()]
    default  = self.__required_parameters[__k]["default"]
    typecast = self.__required_parameters[__k]["typecast"]
    minval   = self.__required_parameters[__k]["minval"]
    maxval   = self.__required_parameters[__k]["maxval"]
    listval  = self.__required_parameters[__k]["listval"]
    listadv  = self.__required_parameters[__k]["listadv"]
    #
    if value is None and default is None:
        __val = None
    elif value is None and default is not None:
        if typecast is None: __val = default
        else:                __val = typecast( default )
    else:
        if typecast is None: __val = value
        else:
            try:
                __val = typecast( value )
            except Exception:
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
    #
    if minval is not None and (numpy.array(__val, float) < minval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
    if maxval is not None and (numpy.array(__val, float) > maxval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
    if listval is not None or listadv is not None:
        if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
            # Every element of a sequence value has to belong to one of the lists
            for v in __val:
                if listval is not None and v in listval: continue
                elif listadv is not None and v in listadv: continue
                else:
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
        elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
            raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    #
    return __val
def requireInputArguments(self, mandatory=(), optional=()):
    """
    Register the calculation input arguments that are required (mandatory)
    or accepted (optional) by the algorithm.
    """
    self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
    self.__required_inputs["RequiredInputValues"]["optional"]  = tuple( optional )
def getInputArguments(self):
    """
    Return the (mandatory, optional) tuples of required calculation input
    argument names.
    """
    return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
def setAttributes(self, tags=()):
    """
    Append classification tags to the algorithm attributes.
    Return the current tag list in every case.
    """
    self.__required_inputs["ClassificationTags"].extend( tags )
    return self.__required_inputs["ClassificationTags"]
973 def __setParameters(self, fromDico={}, reset=False):
975 Permet de stocker les paramètres reçus dans le dictionnaire interne.
977 self._parameters.update( fromDico )
978 __inverse_fromDico_keys = {}
979 for k in fromDico.keys():
980 if k.lower() in self.__canonical_parameter_name:
981 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
982 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
983 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
984 for k in self.__required_parameters.keys():
985 if k in __canonic_fromDico_keys:
986 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
988 self._parameters[k] = self.setParameterValue(k)
991 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
993 def _getTimeState(self, reset=False):
995 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
998 self.__initial_cpu_time = time.process_time()
999 self.__initial_elapsed_time = time.perf_counter()
1002 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1003 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1004 return self.__cpu_time, self.__elapsed_time
1006 def _StopOnTimeLimit(self, X=None, withReason=False):
1007 "Stop criteria on time limit: True/False [+ Reason]"
1008 c, e = self._getTimeState()
1009 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1010 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1011 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1012 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1014 __SC, __SR = False, ""
1020 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action-interface class gathering an assimilation algorithm
    choice, its parameters, and the execution of the calculation.
    """
    def __init__(self,
                 name          = "GenericAlgorithm",
                 asAlgorithm   = None,
                 asDict        = None,
                 asScript      = None,
                ):
        """
        Store the algorithm choice and its parameters, coming either from
        explicit arguments or from a user script.
        """
        self.__name       = str(name)
        self.__A          = None
        self.__P          = {}
        #
        self.__algorithm         = {}
        self.__algorithmFile     = None
        self.__algorithmName     = None
        #
        self.updateParameters( asDict, asScript )
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        #
        self.__setAlgorithm( self.__A )
        #
        self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm

    def updateParameters(self,
                 asDict     = None,
                 asScript   = None,
                ):
        "Update the internal parameters from a dictionary or a user script"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def executePythonScheme(self, asDictAO = None):
        "Run the assimilation calculation with the given study objects"
        Operator.CM.clearCache()
        #
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each study object through its getO accessor when available
        if   hasattr(asDictAO["Background"],"getO"):        self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"):     self.__Xb = asDictAO["CheckingPoint"].getO()
        else:                                               self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"):         self.__Y  = asDictAO["Observation"].getO()
        else:                                               self.__Y  = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"):        self.__U  = asDictAO["ControlInput"].getO()
        else:                                               self.__U  = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else:                                               self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"):      self.__EM = asDictAO["EvolutionModel"].getO()
        else:                                               self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"):        self.__CM = asDictAO["ControlModel"].getO()
        else:                                               self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        #
        self.__shape_validate()
        #
        self.__algorithm.run(
            Xb         = self.__Xb,
            Y          = self.__Y,
            U          = self.__U,
            HO         = self.__HO,
            EM         = self.__EM,
            CM         = self.__CM,
            R          = self.__R,
            B          = self.__B,
            Q          = self.__Q,
            Parameters = self.__P,
            )
        return 0

    def executeYACSScheme(self, FileName=None):
        "Run the assimilation calculation through a YACS XML scheme"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        else:
            __file    = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        #
        import pilot
        import SALOMERuntime
        import loader
        SALOMERuntime.RuntimeSALOME_setRuntime()

        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        try:
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)
        except Exception:
            # Best effort: a missing catalog does not prevent the scheme load
            pass

        try:
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))

        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())

        if not p.isValid():
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())

        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())

        e = pilot.ExecutorSwig()
        e.RunW(p)
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
        #
        return 0

    def get(self, key = None):
        "Return a stored variable or a parameter identified by its key"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
        else:
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
            return allvariables

    def pop(self, k, d):
        "Necessaire pour le pickling"
        return self.__algorithm.pop(k, d)

    def getAlgorithmRequiredParameters(self, noDetails=True):
        "Return the list of parameters required by the algorithm"
        return self.__algorithm.getRequiredParameters(noDetails)

    def getAlgorithmInputArguments(self):
        "Return the list of input arguments required by the algorithm"
        return self.__algorithm.getInputArguments()

    def getAlgorithmAttributes(self):
        "Return the list of attributes of the algorithm"
        return self.__algorithm.setAttributes()

    def setObserver(self, __V, __O, __I, __S):
        "Attach an observer function to a stored variable of the algorithm"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        else:
            self.__algorithm.StoredVariables[ __V ].setDataObserver(
                Scheduler      = __S,
                HookFunction   = __O,
                HookParameters = __I,
                )

    def removeObserver(self, __V, __O, __A = False):
        "Remove an observer function from a stored variable of the algorithm"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        else:
            return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
                HookFunction   = __O,
                AllObservers   = __A,
                )

    def hasObserver(self, __V):
        "True if the named stored variable carries at least one observer"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            return False
        if __V not in self.__algorithm:
            return False
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()

    def keys(self):
        "D.keys() -> list of D's keys"
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__algorithm or key in self.__P

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)

    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm used to carry out the assimilation study. The
        argument is a character string naming an algorithm module found in
        the "daAlgorithms" directory of the search path.
        """
        if choice is None:
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Recherche explicitement le fichier complet
        # ------------------------------------------
        module_path = None
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Importe le fichier complet comme un module
        # ------------------------------------------
        try:
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instancie un objet du type élémentaire du fichier
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
        return 0

    def __shape_validate(self):
        """
        Validate the mutual consistency of the sizes of the study variables
        and matrices, when they are present.
        """
        if self.__Xb is None:                      __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"):            __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else:                                  __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        #
        if self.__Y is None:                      __Y_shape = (0,)
        elif hasattr(self.__Y,"size"):            __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else:                                 __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        #
        if self.__U is None:                      __U_shape = (0,)
        elif hasattr(self.__U,"size"):            __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else:                                 __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        #
        if self.__B is None:                      __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else:                                 __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        #
        if self.__R is None:                      __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else:                                 __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        #
        if self.__Q is None:                      __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else:                                 __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        #
        if len(self.__HO) == 0:                              __HO_shape = (0,0)
        elif isinstance(self.__HO, dict):                    __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else:                                            __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        #
        if len(self.__EM) == 0:                              __EM_shape = (0,0)
        elif isinstance(self.__EM, dict):                    __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else:                                            __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        #
        if len(self.__CM) == 0:                              __CM_shape = (0,0)
        elif isinstance(self.__CM, dict):                    __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else:                                            __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Vérification des conditions
        # ---------------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        #
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        #
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        #
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            if self.__algorithmName in ["EnsembleBlue",]:
                # EnsembleBlue accepts an ensemble background: reshape it as a
                # persistent series of member vectors
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    # numpy.float was removed from modern NumPy: use builtin float
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), float ).T )
                __Xb_shape = min(__B_shape)
            else:
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        #
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        #
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        #
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        #
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1399 # ==============================================================================
class RegulationAndParameters(object):
    """
    General action-interface class for the regulation and its parameters.
    """
    def __init__(self,
                 name          = "GenericRegulation",
                 asAlgorithm   = None,
                 asDict        = None,
                 asScript      = None,
                ):
        """
        Store the regulation algorithm name and its parameters, coming
        either from explicit arguments or from a user script.
        """
        self.__name       = str(name)
        self.__P          = {}
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Return a parameter identified by its key, or all of them"
        if key in self.__P:
            return self.__P[key]
        else:
            return self.__P
1438 # ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class: attaches a user-supplied hook
    (object, template, string or script) to one or several algorithm
    variables.
    """
    def __init__(self,
                 name        = "GenericObserver",
                 onVariable  = None,
                 asTemplate  = None,
                 asString    = None,
                 asScript    = None,
                 asObsObject = None,
                 withInfo    = None,
                 scheduledBy = None,
                 withAlgo    = None,
                ):
        """
        Build the observer function and register it on every requested
        variable of the given algorithm object.

        NOTE(review): the keyword order of this signature was reconstructed
        from a partially lost source — confirm against upstream.
        """
        self.__name       = str(name)
        self.__V          = None
        self.__O          = None
        self.__I          = None
        #
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = self.__V
            else:
                # The same information string is associated with every variable
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            else:
                self.__I = (str(withInfo),)
        else:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        if asObsObject is not None:
            self.__O = asObsObject
        else:
            # Build the observer function from a template/string/script
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            ename = self.__V[k]
            einfo = self.__I[k]
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            else:
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1501 # ==============================================================================
class UserScript(object):
    """
    General interface class for a user script given as text: the script
    source comes from an explicit string, a named template, or a script
    file, in that order of priority.
    """
    def __init__(self,
                 name       = "GenericUserScript",
                 asTemplate = None,
                 asString   = None,
                 asScript   = None,
                ):
        """
        Store the script text coming from the first available source.
        """
        self.__name       = str(name)
        #
        if asString is not None:
            self.__F = asString
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        else:
            # No source at all: keep an empty script text
            self.__F = ""

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__F)
1535 # ==============================================================================
class ExternalParameters(object):
    """
    General interface class for externally supplied parameters, given
    either as a dictionary or through a user script.
    """
    def __init__(self,
                 name     = "GenericExternalParameters",
                 asDict   = None,
                 asScript = None,
                ):
        """
        Store the external parameters from the given source.
        """
        self.__name = str(name)
        self.__P    = {}
        #
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
                 asDict     = None,
                 asScript   = None,
                ):
        "Update the internal parameters from a dictionary or a user script"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        "Return the value of a known key, or the list of keys otherwise"
        if key in self.__P:
            return self.__P[key]
        else:
            return list(self.__P.keys())

    def keys(self):
        "D.keys() -> list of D's keys"
        return list(self.__P.keys())

    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        return self.__P.pop(k, d)

    def items(self):
        "D.items() -> a set-like object providing a view on D's items"
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1584 # ==============================================================================
# NOTE(review): sampled listing — lines are missing between embedded numbers;
# code kept byte-identical, comments added for review only.
# Generic state-vector interface: wraps either a single vector (__is_vector)
# or a Persistence-backed series of vectors (__is_series), built from direct
# data, a user script, or one/more data files.
1585 class State(object):
1587 Classe générale d'interface de type état
1590 name = "GenericVector",
1592 asPersistentVector = None,
1598 toBeChecked = False,
1601 Permet de définir un vecteur :
1602 - asVector : entrée des données, comme un vecteur compatible avec le
1603 constructeur de numpy.matrix, ou "True" si entrée par script.
1604 - asPersistentVector : entrée des données, comme une série de vecteurs
1605 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1606 type Persistence, ou "True" si entrée par script.
1607 - asScript : si un script valide est donné contenant une variable
1608 nommée "name", la variable est de type "asVector" (par défaut) ou
1609 "asPersistentVector" selon que l'une de ces variables est placée à
1611 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1612 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1613 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1614 nommée "name"), on récupère les colonnes et on les range ligne après
1615 ligne (colMajor=False, par défaut) ou colonne après colonne
1616 (colMajor=True). La variable résultante est de type "asVector" (par
1617 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1620 self.__name = str(name)
1621 self.__check = bool(toBeChecked)
1625 self.__is_vector = False
1626 self.__is_series = False
# Input priority: script > data file > direct values (asVector/asPersistentVector)
1628 if asScript is not None:
1629 __Vector, __Series = None, None
1630 if asPersistentVector:
1631 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1633 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1634 elif asDataFile is not None:
1635 __Vector, __Series = None, None
1636 if asPersistentVector:
1637 if colNames is not None:
1638 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1640 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already stored column-major, hence the inverted transpose logic
1641 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1642 __Series = numpy.transpose(__Series)
1643 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1644 __Series = numpy.transpose(__Series)
1646 if colNames is not None:
1647 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1649 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1651 __Vector = numpy.ravel(__Vector, order = "F")
1653 __Vector = numpy.ravel(__Vector, order = "C")
1655 __Vector, __Series = asVector, asPersistentVector
1657 if __Vector is not None:
1658 self.__is_vector = True
# NOTE(review): numpy.matrix is deprecated and numpy.float was removed in
# NumPy 1.24 — this requires an old NumPy pin; verify before upgrading.
1659 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1660 self.shape = self.__V.shape
1661 self.size = self.__V.size
1662 elif __Series is not None:
1663 self.__is_series = True
1664 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1665 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string input — unsafe on untrusted data
1666 if isinstance(__Series, str): __Series = eval(__Series)
1667 for member in __Series:
1668 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
# Persistence objects may expose shape as attribute or as a method
1671 if isinstance(self.__V.shape, (tuple, list)):
1672 self.shape = self.__V.shape
1674 self.shape = self.__V.shape()
1675 if len(self.shape) == 1:
1676 self.shape = (self.shape[0],1)
1677 self.size = self.shape[0] * self.shape[1]
1679 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1681 if scheduledBy is not None:
1682 self.__T = scheduledBy
# Accessor: with scheduler returns (value, schedule); elided branches handle
# the no-scheduler cases — TODO confirm against full source.
1684 def getO(self, withScheduler=False):
1686 return self.__V, self.__T
1687 elif self.__T is None:
1693 "Vérification du type interne"
1694 return self.__is_vector
1697 "Vérification du type interne"
1698 return self.__is_series
1701 "x.__repr__() <==> repr(x)"
1702 return repr(self.__V)
1705 "x.__str__() <==> str(x)"
1706 return str(self.__V)
1708 # ==============================================================================
# NOTE(review): sampled listing — lines are missing between embedded numbers;
# code kept byte-identical, comments added for review only.
# Generic covariance interface with four internal storages, chosen at build
# time: full matrix, diagonal vector, scalar*identity, or an arbitrary
# object exposing the matrix protocol (getT/getI/diag/trace/arithmetic).
# All linear-algebra operators below dispatch on the four is*() flags.
1709 class Covariance(object):
1711 Classe générale d'interface de type covariance
1714 name = "GenericCovariance",
1715 asCovariance = None,
1716 asEyeByScalar = None,
1717 asEyeByVector = None,
1720 toBeChecked = False,
1723 Permet de définir une covariance :
1724 - asCovariance : entrée des données, comme une matrice compatible avec
1725 le constructeur de numpy.matrix
1726 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1727 multiplicatif d'une matrice de corrélation identité, aucune matrice
1728 n'étant donc explicitement à donner
1729 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1730 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1731 n'étant donc explicitement à donner
1732 - asCovObject : entrée des données comme un objet python, qui a les
1733 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1734 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1735 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1736 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1737 pleine doit être vérifié
1739 self.__name = str(name)
1740 self.__check = bool(toBeChecked)
1743 self.__is_scalar = False
1744 self.__is_vector = False
1745 self.__is_matrix = False
1746 self.__is_object = False
# Script input: the elided conditions presumably test asEyeByScalar /
# asEyeByVector / asCovObject flags to pick the import type — TODO confirm.
1748 if asScript is not None:
1749 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1751 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1753 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1755 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1757 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1759 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Priority: scalar > vector > matrix > object
1761 if __Scalar is not None:
1762 if numpy.matrix(__Scalar).size != 1:
1763 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1764 self.__is_scalar = True
# abs() forces non-negativity; positivity is checked later in __validate
1765 self.__C = numpy.abs( float(__Scalar) )
1768 elif __Vector is not None:
1769 self.__is_vector = True
1770 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1771 self.shape = (self.__C.size,self.__C.size)
1772 self.size = self.__C.size**2
1773 elif __Matrix is not None:
1774 self.__is_matrix = True
# NOTE(review): numpy.matrix is deprecated — requires an old NumPy pin
1775 self.__C = numpy.matrix( __Matrix, float )
1776 self.shape = self.__C.shape
1777 self.size = self.__C.size
1778 elif __Object is not None:
1779 self.__is_object = True
# Duck-type check of the mandatory matrix protocol on the user object
1781 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1782 if not hasattr(self.__C,at):
1783 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1784 if hasattr(self.__C,"shape"):
1785 self.shape = self.__C.shape
1788 if hasattr(self.__C,"size"):
1789 self.size = self.__C.size
1794 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Internal sanity checks: squareness, positivity; full SPD (Cholesky) check
# only when requested or when log level is below WARNING (debug runs).
1798 def __validate(self):
1800 if self.__C is None:
1801 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1802 if self.ismatrix() and min(self.shape) != max(self.shape):
1803 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1804 if self.isobject() and min(self.shape) != max(self.shape):
1805 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1806 if self.isscalar() and self.__C <= 0:
1807 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1808 if self.isvector() and (self.__C <= 0).any():
1809 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1810 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1812 L = numpy.linalg.cholesky( self.__C )
1814 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1815 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1817 L = self.__C.cholesky()
1819 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Type-flag accessors
1822 "Vérification du type interne"
1823 return self.__is_scalar
1826 "Vérification du type interne"
1827 return self.__is_vector
1830 "Vérification du type interne"
1831 return self.__is_matrix
1834 "Vérification du type interne"
1835 return self.__is_object
# getI(): inverse, returned as a new Covariance of the matching storage kind
1840 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
1841 elif self.isvector():
1842 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1843 elif self.isscalar():
1844 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1845 elif self.isobject() and hasattr(self.__C,"getI"):
1846 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
# Deliberate None return (callers test for it) rather than an exception
1848 return None # Indispensable
# getT(): transpose (diagonal/scalar forms are symmetric, returned unchanged)
1853 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1854 elif self.isvector():
1855 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1856 elif self.isscalar():
1857 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1858 elif self.isobject() and hasattr(self.__C,"getT"):
1859 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1861 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
1864 "Décomposition de Cholesky"
1866 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1867 elif self.isvector():
1868 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1869 elif self.isscalar():
1870 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1871 elif self.isobject() and hasattr(self.__C,"cholesky"):
1872 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1874 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
1876 def choleskyI(self):
1877 "Inversion de la décomposition de Cholesky"
1879 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1880 elif self.isvector():
1881 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1882 elif self.isscalar():
1883 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1884 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1885 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1887 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
1890 "Racine carrée matricielle"
# sqrtm may return a complex result for near-singular input; real part kept
1893 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
1894 elif self.isvector():
1895 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1896 elif self.isscalar():
1897 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1898 elif self.isobject() and hasattr(self.__C,"sqrtm"):
1899 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
1901 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
1904 "Inversion de la racine carrée matricielle"
1907 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
1908 elif self.isvector():
1909 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1910 elif self.isscalar():
1911 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1912 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
1913 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
1915 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
# diag(): diagonal as an array; msize is required for the scalar form since
# the scalar alone carries no dimension information.
1917 def diag(self, msize=None):
1918 "Diagonale de la matrice"
1920 return numpy.diag(self.__C)
1921 elif self.isvector():
1923 elif self.isscalar():
1925 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1927 return self.__C * numpy.ones(int(msize))
1928 elif self.isobject() and hasattr(self.__C,"diag"):
1929 return self.__C.diag()
1931 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
# asfullmatrix(): densify to a plain ndarray (msize required for scalar form)
1933 def asfullmatrix(self, msize=None):
1936 return numpy.asarray(self.__C)
1937 elif self.isvector():
1938 return numpy.asarray( numpy.diag(self.__C), float )
1939 elif self.isscalar():
1941 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1943 return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
1944 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1945 return self.__C.asfullmatrix()
1947 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
1949 def trace(self, msize=None):
1950 "Trace de la matrice"
1952 return numpy.trace(self.__C)
1953 elif self.isvector():
1954 return float(numpy.sum(self.__C))
1955 elif self.isscalar():
1957 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1959 return self.__C * int(msize)
1960 elif self.isobject():
1961 return self.__C.trace()
1963 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
1969 "x.__repr__() <==> repr(x)"
1970 return repr(self.__C)
1973 "x.__str__() <==> str(x)"
1974 return str(self.__C)
# Arithmetic operators: each dispatches on the storage kind; the vector and
# scalar forms update only the diagonal stride of the dense operand.
1976 def __add__(self, other):
1977 "x.__add__(y) <==> x+y"
1978 if self.ismatrix() or self.isobject():
1979 return self.__C + numpy.asmatrix(other)
1980 elif self.isvector() or self.isscalar():
1981 _A = numpy.asarray(other)
# stride _A.shape[1]+1 walks the main diagonal of the flattened matrix
1982 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1983 return numpy.asmatrix(_A)
1985 def __radd__(self, other):
1986 "x.__radd__(y) <==> y+x"
1987 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1989 def __sub__(self, other):
1990 "x.__sub__(y) <==> x-y"
1991 if self.ismatrix() or self.isobject():
1992 return self.__C - numpy.asmatrix(other)
1993 elif self.isvector() or self.isscalar():
1994 _A = numpy.asarray(other)
1995 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1996 return numpy.asmatrix(_A)
1998 def __rsub__(self, other):
1999 "x.__rsub__(y) <==> y-x"
2000 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2003 "x.__neg__() <==> -x"
# __matmul__: ndarray-based products (vector operands flattened with ravel)
2006 def __matmul__(self, other):
2007 "x.__mul__(y) <==> x@y"
2008 if self.ismatrix() and isinstance(other, (int, float)):
2009 return numpy.asarray(self.__C) * other
2010 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2011 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2012 return numpy.ravel(self.__C @ numpy.ravel(other))
2013 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2014 return numpy.asarray(self.__C) @ numpy.asarray(other)
2016 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2017 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2018 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2019 return numpy.ravel(self.__C) * numpy.ravel(other)
2020 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2021 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2023 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2024 elif self.isscalar() and isinstance(other,numpy.matrix):
2025 return numpy.asarray(self.__C * other)
2026 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2027 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2028 return self.__C * numpy.ravel(other)
2030 return self.__C * numpy.asarray(other)
2031 elif self.isobject():
2032 return self.__C.__matmul__(other)
2034 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
# __mul__: numpy.matrix-based products (legacy * operator)
2036 def __mul__(self, other):
2037 "x.__mul__(y) <==> x*y"
2038 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2039 return self.__C * other
2040 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2041 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2042 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2043 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2044 return self.__C * numpy.asmatrix(other)
2046 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2047 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2048 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2049 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2050 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2051 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2053 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2054 elif self.isscalar() and isinstance(other,numpy.matrix):
2055 return self.__C * other
2056 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2057 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2058 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2060 return self.__C * numpy.asmatrix(other)
2061 elif self.isobject():
2062 return self.__C.__mul__(other)
2064 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# Reflected operands (y @ x and y * x) — note __rmatmul__ and __rmul__
# share the same body shape; the docstring of __rmatmul__ says y@x.
2066 def __rmatmul__(self, other):
2067 "x.__rmul__(y) <==> y@x"
2068 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2069 return other * self.__C
2070 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2071 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2072 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2073 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2074 return numpy.asmatrix(other) * self.__C
2076 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2077 elif self.isvector() and isinstance(other,numpy.matrix):
2078 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2079 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2080 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2081 return numpy.asmatrix(numpy.array(other) * self.__C)
2083 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2084 elif self.isscalar() and isinstance(other,numpy.matrix):
2085 return other * self.__C
2086 elif self.isobject():
2087 return self.__C.__rmatmul__(other)
2089 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2091 def __rmul__(self, other):
2092 "x.__rmul__(y) <==> y*x"
2093 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2094 return other * self.__C
2095 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2096 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2097 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2098 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2099 return numpy.asmatrix(other) * self.__C
2101 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2102 elif self.isvector() and isinstance(other,numpy.matrix):
2103 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2104 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2105 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2106 return numpy.asmatrix(numpy.array(other) * self.__C)
2108 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2109 elif self.isscalar() and isinstance(other,numpy.matrix):
2110 return other * self.__C
2111 elif self.isobject():
2112 return self.__C.__rmul__(other)
2114 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2117 "x.__len__() <==> len(x)"
# len() is the row count; relies on self.shape being set in the constructor
2118 return self.shape[0]
2120 # ==============================================================================
# NOTE(review): sampled listing — the bodies of func() and the getfunc()
# accessor are elided; code kept byte-identical.
# Wraps a user-supplied source-code string as a callable observer.
2122 class Observer2Func(object):
2123 Creation d'une fonction d'observateur a partir de son texte
2125 def __init__(self, corps=""):
2126 self.__corps = corps
# Observer entry point: (var, info) signature expected by the framework;
# presumably exec's self.__corps — TODO confirm against full source.
2127 def func(self,var,info):
2128 "Fonction d'observation"
2131 "Restitution du pointeur de fonction dans l'objet"
2134 # ==============================================================================
# NOTE(review): sampled listing — the dict-literal openers for __viewers and
# __loaders (and the switch on/off method headers) are elided; code kept
# byte-identical.
# Records every case-building command so the case can later be dumped to
# (TUI/SCD/YACS) or reloaded from (TUI/COM) a normalized representation.
2135 class CaseLogger(object):
2137 Conservation des commandes de creation d'un cas
2139 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2140 self.__name = str(__name)
2141 self.__objname = str(__objname)
2142 self.__logSerie = []
2143 self.__switchoff = False
# Built-in output formatters
2145 "TUI" :Interfaces._TUIViewer,
2146 "SCD" :Interfaces._SCDViewer,
2147 "YACS":Interfaces._YACSViewer,
# Built-in input loaders
2150 "TUI" :Interfaces._TUIViewer,
2151 "COM" :Interfaces._COMViewer,
# User-supplied formatters/loaders extend (and may override) the built-ins
2153 if __addViewers is not None:
2154 self.__viewers.update(dict(__addViewers))
2155 if __addLoaders is not None:
2156 self.__loaders.update(dict(__addLoaders))
2158 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2159 "Enregistrement d'une commande individuelle"
# Recording is skipped entirely while the logger is switched off
2160 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2161 if "self" in __keys: __keys.remove("self")
2162 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2164 self.__switchoff = True
2166 self.__switchoff = False
2168 def dump(self, __filename=None, __format="TUI", __upa=""):
2169 "Restitution normalisée des commandes"
2170 if __format in self.__viewers:
2171 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2173 raise ValueError("Dumping as \"%s\" is not available"%__format)
2174 return __formater.dump(__filename, __upa)
2176 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2177 "Chargement normalisé des commandes"
2178 if __format in self.__loaders:
2179 __formater = self.__loaders[__format]()
2181 raise ValueError("Loading as \"%s\" is not available"%__format)
2182 return __formater.load(__filename, __content, __object)
2184 # ==============================================================================
# NOTE(review): sampled listing — the `def MultiFonction(__xserie, ...)` header
# line itself is elided; only the signature tail and body fragments are
# visible. Code kept byte-identical.
# Applies _sFunction to each vector of an ordered input series, either
# sequentially or via a multiprocessing.Pool, returning the matching series
# of results.
2187 _extraArguments = None,
2188 _sFunction = lambda x: x,
2193 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2194 correspondante de valeurs de la fonction en argument
2196 # Vérifications et définitions initiales
2197 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2198 if not PlatformInfo.isIterable( __xserie ):
2199 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count defaults to "all cores" when unset or < 1 (elided branch)
2201 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2204 __mpWorkers = int(_mpWorkers)
2206 import multiprocessing
2217 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2218 import multiprocessing
# Pool context manager guarantees worker cleanup on exit
2219 with multiprocessing.Pool(__mpWorkers) as pool:
2220 __multiHX = pool.map( _sFunction, _jobs )
2223 # logging.debug("MULTF Internal multiprocessing calculation end")
2225 # logging.debug("MULTF Internal monoprocessing calculation begin")
# Sequential fallback: extra arguments forwarded positionally (list/tuple/map)
# or by keyword (dict)
2227 if _extraArguments is None:
2228 for __xvalue in __xserie:
2229 __multiHX.append( _sFunction( __xvalue ) )
2230 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2231 for __xvalue in __xserie:
2232 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2233 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2234 for __xvalue in __xserie:
2235 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2237 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2238 # logging.debug("MULTF Internal monoprocessing calculation end")
2240 # logging.debug("MULTF Internal multifonction calculations end")
2243 # ==============================================================================
# NOTE(review): sampled listing — several parameter lines and branch lines
# are elided; code kept byte-identical.
# Shared cost function J(x) = Jb + Jo for static/3D algorithms, selectable by
# quality measure _QM (DA/WLS/LS/L1/ME/QR), with optional storage of
# intermediate quantities into _SSV (self.StoredVariables).
2244 def CostFunction3D(_x,
2245 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2246 _HmX = None, # Simulation déjà faite de Hm(x)
2247 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2252 _SIV = False, # A résorber pour la 8.0
2253 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2254 _nPS = 0, # nbPreviousSteps
2255 _QM = "DA", # QualityMeasure
2256 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2257 _fRt = False, # Restitue ou pas la sortie étendue
2258 _sSc = True, # Stocke ou pas les SSC
2261 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2262 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2263 DFO, QuantileRegression
# NOTE(review): mutable default arguments (_SSC=[], _SSV={}) — shared across
# calls; callers appear to always pass them explicitly, verify before relying
# on defaults.
# Alias .append = .store so Persistence objects and plain lists share an API
2269 for k in ["CostFunctionJ",
2275 "SimulatedObservationAtCurrentOptimum",
2276 "SimulatedObservationAtCurrentState",
2280 if hasattr(_SSV[k],"store"):
2281 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
2283 _X = numpy.asmatrix(numpy.ravel( _x )).T
2284 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2285 _SSV["CurrentState"].append( _X )
# Reuse a pre-computed simulation _HmX when given, otherwise apply _Hm
2287 if _HmX is not None:
2291 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2295 _HX = _Hm( _X, *_arg )
2296 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2298 if "SimulatedObservationAtCurrentState" in _SSC or \
2299 "SimulatedObservationAtCurrentOptimum" in _SSC:
2300 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# NaN in the simulation propagates NaN to all three cost terms
2302 if numpy.any(numpy.isnan(_HX)):
2303 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2305 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2306 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2307 if _BI is None or _RI is None:
2308 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2309 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2310 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2311 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2312 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2314 raise ValueError("Observation error covariance matrix has to be properly defined!")
2316 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2317 elif _QM in ["LeastSquares", "LS", "L2"]:
2319 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2320 elif _QM in ["AbsoluteValue", "L1"]:
2322 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2323 elif _QM in ["MaximumError", "ME"]:
2325 Jo = numpy.max( numpy.abs(_Y - _HX) )
2326 elif _QM in ["QR", "Null"]:
2330 raise ValueError("Unknown asked quality measure!")
2332 J = float( Jb ) + float( Jo )
2335 _SSV["CostFunctionJb"].append( Jb )
2336 _SSV["CostFunctionJo"].append( Jo )
2337 _SSV["CostFunctionJ" ].append( J )
# Track optimum over the current run only (skip _nPS previous steps)
2339 if "IndexOfOptimum" in _SSC or \
2340 "CurrentOptimum" in _SSC or \
2341 "SimulatedObservationAtCurrentOptimum" in _SSC:
2342 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2343 if "IndexOfOptimum" in _SSC:
2344 _SSV["IndexOfOptimum"].append( IndexMin )
2345 if "CurrentOptimum" in _SSC:
2346 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2347 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2348 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
# QuantileRegression needs the residual, not J (elided return branch)
2353 if _QM in ["QR"]: # Pour le QuantileRegression
2358 # ==============================================================================
2359 if __name__ == "__main__":
2360 print('\n AUTODIAGNOSTIC\n')