1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
40 class CacheManager(object):
# Manages a bounded FIFO cache of (point, operator-value) pairs so that
# repeated operator evaluations at (nearly) identical points can be reused.
# NOTE(review): this listing is elided (gaps in the embedded numbering);
# several branch bodies and method headers are missing from this view.
42 Classe générale de gestion d'un cache de calculs
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 Les caractéristiques de tolérance peuvent être modifiées à la création.
# Relative tolerance used when comparing a new point against cached points.
51 self.__tolerBP = float(toleranceInRedundancy)
# Maximum cache length; a negative value means "auto-size on first store"
# (see storeValueInX below).
52 self.__lenghtOR = int(lenghtOfRedundancy)
53 self.__initlnOR = self.__lenghtOR
63 def wasCalculatedIn(self, xValue, oName="" ):
64 "Checks whether a previous calculation exists for the given value"
# Scan the cache backwards (most recent entries first). Each cached entry
# is a tuple (point, value, norm, operator name) — see storeValueInX.
# The bodies of the early-rejection branches are elided in this listing;
# presumably they skip non-matching entries — TODO confirm against full file.
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
69 if not hasattr(xValue, 'size'):
71 elif (str(oName) != self.__listOPCV[i][3]):
73 elif (xValue.size != self.__listOPCV[i][0].size):
# Cheap first-component screen before paying for a full norm computation.
75 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full test: the point is considered redundant when its distance to the
# cached point is below tolerance scaled by the cached point's norm.
77 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
# Cache hit: retrieve the stored operator value for this point.
79 __HxV = self.__listOPCV[i][1]
83 def storeValueInX(self, xValue, HxValue, oName="" ):
84 "Stores, for an operator o, a calculation Hx corresponding to the value x"
# First store with auto-sizing requested (negative length): size the cache
# proportionally to the state size, capped at 50, and record the name.
85 if self.__lenghtOR < 0:
86 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87 self.__initlnOR = self.__lenghtOR
88 self.__seenNames.append(str(oName))
89 if str(oName) not in self.__seenNames: # Extend the list if the name is new
90 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
91 self.__initlnOR += self.__lenghtOR
92 self.__seenNames.append(str(oName))
# Evict oldest entries (FIFO) while the cache exceeds its capacity.
93 while len(self.__listOPCV) > self.__lenghtOR:
94 self.__listOPCV.pop(0)
95 self.__listOPCV.append( (
96 copy.copy(numpy.ravel(xValue)), # 0 Previous point
97 copy.copy(HxValue), # 1 Previous value
98 numpy.linalg.norm(xValue), # 2 Norm
99 str(oName), # 3 Operator name
# Disable/enable toggles for the cache (their "def" headers are elided here):
# disabling zeroes the usable length flag state, enabling restores it.
104 self.__initlnOR = self.__lenghtOR
106 self.__enabled = False
110 self.__lenghtOR = self.__initlnOR
111 self.__enabled = True
113 # ==============================================================================
114 class Operator(object):
# Generic operator wrapper: the operator is held either as a matrix
# (fromMatrix) or as a (multi-)function (fromMethod), with per-instance and
# class-wide call counters and optional redundancy caching through the
# class-level cache manager (Operator.CM).
# NOTE(review): this listing is elided; some branch bodies, "else:" lines
# and method headers are missing from this view.
116 Classe générale d'interface de type opérateur simple
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 inputAsMultiFunction = False,
129 enableMultiProcess = False,
130 extraArguments = None,
133 On construit un objet de ce type en fournissant, à l'aide de l'un des
134 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
137 - name : nom d'opérateur
138 - fromMethod : argument de type fonction Python
139 - fromMatrix : argument adapté au constructeur numpy.matrix
140 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
141 - inputAsMultiFunction : booléen indiquant une fonction explicitement
142 définie (ou pas) en multi-fonction
143 - extraArguments : arguments supplémentaires passés à la fonction de
144 base et ses dérivées (tuple ou dictionnaire)
146 self.__name = str(name)
147 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
148 self.__AvoidRC = bool( avoidingRedundancy )
149 self.__inputAsMF = bool( inputAsMultiFunction )
150 self.__mpEnabled = bool( enableMultiProcess )
151 self.__extraArgs = extraArguments
# Choose the internal representation: a function already declared as a
# multi-function is used directly; a mono-function is wrapped by
# MultiFonction; otherwise a matrix form is built.
152 if fromMethod is not None and self.__inputAsMF:
153 self.__Method = fromMethod # logtimer(fromMethod)
155 self.__Type = "Method"
156 elif fromMethod is not None and not self.__inputAsMF:
157 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
159 self.__Type = "Method"
160 elif fromMatrix is not None:
# NOTE(review): numpy.float was removed in NumPy 1.24 and numpy.matrix is
# discouraged — consider migrating to float and numpy.asarray.
162 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
163 self.__Type = "Matrix"
169 def disableAvoidingRedundancy(self):
# Force-disable the shared calculation cache.
171 Operator.CM.disable()
173 def enableAvoidingRedundancy(self):
# NOTE(review): only this fallback call is visible here; presumably an
# elided branch re-enables the cache when self.__AvoidRC is set — TODO
# confirm against the full file.
178 Operator.CM.disable()
184 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
186 Permet de restituer le résultat de l'application de l'opérateur à une
187 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
188 argument devant a priori être du bon type.
190 - les arguments par série sont :
191 - xValue : argument adapté pour appliquer l'opérateur
192 - HValue : valeur précalculée de l'opérateur en ce point
193 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# If precomputed values HValue are supplied, store them into the cache
# instead of evaluating the operator.
200 if HValue is not None:
204 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
206 if _HValue is not None:
207 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
209 for i in range(len(_HValue)):
210 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
212 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Otherwise evaluate each argument, reusing cached results when possible.
217 for i, xv in enumerate(_xValue):
219 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
221 __alreadyCalculated = False
223 if __alreadyCalculated:
224 self.__addOneCacheCall()
# Matrix form: apply the matrix directly to the flattened point.
227 if self.__Matrix is not None:
228 self.__addOneMatrixCall()
229 _xv = numpy.matrix(numpy.ravel(xv)).T
230 _hv = self.__Matrix * _xv
232 self.__addOneMethodCall()
236 _HxValue.append( _hv )
# Remaining (uncached) points are evaluated in one multi-function call.
238 if len(_xserie)>0 and self.__Matrix is None:
239 if self.__extraArgs is None:
240 _hserie = self.__Method( _xserie ) # MF (multi-function) computation
242 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF (multi-function) computation
243 if not hasattr(_hserie, "pop"):
244 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
250 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally pack the series of results column-wise into one 2D array.
252 if returnSerieAsArrayMatrix:
253 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
255 if argsAsSerie: return _HxValue
256 else: return _HxValue[-1]
258 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
260 Permet de restituer le résultat de l'application de l'opérateur à des
261 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
262 argument devant a priori être du bon type. Si la uValue est None,
263 on suppose que l'opérateur ne s'applique qu'à xValue.
265 - paires : les arguments par paire sont :
266 - xValue : argument X adapté pour appliquer l'opérateur
267 - uValue : argument U adapté pour appliquer l'opérateur
268 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
270 if argsAsSerie: _xuValue = paires
271 else: _xuValue = (paires,)
272 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix form: the control part of each pair is ignored; only X is used.
274 if self.__Matrix is not None:
276 for paire in _xuValue:
277 _xValue, _uValue = paire
278 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
279 self.__addOneMatrixCall()
280 _HxValue.append( self.__Matrix * _xValue )
# Function form: forward the full (X, U) pair when U is given, X alone
# otherwise, then evaluate everything in one multi-function call.
283 for paire in _xuValue:
284 _xValue, _uValue = paire
285 if _uValue is not None:
286 _xuArgs.append( paire )
288 _xuArgs.append( _xValue )
289 self.__addOneMethodCall( len(_xuArgs) )
290 if self.__extraArgs is None:
291 _HxValue = self.__Method( _xuArgs ) # MF (multi-function) computation
293 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF (multi-function) computation
295 if returnSerieAsArrayMatrix:
296 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
298 if argsAsSerie: return _HxValue
299 else: return _HxValue[-1]
301 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
303 Permet de restituer le résultat de l'application de l'opérateur à une
304 série d'arguments xValue, sachant que l'opérateur est valable en
305 xNominal. Cette méthode se contente d'appliquer, son argument devant a
306 priori être du bon type. Si l'opérateur est linéaire car c'est une
307 matrice, alors il est valable en tout point nominal et xNominal peut
308 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
309 permet d'indiquer que l'argument est multi-paires.
311 - paires : les arguments par paire sont :
312 - xNominal : série d'arguments permettant de donner le point où
313 l'opérateur est construit pour être ensuite appliqué
314 - xValue : série d'arguments adaptés pour appliquer l'opérateur
315 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
317 if argsAsSerie: _nxValue = paires
318 else: _nxValue = (paires,)
319 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear (matrix) operator: the nominal point is irrelevant, apply directly.
321 if self.__Matrix is not None:
323 for paire in _nxValue:
324 _xNominal, _xValue = paire
325 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
326 self.__addOneMatrixCall()
327 _HxValue.append( self.__Matrix * _xValue )
# Function form: pass the (nominal, value) pairs through in one call.
329 self.__addOneMethodCall( len(_nxValue) )
330 if self.__extraArgs is None:
331 _HxValue = self.__Method( _nxValue ) # MF (multi-function) computation
333 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF (multi-function) computation
335 if returnSerieAsArrayMatrix:
336 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
338 if argsAsSerie: return _HxValue
339 else: return _HxValue[-1]
341 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
343 Permet de renvoyer l'opérateur sous la forme d'une matrice
# Matrix form is returned directly; a function form requires an operating
# point at which the matrix is evaluated ("UnknownVoidValue" is the
# sentinel for "no point given").
345 if self.__Matrix is not None:
346 self.__addOneMatrixCall()
347 mValue = [self.__Matrix,]
348 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
351 self.__addOneMethodCall( len(ValueForMethodForm) )
352 for _vfmf in ValueForMethodForm:
353 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
355 self.__addOneMethodCall()
356 mValue = self.__Method(((ValueForMethodForm, None),))
358 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
360 if argsAsSerie: return mValue
361 else: return mValue[-1]
# NOTE(review): the "def" header of the shape accessor below is elided in
# this listing.
365 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
366 la forme d'une matrice
368 if self.__Matrix is not None:
369 return self.__Matrix.shape
371 raise ValueError("Matrix form of the operator is not available, nor the shape")
373 def nbcalls(self, which=None):
375 Renvoie les nombres d'évaluations de l'opérateur
# Tuple of counters: instance-level totals first, then class-wide totals
# shared by all Operator instances. "which" selects one entry by index.
378 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
379 self.__NbCallsAsMatrix,
380 self.__NbCallsAsMethod,
381 self.__NbCallsOfCached,
382 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
383 Operator.NbCallsAsMatrix,
384 Operator.NbCallsAsMethod,
385 Operator.NbCallsOfCached,
387 if which is None: return __nbcalls
388 else: return __nbcalls[which]
390 def __addOneMatrixCall(self):
391 "Counts one call"
392 self.__NbCallsAsMatrix += 1 # Local count
393 Operator.NbCallsAsMatrix += 1 # Global count
395 def __addOneMethodCall(self, nb = 1):
396 "Counts one call"
397 self.__NbCallsAsMethod += nb # Local count
398 Operator.NbCallsAsMethod += nb # Global count
400 def __addOneCacheCall(self):
401 "Counts one call"
402 self.__NbCallsOfCached += 1 # Local count
403 Operator.NbCallsOfCached += 1 # Global count
405 # ==============================================================================
406 class FullOperator(object):
# Builds the complete operator triple (Direct, Tangent, Adjoint) from one of
# several user-supplied forms: a single function (with finite-difference
# approximated derivatives), three explicit functions, a script, or a matrix.
# NOTE(review): this listing is elided; some "else:" lines, assignments and
# method headers are missing from this view.
408 Classe générale d'interface de type opérateur complet
409 (Direct, Linéaire Tangent, Adjoint)
412 name = "GenericFullOperator",
414 asOneFunction = None, # 1 Function
415 asThreeFunctions = None, # 3 Functions in a dictionary
416 asScript = None, # 1 or 3 Function(s) by script
417 asDict = None, # Parameters
419 extraArguments = None,
421 inputAsMF = False,# Function(s) as Multi-Functions
426 self.__name = str(name)
427 self.__check = bool(toBeChecked)
428 self.__extraArgs = extraArguments
433 if (asDict is not None) and isinstance(asDict, dict):
434 __Parameters.update( asDict )
435 # Priority to EnableMultiProcessingInDerivatives=True
436 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
437 __Parameters["EnableMultiProcessingInDerivatives"] = True
438 __Parameters["EnableMultiProcessingInEvaluation"] = False
439 if "EnableMultiProcessingInDerivatives" not in __Parameters:
440 __Parameters["EnableMultiProcessingInDerivatives"] = False
# Derivative-side and evaluation-side multiprocessing are exclusive.
441 if __Parameters["EnableMultiProcessingInDerivatives"]:
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "EnableMultiProcessingInEvaluation" not in __Parameters:
444 __Parameters["EnableMultiProcessingInEvaluation"] = False
445 if "withIncrement" in __Parameters: # Temporary
446 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Script form: load either a matrix or the Direct/Tangent/Adjoint
# functions from the user script through the Interfaces importer.
448 if asScript is not None:
449 __Matrix, __Function = None, None
451 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
453 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
454 __Function.update({"useApproximatedDerivatives":True})
455 __Function.update(__Parameters)
456 elif asThreeFunctions:
458 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
459 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
460 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
462 __Function.update(__Parameters)
# Direct in-memory forms: one function (approximated derivatives) or an
# explicit Tangent/Adjoint pair (Direct defaults to Tangent if absent).
465 if asOneFunction is not None:
466 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
467 if asOneFunction["Direct"] is not None:
468 __Function = asOneFunction
470 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
472 __Function = { "Direct":asOneFunction }
473 __Function.update({"useApproximatedDerivatives":True})
474 __Function.update(__Parameters)
475 elif asThreeFunctions is not None:
476 if isinstance(asThreeFunctions, dict) and \
477 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
478 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
479 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
480 __Function = asThreeFunctions
481 elif isinstance(asThreeFunctions, dict) and \
482 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
483 __Function = asThreeFunctions
484 __Function.update({"useApproximatedDerivatives":True})
486 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
487 if "Direct" not in asThreeFunctions:
488 __Function["Direct"] = asThreeFunctions["Tangent"]
489 __Function.update(__Parameters)
493 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
494 # for k in ("Direct", "Tangent", "Adjoint"):
495 # if k in __Function and hasattr(__Function[k],"__class__"):
496 # if type(__Function[k]) is type(self.__init__):
497 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the "applied in X" precomputed values into a dictionary.
499 if appliedInX is not None and isinstance(appliedInX, dict):
500 __appliedInX = appliedInX
501 elif appliedInX is not None:
502 __appliedInX = {"HXb":appliedInX}
506 if scheduledBy is not None:
507 self.__T = scheduledBy
# Case 1: one Direct function with finite-difference (FD) approximated
# derivatives — fill in FD defaults, then build the FDA-backed operators.
509 if isinstance(__Function, dict) and \
510 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
511 ("Direct" in __Function) and (__Function["Direct"] is not None):
512 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
513 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
514 if "withdX" not in __Function: __Function["withdX"] = None
515 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
516 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
517 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
518 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
519 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
520 from daCore import NumericObjects
521 FDA = NumericObjects.FDApproximation(
523 Function = __Function["Direct"],
524 centeredDF = __Function["CenteredFiniteDifference"],
525 increment = __Function["DifferentialIncrement"],
526 dX = __Function["withdX"],
527 extraArguments = self.__extraArgs,
528 avoidingRedundancy = __Function["withAvoidingRedundancy"],
529 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
530 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
531 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
532 mpWorkers = __Function["NumberOfProcesses"],
533 mfEnabled = __Function["withmfEnabled"],
535 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
536 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
537 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: three explicit user functions.
538 elif isinstance(__Function, dict) and \
539 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
540 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
541 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
542 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
543 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: matrix form — the adjoint is simply the transpose.
# NOTE(review): numpy.float was removed in NumPy 1.24 and numpy.matrix is
# discouraged — consider migrating to float and numpy.asarray.
544 elif asMatrix is not None:
545 __matrice = numpy.matrix( __Matrix, numpy.float )
546 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
547 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
548 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
551 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Store precomputed "applied in X" values as column vectors.
553 if __appliedInX is not None:
554 self.__FO["AppliedInX"] = {}
555 for key in list(__appliedInX.keys()):
556 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
557 # Case where we have a true matrix
558 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
559 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
560 # Case of a vector represented as an array with 2 dimensions
561 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
563 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
565 self.__FO["AppliedInX"] = None
# NOTE(review): the "def __repr__"/"def __str__" headers are elided below.
571 "x.__repr__() <==> repr(x)"
572 return repr(self.__FO)
575 "x.__str__() <==> str(x)"
576 return str(self.__FO)
578 # ==============================================================================
579 class Algorithm(object):
581 Classe générale d'interface de type algorithme
583 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
584 d'assimilation, en fournissant un container (dictionnaire) de variables
585 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
587 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
589 def __init__(self, name):
# Initializes the generic storage for an elementary assimilation algorithm:
# a dictionary of named Persistence containers (StoredVariables), the
# parameter/stored-name canonicalization maps, and logging/memory probes.
# NOTE(review): this listing is elided; some lines are missing from view.
591 L'initialisation présente permet de fabriquer des variables de stockage
592 disponibles de manière générique dans les algorithmes élémentaires. Ces
593 variables de stockage sont ensuite conservées dans un dictionnaire
594 interne à l'objet, mais auquel on accède par la méthode "get".
596 Les variables prévues sont :
597 - APosterioriCorrelations : matrice de corrélations de la matrice A
598 - APosterioriCovariance : matrice de covariances a posteriori : A
599 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
600 - APosterioriVariances : vecteur des variances de la matrice A
601 - Analysis : vecteur d'analyse : Xa
602 - BMA : Background moins Analysis : Xa - Xb
603 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
604 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
605 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
606 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
607 - CostFunctionJo : partie observations de la fonction-coût : Jo
608 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
609 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
610 - CurrentOptimum : état optimal courant lors d'itérations
611 - CurrentState : état courant lors d'itérations
612 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
613 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
614 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
615 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
616 - Innovation : l'innovation : d = Y - H(X)
617 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
618 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
619 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
620 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
621 - KalmanGainAtOptimum : gain de Kalman à l'optimum
622 - MahalanobisConsistency : indicateur de consistance des covariances
623 - OMA : Observation moins Analyse : Y - Xa
624 - OMB : Observation moins Background : Y - Xb
625 - ForecastState : état prédit courant lors d'itérations
626 - Residu : dans le cas des algorithmes de vérification
627 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
628 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
629 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
630 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
631 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
632 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
633 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
634 On peut rajouter des variables à stocker dans l'initialisation de
635 l'algorithme élémentaire qui va hériter de cette classe
637 logging.debug("%s Initialisation", str(name))
638 self._m = PlatformInfo.SystemUsage()
640 self._name = str( name )
641 self._parameters = {"StoreSupplementaryCalculations":[]}
642 self.__required_parameters = {}
643 self.__required_inputs = {
644 "RequiredInputValues":{"mandatory":(), "optional":()},
645 "ClassificationTags":[],
647 self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
648 self.__canonical_parameter_name = {} # "lower" -> "correct" correspondence
649 self.__canonical_stored_name = {} # "lower" -> "correct" correspondence
# One Persistence container per storable variable, keyed by its name.
651 self.StoredVariables = {}
652 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
653 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
654 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
655 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
656 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
657 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
658 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
659 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
660 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
661 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
662 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
663 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
664 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
665 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
666 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
667 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
668 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
669 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
670 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
671 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
672 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
673 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
674 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
675 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
676 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
677 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
678 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
679 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
680 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
681 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
682 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
683 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
684 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
685 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
686 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
687 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
688 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
689 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
690 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
691 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Build the case-insensitive lookup maps for stored and parameter names.
693 for k in self.StoredVariables:
694 self.__canonical_stored_name[k.lower()] = k
696 for k, v in self.__variable_names_not_public.items():
697 self.__canonical_parameter_name[k.lower()] = k
698 self.__canonical_parameter_name["algorithm"] = "Algorithm"
699 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
701 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
703 logging.debug("%s Lancement", self._name)
704 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
705 self._getTimeState(reset=True)
707 # Mise a jour des paramètres internes avec le contenu de Parameters, en
708 # reprenant les valeurs par défauts pour toutes celles non définies
709 self.__setParameters(Parameters, reset=True)
710 for k, v in self.__variable_names_not_public.items():
711 if k not in self._parameters: self.__setParameters( {k:v} )
713 # Corrections et compléments des vecteurs
714 def __test_vvalue(argument, variable, argname, symbol=None):
715 if symbol is None: symbol = variable
717 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
718 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
719 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
720 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
722 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
724 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
726 __test_vvalue( Xb, "Xb", "Background or initial state" )
727 __test_vvalue( Y, "Y", "Observation" )
728 __test_vvalue( U, "U", "Control" )
730 # Corrections et compléments des covariances
731 def __test_cvalue(argument, variable, argname, symbol=None):
732 if symbol is None: symbol = variable
734 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
735 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
736 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
737 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
739 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
741 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
743 __test_cvalue( B, "B", "Background" )
744 __test_cvalue( R, "R", "Observation" )
745 __test_cvalue( Q, "Q", "Evolution" )
747 # Corrections et compléments des opérateurs
748 def __test_ovalue(argument, variable, argname, symbol=None):
749 if symbol is None: symbol = variable
750 if argument is None or (isinstance(argument,dict) and len(argument)==0):
751 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
752 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
753 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
754 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
756 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
758 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
760 __test_ovalue( HO, "HO", "Observation", "H" )
761 __test_ovalue( EM, "EM", "Evolution", "M" )
762 __test_ovalue( CM, "CM", "Control Model", "C" )
764 # Corrections et compléments des bornes
765 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
766 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
768 self._parameters["Bounds"] = None
770 # Corrections et compléments de l'initialisation en X
771 if "InitializationPoint" in self._parameters:
773 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
774 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
775 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
776 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
777 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
779 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
781 if self._parameters["InitializationPoint"] is None:
782 raise ValueError("Forced initial point can not be set without any given Background or required value")
784 # Correction pour pallier a un bug de TNC sur le retour du Minimum
785 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
786 self.setParameterValue("StoreInternalVariables",True)
788 # Verbosité et logging
789 if logging.getLogger().level < logging.WARNING:
790 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
791 if PlatformInfo.has_scipy:
792 import scipy.optimize
793 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
795 self._parameters["optmessages"] = 15
797 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
798 if PlatformInfo.has_scipy:
799 import scipy.optimize
800 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
802 self._parameters["optmessages"] = 15
# _post_run: common post-processing after an elementary algorithm run.
# From each stored a posteriori covariance matrix it derives, when requested
# in StoreSupplementaryCalculations, the variances (diagonal), the standard
# deviations (sqrt of the diagonal) and the correlation matrix, then logs
# observation-operator call/cache counts and resource usage.
# NOTE(review): partial extraction — original line numbers are fused into the
# text and line 807 (presumably a docstring) is elided; code kept verbatim.
806     def _post_run(self,_oH=None):
808         if ("StoreSupplementaryCalculations" in self._parameters) and \
809            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
810             for _A in self.StoredVariables["APosterioriCovariance"]:
811                 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
812                     self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
813                 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
814                     self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
815                 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation matrix C = D^{-1/2} A D^{-1/2} with D = diag(A).
816                     _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
817                     _C = numpy.dot(_EI, numpy.dot(_A, _EI))
818                     self.StoredVariables["APosterioriCorrelations"].store( _C )
819         if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
# nbcalls(0) = direct evaluations, nbcalls(3) = cache hits — per the log text.
820             logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
821             logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
822         logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
823         logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
824         logging.debug("%s Terminé", self._name)
827 def _toStore(self, key):
828 "True if in StoreSupplementaryCalculations, else False"
829 return key in self._parameters["StoreSupplementaryCalculations"]
# Dictionary-like accessors of the Algorithm class plus the abstract run()
# entry point and required-parameter declaration.  Keys are resolved
# case-insensitively through self.__canonical_stored_name /
# self.__canonical_parameter_name.
# NOTE(review): partial extraction — several original lines are elided
# (docstring delimiters, some branch keywords); code kept verbatim.
831     def get(self, key=None):
833         Renvoie l'une des variables stockées identifiée par la clé, ou le
834         dictionnaire de l'ensemble des variables disponibles en l'absence de
835         clé. Ce sont directement les variables sous forme objet qui sont
836         renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
837         des classes de persistance.
840             return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
842             return self.StoredVariables
844     def __contains__(self, key=None):
845         "D.__contains__(k) -> True if D has a key k, else False"
846         if key is None or key.lower() not in self.__canonical_stored_name:
849             return self.__canonical_stored_name[key.lower()] in self.StoredVariables
# keys(): original def line (851) elided in this extraction.
852         "D.keys() -> list of D's keys"
853         if hasattr(self, "StoredVariables"):
854             return self.StoredVariables.keys()
# pop(): original def line (858) elided in this extraction.
859         "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
860         if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
861             return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
866                 raise TypeError("pop expected at least 1 arguments, got 0")
867             "If key is not found, d is returned if given, otherwise KeyError is raised"
# Abstract method: concrete algorithms must override run().
873     def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
875         Doit implémenter l'opération élémentaire de calcul d'assimilation sous
876         sa forme mathématique la plus naturelle possible.
878         raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# Declares one required parameter with default/typecast/bounds/allowed values;
# also registers its canonical (case-insensitive) name.
880     def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
882         Permet de définir dans l'algorithme des paramètres requis et leurs
883         caractéristiques par défaut.
886             raise ValueError("A name is mandatory to define a required parameter.")
888         self.__required_parameters[name] = {
890             "typecast" : typecast,
897         self.__canonical_parameter_name[name.lower()] = name
898         logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
900     def getRequiredParameters(self, noDetails=True):
902         Renvoie la liste des noms de paramètres requis ou directement le
903         dictionnaire des paramètres requis.
906             return sorted(self.__required_parameters.keys())
908             return self.__required_parameters
# setParameterValue: controlled evaluation of one required parameter.
# Resolves the canonical name, applies the declared typecast to the given
# value (or to the default when value is None), then validates against
# minval/maxval (element-wise via numpy) and against the allowed-value lists
# listval (public) and listadv (advanced/undocumented).
# NOTE(review): partial extraction — the branches at original lines
# 921/923/927/929-930/941/944 are elided; code kept verbatim.
910     def setParameterValue(self, name=None, value=None):
912         Renvoie la valeur d'un paramètre requis de manière contrôlée
914         __k = self.__canonical_parameter_name[name.lower()]
915         default  = self.__required_parameters[__k]["default"]
916         typecast = self.__required_parameters[__k]["typecast"]
917         minval   = self.__required_parameters[__k]["minval"]
918         maxval   = self.__required_parameters[__k]["maxval"]
919         listval  = self.__required_parameters[__k]["listval"]
920         listadv  = self.__required_parameters[__k]["listadv"]
922         if value is None and default is None:
924         elif value is None and default is not None:
925             if typecast is None: __val = default
926             else:                __val = typecast( default )
928             if typecast is None: __val = value
931                     __val = typecast( value )
933                     raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Bound checks are element-wise so list/array-valued parameters work too.
935         if minval is not None and (numpy.array(__val, float) < minval).any():
936             raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
937         if maxval is not None and (numpy.array(__val, float) > maxval).any():
938             raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
939         if listval is not None or listadv is not None:
940             if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
942                     if listval is not None and v in listval: continue
943                     elif listadv is not None and v in listadv: continue
945                         raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
946             elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
947                 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
# Input-requirement declaration, attribute tagging, internal parameter
# storage and CPU/elapsed time accounting for the Algorithm class.
# NOTE(review): partial extraction — docstring delimiters and some branch
# lines (e.g. 986/988-989, 995-996, 999-1000, 1012, 1014+) are elided;
# code kept verbatim.
951     def requireInputArguments(self, mandatory=(), optional=()):
953         Permet d'imposer des arguments de calcul requis en entrée.
955         self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
956         self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
958     def getInputArguments(self):
960         Permet d'obtenir les listes des arguments de calcul requis en entrée.
962         return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
964     def setAttributes(self, tags=()):
966         Permet d'adjoindre des attributs comme les tags de classification.
967         Renvoie la liste actuelle dans tous les cas.
969         self.__required_inputs["ClassificationTags"].extend( tags )
970         return self.__required_inputs["ClassificationTags"]
# NOTE(review): mutable default argument `fromDico={}` below; the visible
# code only reads it, but a None default would be the safer idiom — confirm
# before changing.
972     def __setParameters(self, fromDico={}, reset=False):
974         Permet de stocker les paramètres reçus dans le dictionnaire interne.
976         self._parameters.update( fromDico )
977         __inverse_fromDico_keys = {}
978         for k in fromDico.keys():
979             if k.lower() in self.__canonical_parameter_name:
980                 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
981         #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
982         __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
983         for k in self.__required_parameters.keys():
984             if k in __canonic_fromDico_keys:
985                 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
987                 self._parameters[k] = self.setParameterValue(k)
990             logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
# _getTimeState: resets or returns (cpu_seconds, elapsed_seconds) since the
# last reset, using time.process_time()/time.perf_counter().
992     def _getTimeState(self, reset=False):
994         Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
997             self.__initial_cpu_time      = time.process_time()
998             self.__initial_elapsed_time  = time.perf_counter()
1001             self.__cpu_time     = time.process_time() - self.__initial_cpu_time
1002             self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1003             return self.__cpu_time, self.__elapsed_time
# _StopOnTimeLimit: stopping criterion on MaximumCpuTime/MaximumElapsedTime.
# The return statement(s) (original lines 1014+) are elided from this view.
1005     def _StopOnTimeLimit(self, X=None, withReason=False):
1006         "Stop criteria on time limit: True/False [+ Reason]"
1007         c, e = self._getTimeState()
1008         if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1009             __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1010         elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1011             __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1013             __SC, __SR = False, ""
1019 # ==============================================================================
# AlgorithmAndParameters: facade that binds a chosen elementary algorithm
# (loaded by name from daAlgorithms) to its parameters and input objects,
# and drives the execution.
# NOTE(review): partial extraction — the __init__ def line and several
# statements are elided (gaps in the original numbering); code kept verbatim.
1020 class AlgorithmAndParameters(object):
1022     Classe générale d'interface d'action pour l'algorithme et ses paramètres
1025                  name               = "GenericAlgorithm",
1032         self.__name       = str(name)
1036         self.__algorithm         = {}
1037         self.__algorithmFile     = None
1038         self.__algorithmName     = None
1040         self.updateParameters( asDict, asScript )
# The algorithm name can come from a script variable "Algorithm" or directly.
1042         if asAlgorithm is None and asScript is not None:
1043             __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1045             __Algo = asAlgorithm
1047         if __Algo is not None:
1048             self.__A = str(__Algo)
1049             self.__P.update( {"Algorithm":self.__A} )
1051         self.__setAlgorithm( self.__A )
1053         self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
1055     def updateParameters(self,
1059         "Mise a jour des parametres"
1060         if asDict is None and asScript is not None:
1061             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1065         if __Dict is not None:
1066             self.__P.update( dict(__Dict) )
# executePythonScheme: unwraps each input object (via .getO() when available),
# validates shape consistency, then calls the algorithm's run().
1068     def executePythonScheme(self, asDictAO = None):
1069         "Permet de lancer le calcul d'assimilation"
1070         Operator.CM.clearCache()
1072         if not isinstance(asDictAO, dict):
1073             raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1074         if hasattr(asDictAO["Background"],"getO"):        self.__Xb = asDictAO["Background"].getO()
1075         elif hasattr(asDictAO["CheckingPoint"],"getO"):   self.__Xb = asDictAO["CheckingPoint"].getO()
1076         else:                                             self.__Xb = None
1077         if hasattr(asDictAO["Observation"],"getO"):       self.__Y  = asDictAO["Observation"].getO()
1078         else:                                             self.__Y  = asDictAO["Observation"]
1079         if hasattr(asDictAO["ControlInput"],"getO"):      self.__U  = asDictAO["ControlInput"].getO()
1080         else:                                             self.__U  = asDictAO["ControlInput"]
1081         if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1082         else:                                             self.__HO = asDictAO["ObservationOperator"]
1083         if hasattr(asDictAO["EvolutionModel"],"getO"):    self.__EM = asDictAO["EvolutionModel"].getO()
1084         else:                                             self.__EM = asDictAO["EvolutionModel"]
1085         if hasattr(asDictAO["ControlModel"],"getO"):      self.__CM = asDictAO["ControlModel"].getO()
1086         else:                                             self.__CM = asDictAO["ControlModel"]
1087         self.__B = asDictAO["BackgroundError"]
1088         self.__R = asDictAO["ObservationError"]
1089         self.__Q = asDictAO["EvolutionError"]
1091         self.__shape_validate()
1093         self.__algorithm.run(
1103             Parameters    = self.__P,
# executeYACSScheme: runs the assimilation through a YACS XML scheme inside
# a SALOME session (requires the SALOME/YACS/ADAO environment).  Loads the
# scheme with loader.YACSLoader, checks parser/validity/consistency reports,
# then executes it with pilot.ExecutorSwig.
# NOTE(review): partial extraction — the `try:`/`else:` lines around the
# load and the import of `pilot`/`loader` are elided; code kept verbatim.
1107     def executeYACSScheme(self, FileName=None):
1108         "Permet de lancer le calcul d'assimilation"
1109         if FileName is None or not os.path.exists(FileName):
1110             raise ValueError("a YACS file name has to be given for YACS execution.\n")
1112             __file    = os.path.abspath(FileName)
1113             logging.debug("The YACS file name is \"%s\"."%__file)
1114         if not PlatformInfo.has_salome or \
1115             not PlatformInfo.has_yacs or \
1116             not PlatformInfo.has_adao:
1117             raise ImportError("\n\n"+\
1118                 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1119                 "Please load the right environnement before trying to use it.\n")
1122         import SALOMERuntime
1124         SALOMERuntime.RuntimeSALOME_setRuntime()
1126         r = pilot.getRuntime()
1127         xmlLoader = loader.YACSLoader()
1128         xmlLoader.registerProcCataLoader()
1130             catalogAd = r.loadCatalog("proc", __file)
1131             r.addCatalog(catalogAd)
1136             p = xmlLoader.load(__file)
1137         except IOError as ex:
1138             print("The YACS XML schema file can not be loaded: %s"%(ex,))
1140         logger = p.getLogger("parser")
1141         if not logger.isEmpty():
1142             print("The imported YACS XML schema has errors on parsing:")
1143             print(logger.getStr())
1146             print("The YACS XML schema is not valid and will not be executed:")
1147             print(p.getErrorReport())
1149         info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1150         p.checkConsistency(info)
1151         if info.areWarningsOrErrors():
1152             print("The YACS XML schema is not coherent and will not be executed:")
1153             print(info.getGlobalRepr())
1155         e = pilot.ExecutorSwig()
1157         if p.getEffectiveState() != pilot.DONE:
1158             print(p.getErrorReport())
# get(): looks the key up first in the algorithm's stored variables, then in
# the parameters; with no key it returns all public variables (the
# non-public ones are removed from the returned parameter dictionary).
1162     def get(self, key = None):
1163         "Vérifie l'existence d'une clé de variable ou de paramètres"
1164             if key in self.__algorithm:
1165                 return self.__algorithm.get( key )
1166             elif key in self.__P:
1167                 return self.__P[key]
1169             allvariables = self.__P
1170             for k in self.__variable_names_not_public: allvariables.pop(k, None)
def pop(self, k, d):
    "Required for the pickling protocol."
    # Delegate the removal to the wrapped algorithm storage, *d* as fallback.
    _store = self.__algorithm
    return _store.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the required parameters as declared by the chosen algorithm."
    # Plain delegation to the wrapped elementary algorithm object.
    _algo = self.__algorithm
    return _algo.getRequiredParameters(noDetails)
def getAlgorithmInputArguments(self):
    "Return the (mandatory, optional) input arguments required by the algorithm."
    _algo = self.__algorithm
    return _algo.getInputArguments()
def getAlgorithmAttributes(self):
    "Return the classification attributes (tags) of the chosen algorithm."
    # Calling setAttributes() with its default empty tag tuple returns the
    # current tag list without modifying it.
    _algo = self.__algorithm
    return _algo.setAttributes()
# Observer management on the algorithm's stored variables, plus keys().
# Each method first checks that an algorithm has been chosen and exposes
# StoredVariables before touching the data observers.
# NOTE(review): partial extraction — some argument lines of the
# setDataObserver/removeDataObserver calls and the `return False` branches
# of hasObserver, as well as the keys() def line (1225), are elided;
# code kept verbatim.
1189     def setObserver(self, __V, __O, __I, __S):
1190         if self.__algorithm is None \
1191             or isinstance(self.__algorithm, dict) \
1192             or not hasattr(self.__algorithm,"StoredVariables"):
1193             raise ValueError("No observer can be build before choosing an algorithm.")
1194         if __V not in self.__algorithm:
1195             raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1197         self.__algorithm.StoredVariables[ __V ].setDataObserver(
1200             HookParameters = __I,
1203     def removeObserver(self, __V, __O, __A = False):
1204         if self.__algorithm is None \
1205             or isinstance(self.__algorithm, dict) \
1206             or not hasattr(self.__algorithm,"StoredVariables"):
1207             raise ValueError("No observer can be removed before choosing an algorithm.")
1208         if __V not in self.__algorithm:
1209             raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1211         return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1216     def hasObserver(self, __V):
1217         if self.__algorithm is None \
1218             or isinstance(self.__algorithm, dict) \
1219             or not hasattr(self.__algorithm,"StoredVariables"):
1221         if __V not in self.__algorithm:
1223         return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# keys(): union of algorithm variables and parameters, minus non-public ones.
1226         __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1227         for k in self.__variable_names_not_public:
1228             if k in __allvariables: __allvariables.remove(k)
1229         return __allvariables
1231 def __contains__(self, key=None):
1232 "D.__contains__(k) -> True if D has a key k, else False"
1233 return key in self.__algorithm or key in self.__P
# __repr__/__str__ fragments (their def lines, original 1235/1239, are
# elided) and __setAlgorithm: locates the module <choice>.py under a
# "daAlgorithms" directory on sys.path, imports it, checks it defines
# ElementaryAlgorithm, and instantiates it.  The algorithm choice is
# one-shot: re-selection raises.
# NOTE(review): partial extraction — the search-loop `try:` and some lines
# are elided; code kept verbatim.
1236         "x.__repr__() <==> repr(x)"
1237         return repr(self.__A)+", "+repr(self.__P)
1240         "x.__str__() <==> str(x)"
1241         return str(self.__A)+", "+str(self.__P)
1243     def __setAlgorithm(self, choice = None ):
1245         Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1246         d'assimilation. L'argument est un champ caractère se rapportant au nom
1247         d'un algorithme réalisant l'opération sur les arguments fixes.
1250             raise ValueError("Error: algorithm choice has to be given")
1251         if self.__algorithmName is not None:
1252             raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1253         daDirectory = "daAlgorithms"
1255         # Recherche explicitement le fichier complet
1256         # ------------------------------------------
1258         for directory in sys.path:
1259             if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1260                 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1261         if module_path is None:
1262             raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n             The search path is %s"%(choice, sys.path))
1264         # Importe le fichier complet comme un module
1265         # ------------------------------------------
# sys.path is temporarily prepended with the algorithm directory, then restored.
1267             sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1268             self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1269             if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1270                 raise ImportError("this module does not define a valid elementary algorithm.")
1271             self.__algorithmName = str(choice)
1272             sys.path = sys_path_tmp ; del sys_path_tmp
1273         except ImportError as e:
1274             raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n             The import error message is: %s"%(choice,e))
1276         # Instancie un objet du type élémentaire du fichier
1277         # -------------------------------------------------
1278         self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# __shape_validate: derives a canonical shape for every input (vectors Xb,
# Y, U; covariances B, R, Q; operators HO, EM, CM), accepting objects that
# expose either .size, a tuple .shape, or a callable .shape(), then
# cross-checks all pairwise compatibility conditions and raises
# ValueError/TypeError on mismatch.  Special case: for "EnsembleBlue" a
# background larger than B is reshaped into a persistent vector series.
# NOTE(review): partial extraction — blank lines, docstring delimiters and
# a few branch lines (e.g. original 1378 `else:`) are elided; kept verbatim.
1281     def __shape_validate(self):
1283         Validation de la correspondance correcte des tailles des variables et
1284         des matrices s'il y en a.
1286         if self.__Xb is None:                      __Xb_shape = (0,)
1287         elif hasattr(self.__Xb,"size"):            __Xb_shape = (self.__Xb.size,)
1288         elif hasattr(self.__Xb,"shape"):
1289             if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1290             else:                                  __Xb_shape = self.__Xb.shape()
1291         else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1293         if self.__Y is None:                      __Y_shape = (0,)
1294         elif hasattr(self.__Y,"size"):            __Y_shape = (self.__Y.size,)
1295         elif hasattr(self.__Y,"shape"):
1296             if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1297             else:                                 __Y_shape = self.__Y.shape()
1298         else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1300         if self.__U is None:                      __U_shape = (0,)
1301         elif hasattr(self.__U,"size"):            __U_shape = (self.__U.size,)
1302         elif hasattr(self.__U,"shape"):
1303             if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1304             else:                                 __U_shape = self.__U.shape()
1305         else: raise TypeError("The control (U) has no attribute of shape: problem !")
1307         if self.__B is None:                      __B_shape = (0,0)
1308         elif hasattr(self.__B,"shape"):
1309             if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1310             else:                                 __B_shape = self.__B.shape()
1311         else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1313         if self.__R is None:                      __R_shape = (0,0)
1314         elif hasattr(self.__R,"shape"):
1315             if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1316             else:                                 __R_shape = self.__R.shape()
1317         else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1319         if self.__Q is None:                      __Q_shape = (0,0)
1320         elif hasattr(self.__Q,"shape"):
1321             if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1322             else:                                 __Q_shape = self.__Q.shape()
1323         else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
# Operators are dictionaries of forms; shape is taken from the "Direct" form.
1325         if len(self.__HO) == 0:                              __HO_shape = (0,0)
1326         elif isinstance(self.__HO, dict):                    __HO_shape = (0,0)
1327         elif hasattr(self.__HO["Direct"],"shape"):
1328             if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1329             else:                                            __HO_shape = self.__HO["Direct"].shape()
1330         else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1332         if len(self.__EM) == 0:                              __EM_shape = (0,0)
1333         elif isinstance(self.__EM, dict):                    __EM_shape = (0,0)
1334         elif hasattr(self.__EM["Direct"],"shape"):
1335             if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1336             else:                                            __EM_shape = self.__EM["Direct"].shape()
1337         else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1339         if len(self.__CM) == 0:                              __CM_shape = (0,0)
1340         elif isinstance(self.__CM, dict):                    __CM_shape = (0,0)
1341         elif hasattr(self.__CM["Direct"],"shape"):
1342             if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1343             else:                                            __CM_shape = self.__CM["Direct"].shape()
1344         else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1346         # Vérification des conditions
1347         # ---------------------------
1348         if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1349             raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1350         if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1351             raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
# Covariances and the evolution operator must be square.
1353         if not( min(__B_shape) == max(__B_shape) ):
1354             raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1355         if not( min(__R_shape) == max(__R_shape) ):
1356             raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1357         if not( min(__Q_shape) == max(__Q_shape) ):
1358             raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1359         if not( min(__EM_shape) == max(__EM_shape) ):
1360             raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1362         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1363             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1364         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1365             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1366         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1367             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1368         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1369             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# EnsembleBlue: a background longer than B is reinterpreted as an ensemble
# of background members, stored as a persistent vector series.
1371         if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1372             if self.__algorithmName in ["EnsembleBlue",]:
1373                 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1374                 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1375                 for member in asPersistentVector:
# NOTE(review): numpy.float is deprecated/removed in recent NumPy — confirm
# the supported NumPy range before upgrading.
1376                     self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1377                 __Xb_shape = min(__B_shape)
1379                 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1381         if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1382             raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1384         if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1385             raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1387         if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1388             raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1390         if ("Bounds" in self.__P) \
1391             and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1392             and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1393             raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1394                 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1398 # ==============================================================================
# RegulationAndParameters: lightweight holder for a regulation algorithm
# name and its parameter dictionary, fed either directly or from a script
# (variables "Algorithm" and "<name>_Parameters" via ImportFromScript).
# NOTE(review): partial extraction — the __init__ def line and several
# branch lines are elided; code kept verbatim.
1399 class RegulationAndParameters(object):
1401     Classe générale d'interface d'action pour la régulation et ses paramètres
1404                  name               = "GenericRegulation",
1411         self.__name       = str(name)
1414         if asAlgorithm is None and asScript is not None:
1415             __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1417             __Algo = asAlgorithm
1419         if asDict is None and asScript is not None:
1420             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1424         if __Dict is not None:
1425             self.__P.update( dict(__Dict) )
1427         if __Algo is not None:
1428             self.__P.update( {"Algorithm":str(__Algo)} )
1430     def get(self, key = None):
1431         "Vérifie l'existence d'une clé de variable ou de paramètres"
1433             return self.__P[key]
1437 # ==============================================================================
1438 class DataObserver(object):
1440 Classe générale d'interface de type observer
1443 name = "GenericObserver",
1455 self.__name = str(name)
1460 if onVariable is None:
1461 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1462 elif type(onVariable) in (tuple, list):
1463 self.__V = tuple(map( str, onVariable ))
1464 if withInfo is None:
1467 self.__I = (str(withInfo),)*len(self.__V)
1468 elif isinstance(onVariable, str):
1469 self.__V = (onVariable,)
1470 if withInfo is None:
1471 self.__I = (onVariable,)
1473 self.__I = (str(withInfo),)
1475 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1477 if asObsObject is not None:
1478 self.__O = asObsObject
1480 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1481 __Function = Observer2Func(__FunctionText)
1482 self.__O = __Function.getfunc()
1484 for k in range(len(self.__V)):
1487 if ename not in withAlgo:
1488 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1490 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1493 "x.__repr__() <==> repr(x)"
1494 return repr(self.__V)+"\n"+repr(self.__O)
1497 "x.__str__() <==> str(x)"
1498 return str(self.__V)+"\n"+str(self.__O)
1500 # ==============================================================================
# UserScript: resolves a user-provided function text from, in precedence
# order, a direct string, a named template (UserPostAnalysis/Observer
# template tables), or a script file; str(self) yields the function text.
# NOTE(review): partial extraction — the __init__ def line, the asString
# assignment branch and the final else are elided; code kept verbatim.
1501 class UserScript(object):
1503     Classe générale d'interface de type texte de script utilisateur
1506                  name       = "GenericUserScript",
1513         self.__name       = str(name)
1515         if asString is not None:
1517         elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1518             self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1519         elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1520             self.__F = Templates.ObserverTemplates[asTemplate]
1521         elif asScript is not None:
1522             self.__F = Interfaces.ImportFromScript(asScript).getstring()
1527         "x.__repr__() <==> repr(x)"
1528         return repr(self.__F)
1531         "x.__str__() <==> str(x)"
1532         return str(self.__F)
1534 # ==============================================================================
# ExternalParameters: dictionary-like container of user-supplied parameters,
# fillable from a dict or from a script variable "<name>_ExternalParameters".
# NOTE(review): partial extraction — def lines of get/keys/items and some
# branches are elided (gaps in the original numbering); code kept verbatim.
1535 class ExternalParameters(object):
1537     Classe générale d'interface de type texte de script utilisateur
1540                  name        = "GenericExternalParameters",
1546         self.__name = str(name)
1549         self.updateParameters( asDict, asScript )
1551     def updateParameters(self,
1555         "Mise a jour des parametres"
1556         if asDict is None and asScript is not None:
1557             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1561         if __Dict is not None:
1562             self.__P.update( dict(__Dict) )
# get(): returns the value for a key, or the key list when no key is given
# (the surrounding def/branch lines are elided from this view).
1564     def get(self, key = None):
1566             return self.__P[key]
1568             return list(self.__P.keys())
1571         return list(self.__P.keys())
1573     def pop(self, k, d):
1574         return self.__P.pop(k, d)
1577         return self.__P.items()
1579     def __contains__(self, key=None):
1580         "D.__contains__(k) -> True if D has a key k, else False"
1581         return key in self.__P
1583 # ==============================================================================
1584 class State(object):
# English: "General state-type interface class" — wraps a single vector or a
# series of vectors (Persistence object), loaded from direct values, a user
# script, or data files.
# NOTE(review): this listing is elided — docstring delimiters, several `else:`
# lines and some `def` headers are on missing lines. Comments only are added.
1586 Classe générale d'interface de type état
1589 name = "GenericVector",
1591 asPersistentVector = None,
1597 toBeChecked = False,
1600 Permet de définir un vecteur :
1601 - asVector : entrée des données, comme un vecteur compatible avec le
1602 constructeur de numpy.matrix, ou "True" si entrée par script.
1603 - asPersistentVector : entrée des données, comme une série de vecteurs
1604 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1605 type Persistence, ou "True" si entrée par script.
1606 - asScript : si un script valide est donné contenant une variable
1607 nommée "name", la variable est de type "asVector" (par défaut) ou
1608 "asPersistentVector" selon que l'une de ces variables est placée à
1610 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1611 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1612 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1613 nommée "name"), on récupère les colonnes et on les range ligne après
1614 ligne (colMajor=False, par défaut) ou colonne après colonne
1615 (colMajor=True). La variable résultante est de type "asVector" (par
1616 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1619 self.__name = str(name)
1620 self.__check = bool(toBeChecked)
# Internal type flags: exactly one of these becomes True below.
1624 self.__is_vector = False
1625 self.__is_series = False
# Input source 1: a user script providing the variable named self.__name.
1627 if asScript is not None:
1628 __Vector, __Series = None, None
1629 if asPersistentVector:
1630 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1632 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
# Input source 2: one or several data files read column-wise.
1633 elif asDataFile is not None:
1634 __Vector, __Series = None, None
1635 if asPersistentVector:
1636 if colNames is not None:
1637 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1639 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Transpose so that the series orientation matches colMajor, except for the
# npz format which is already stored the other way round.
1640 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1641 __Series = numpy.transpose(__Series)
1642 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1643 __Series = numpy.transpose(__Series)
1645 if colNames is not None:
1646 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1648 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Flatten column-major ("F") or row-major ("C") according to colMajor.
1650 __Vector = numpy.ravel(__Vector, order = "F")
1652 __Vector = numpy.ravel(__Vector, order = "C")
# Input source 3 (fallback): values passed directly as arguments.
1654 __Vector, __Series = asVector, asPersistentVector
# Store either a single column-vector...
1656 if __Vector is not None:
1657 self.__is_vector = True
# NOTE(review): numpy.float is a removed alias (NumPy >= 1.24) — should be
# the builtin float; also numpy.matrix/asmatrix are deprecated. TODO confirm
# the targeted NumPy version before changing.
1658 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1659 self.shape = self.__V.shape
1660 self.size = self.__V.size
# ...or a series of vectors stored in a Persistence.OneVector object.
1661 elif __Series is not None:
1662 self.__is_series = True
1663 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1664 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval on a string input is a code-execution risk if the
# series string can come from untrusted data — flagging, not changing.
1665 if isinstance(__Series, str): __Series = eval(__Series)
1666 for member in __Series:
1667 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
# shape may be an attribute or a callable depending on the stored object.
1670 if isinstance(self.__V.shape, (tuple, list)):
1671 self.shape = self.__V.shape
1673 self.shape = self.__V.shape()
# Normalize a 1-D shape to an explicit column shape (n, 1).
1674 if len(self.shape) == 1:
1675 self.shape = (self.shape[0],1)
1676 self.size = self.shape[0] * self.shape[1]
1678 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
# Optional time scheduling attached to the state.
1680 if scheduledBy is not None:
1681 self.__T = scheduledBy
1683 def getO(self, withScheduler=False):
# Return the stored value, optionally together with the scheduler.
1685 return self.__V, self.__T
1686 elif self.__T is None:
1692 "Internal type check"
1693 return self.__is_vector
1696 "Internal type check"
1697 return self.__is_series
1700 "x.__repr__() <==> repr(x)"
1701 return repr(self.__V)
1704 "x.__str__() <==> str(x)"
1705 return str(self.__V)
1707 # ==============================================================================
1708 class Covariance(object):
# English: "General covariance-type interface class" — a covariance matrix can
# be stored as a full matrix, a diagonal vector, a scalar multiple of the
# identity, or an arbitrary object implementing the matrix protocol below.
# NOTE(review): this listing is elided — docstring delimiters, `elif`
# conditions selecting the script input kind, `else:` lines and several `def`
# headers are on missing lines. Comments/docstrings only are changed here.
1710 Classe générale d'interface de type covariance
1713 name = "GenericCovariance",
1714 asCovariance = None,
1715 asEyeByScalar = None,
1716 asEyeByVector = None,
1719 toBeChecked = False,
1722 Permet de définir une covariance :
1723 - asCovariance : entrée des données, comme une matrice compatible avec
1724 le constructeur de numpy.matrix
1725 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1726 multiplicatif d'une matrice de corrélation identité, aucune matrice
1727 n'étant donc explicitement à donner
1728 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1729 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1730 n'étant donc explicitement à donner
1731 - asCovObject : entrée des données comme un objet python, qui a les
1732 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1733 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1734 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1735 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1736 pleine doit être vérifié
1738 self.__name = str(name)
1739 self.__check = bool(toBeChecked)
# Internal representation flags: exactly one becomes True below.
1742 self.__is_scalar = False
1743 self.__is_vector = False
1744 self.__is_matrix = False
1745 self.__is_object = False
# Script input: the elided elif conditions select which kind (scalar /
# vector / object / matrix) the script variable is read as.
1747 if asScript is not None:
1748 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1750 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1752 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1754 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1756 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
# Direct input (fallback): values passed as constructor arguments.
1758 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar * identity representation.
1760 if __Scalar is not None:
1761 if numpy.matrix(__Scalar).size != 1:
1762 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1763 self.__is_scalar = True
# abs() forces a non-negative variance; positivity itself is checked later
# in __validate.
1764 self.__C = numpy.abs( float(__Scalar) )
# Diagonal-vector representation.
1767 elif __Vector is not None:
1768 self.__is_vector = True
1769 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1770 self.shape = (self.__C.size,self.__C.size)
1771 self.size = self.__C.size**2
# Full-matrix representation.
1772 elif __Matrix is not None:
1773 self.__is_matrix = True
# NOTE(review): numpy.matrix is deprecated in favor of numpy.ndarray —
# flagged only, not changed here.
1774 self.__C = numpy.matrix( __Matrix, float )
1775 self.shape = self.__C.shape
1776 self.size = self.__C.size
# Arbitrary-object representation: the object must expose the matrix
# protocol checked below.
1777 elif __Object is not None:
1778 self.__is_object = True
1780 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1781 if not hasattr(self.__C,at):
1782 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1783 if hasattr(self.__C,"shape"):
1784 self.shape = self.__C.shape
1787 if hasattr(self.__C,"size"):
1788 self.size = self.__C.size
1793 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1797 def __validate(self):
# Sanity checks on the stored covariance: definedness, squareness, and
# (optionally, or when logging is verbose) symmetric positive-definiteness
# via a Cholesky factorization attempt.
1799 if self.__C is None:
1800 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1801 if self.ismatrix() and min(self.shape) != max(self.shape):
1802 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1803 if self.isobject() and min(self.shape) != max(self.shape):
1804 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1805 if self.isscalar() and self.__C <= 0:
1806 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1807 if self.isvector() and (self.__C <= 0).any():
1808 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1809 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
# The elided lines around these are presumably a try/except re-raising on
# Cholesky failure — TODO confirm against the full source.
1811 L = numpy.linalg.cholesky( self.__C )
1813 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1814 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1816 L = self.__C.cholesky()
1818 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1821 "Internal type check"
1822 return self.__is_scalar
1825 "Internal type check"
1826 return self.__is_vector
1829 "Internal type check"
1830 return self.__is_matrix
1833 "Internal type check"
1834 return self.__is_object
# getI: inverse covariance, returned as a new Covariance in the same
# representation (matrix inverse, element-wise reciprocal, or delegated).
1839 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
1840 elif self.isvector():
1841 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1842 elif self.isscalar():
1843 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1844 elif self.isobject() and hasattr(self.__C,"getI"):
1845 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1847 return None # Indispensable
# getT: transpose; diagonal and scalar forms are symmetric so they are
# returned unchanged.
1852 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1853 elif self.isvector():
1854 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1855 elif self.isscalar():
1856 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1857 elif self.isobject() and hasattr(self.__C,"getT"):
1858 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1860 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
1863 "Cholesky decomposition"
1865 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1866 elif self.isvector():
1867 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1868 elif self.isscalar():
1869 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1870 elif self.isobject() and hasattr(self.__C,"cholesky"):
1871 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1873 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
1875 def choleskyI(self):
1876 "Inverse of the Cholesky decomposition"
1878 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1879 elif self.isvector():
1880 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1881 elif self.isscalar():
1882 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1883 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1884 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1886 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
1889 "Matrix square root"
# numpy.real drops spurious imaginary parts that scipy.linalg.sqrtm can
# produce from numerical noise.
1892 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
1893 elif self.isvector():
1894 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1895 elif self.isscalar():
1896 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1897 elif self.isobject() and hasattr(self.__C,"sqrtm"):
1898 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
1900 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
1903 "Inverse of the matrix square root"
1906 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
1907 elif self.isvector():
1908 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1909 elif self.isscalar():
1910 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1911 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
1912 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
1914 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
1916 def diag(self, msize=None):
1917 "Diagonal of the matrix"
# For the scalar form, msize is required to know the diagonal length.
1919 return numpy.diag(self.__C)
1920 elif self.isvector():
1922 elif self.isscalar():
1924 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1926 return self.__C * numpy.ones(int(msize))
1927 elif self.isobject() and hasattr(self.__C,"diag"):
1928 return self.__C.diag()
1930 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
1932 def asfullmatrix(self, msize=None):
# Materialize the covariance as a dense ndarray, whatever its internal
# representation; msize is required for the scalar form.
1935 return numpy.asarray(self.__C)
1936 elif self.isvector():
1937 return numpy.asarray( numpy.diag(self.__C), float )
1938 elif self.isscalar():
1940 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1942 return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
1943 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1944 return self.__C.asfullmatrix()
1946 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
1948 def trace(self, msize=None):
1949 "Trace of the matrix"
1951 return numpy.trace(self.__C)
1952 elif self.isvector():
1953 return float(numpy.sum(self.__C))
1954 elif self.isscalar():
1956 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1958 return self.__C * int(msize)
1959 elif self.isobject():
1960 return self.__C.trace()
1962 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
1968 "x.__repr__() <==> repr(x)"
1969 return repr(self.__C)
1972 "x.__str__() <==> str(x)"
1973 return str(self.__C)
1975 def __add__(self, other):
1976 "x.__add__(y) <==> x+y"
1977 if self.ismatrix() or self.isobject():
1978 return self.__C + numpy.asmatrix(other)
1979 elif self.isvector() or self.isscalar():
# Add the diagonal in place on a flat view of "other": the stride
# _A.shape[1]+1 steps through the diagonal elements of a square matrix.
1980 _A = numpy.asarray(other)
1981 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1982 return numpy.asmatrix(_A)
1984 def __radd__(self, other):
1985 "x.__radd__(y) <==> y+x"
1986 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1988 def __sub__(self, other):
1989 "x.__sub__(y) <==> x-y"
1990 if self.ismatrix() or self.isobject():
1991 return self.__C - numpy.asmatrix(other)
1992 elif self.isvector() or self.isscalar():
1993 _A = numpy.asarray(other)
1994 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1995 return numpy.asmatrix(_A)
1997 def __rsub__(self, other):
1998 "x.__rsub__(y) <==> y-x"
1999 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2002 "x.__neg__() <==> -x"
2005 def __matmul__(self, other):
2006 "x.__matmul__(y) <==> x@y"
# Matrix-product operator: dispatch on the internal representation and on
# whether "other" is a vector (size matches) or a matrix (rows match).
2007 if self.ismatrix() and isinstance(other, (int, float)):
2008 return numpy.asarray(self.__C) * other
2009 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2010 if numpy.ravel(other).size == self.shape[1]: # Vector
2011 return numpy.ravel(self.__C @ numpy.ravel(other))
2012 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
2013 return numpy.asarray(self.__C) @ numpy.asarray(other)
2015 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2016 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2017 if numpy.ravel(other).size == self.shape[1]: # Vector
2018 return numpy.ravel(self.__C) * numpy.ravel(other)
2019 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
2020 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2022 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2023 elif self.isscalar() and isinstance(other,numpy.matrix):
2024 return numpy.asarray(self.__C * other)
2025 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2026 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2027 return self.__C * numpy.ravel(other)
2029 return self.__C * numpy.asarray(other)
2030 elif self.isobject():
2031 return self.__C.__matmul__(other)
2033 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2035 def __mul__(self, other):
2036 "x.__mul__(y) <==> x*y"
# Same dispatch as __matmul__ but with numpy.matrix "*" semantics.
2037 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2038 return self.__C * other
2039 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2040 if numpy.ravel(other).size == self.shape[1]: # Vector
2041 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2042 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2043 return self.__C * numpy.asmatrix(other)
2045 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2046 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2047 if numpy.ravel(other).size == self.shape[1]: # Vector
2048 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2049 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2050 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2052 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2053 elif self.isscalar() and isinstance(other,numpy.matrix):
2054 return self.__C * other
2055 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2056 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2057 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2059 return self.__C * numpy.asmatrix(other)
2060 elif self.isobject():
2061 return self.__C.__mul__(other)
2063 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2065 def __rmatmul__(self, other):
2066 "x.__rmatmul__(y) <==> y@x"
# Reflected matrix product: "other" is on the left, so its columns must
# match this covariance's rows.
2067 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2068 return other * self.__C
2069 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2070 if numpy.ravel(other).size == self.shape[1]: # Vector
2071 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2072 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2073 return numpy.asmatrix(other) * self.__C
2075 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2076 elif self.isvector() and isinstance(other,numpy.matrix):
2077 if numpy.ravel(other).size == self.shape[0]: # Vector
2078 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2079 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
2080 return numpy.asmatrix(numpy.array(other) * self.__C)
2082 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2083 elif self.isscalar() and isinstance(other,numpy.matrix):
2084 return other * self.__C
2085 elif self.isobject():
2086 return self.__C.__rmatmul__(other)
2088 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2090 def __rmul__(self, other):
2091 "x.__rmul__(y) <==> y*x"
2092 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2093 return other * self.__C
2094 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2095 if numpy.ravel(other).size == self.shape[1]: # Vector
2096 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2097 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2098 return numpy.asmatrix(other) * self.__C
2100 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2101 elif self.isvector() and isinstance(other,numpy.matrix):
2102 if numpy.ravel(other).size == self.shape[0]: # Vector
2103 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2104 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
2105 return numpy.asmatrix(numpy.array(other) * self.__C)
2107 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2108 elif self.isscalar() and isinstance(other,numpy.matrix):
2109 return other * self.__C
2110 elif self.isobject():
2111 return self.__C.__rmul__(other)
2113 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2116 "x.__len__() <==> len(x)"
2117 return self.shape[0]
2119 # ==============================================================================
2120 class Observer2Func(object):
# English: "Creation of an observer function from its text."
# NOTE(review): the body of func (which presumably exec's self.__corps) and
# the getfunc def header are on elided lines — TODO confirm against the full
# source.
2122 Creation d'une fonction d'observateur a partir de son texte
2124 def __init__(self, corps=""):
# Keep the observer's source text; it is turned into a callable by func.
2125 self.__corps = corps
2126 def func(self,var,info):
2127 "Observation function"
2130 "Return the function pointer held by the object"
2133 # ==============================================================================
2134 class CaseLogger(object):
# English: "Keeps the creation commands of a case" — records each user command
# and can replay it through pluggable viewers (dump) and loaders (load).
# NOTE(review): the dict-literal openers for self.__viewers / self.__loaders
# are on elided lines; only comments/docstrings are changed here.
2136 Conservation des commandes de creation d'un cas
2138 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2139 self.__name = str(__name)
2140 self.__objname = str(__objname)
# Chronological list of (command, keys, locals, pre, switchoff) tuples.
2141 self.__logSerie = []
2142 self.__switchoff = False
# Built-in output formats (viewers)...
2144 "TUI" :Interfaces._TUIViewer,
2145 "SCD" :Interfaces._SCDViewer,
2146 "YACS":Interfaces._YACSViewer,
# ...and built-in input formats (loaders).
2149 "TUI" :Interfaces._TUIViewer,
2150 "COM" :Interfaces._COMViewer,
# Optional user-supplied extra formats.
2152 if __addViewers is not None:
2153 self.__viewers.update(dict(__addViewers))
2154 if __addLoaders is not None:
2155 self.__loaders.update(dict(__addLoaders))
2157 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2158 "Record an individual command"
2159 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
# "self" is never useful in a replayed command line.
2160 if "self" in __keys: __keys.remove("self")
2161 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2163 self.__switchoff = True
2165 self.__switchoff = False
2167 def dump(self, __filename=None, __format="TUI", __upa=""):
2168 "Normalized restitution of the commands"
2169 if __format in self.__viewers:
2170 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2172 raise ValueError("Dumping as \"%s\" is not available"%__format)
2173 return __formater.dump(__filename, __upa)
2175 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2176 "Normalized loading of the commands"
2177 if __format in self.__loaders:
2178 __formater = self.__loaders[__format]()
2180 raise ValueError("Loading as \"%s\" is not available"%__format)
2181 return __formater.load(__filename, __content, __object)
2183 # ==============================================================================
# NOTE(review): the `def MultiFonction(` header line is elided just above —
# the lines below are its parameter list and body. Comments only added.
2186 _extraArguments = None,
2187 _sFunction = lambda x: x,
2192 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2193 correspondante de valeurs de la fonction en argument
# Initial checks and definitions
2195 # Vérifications et définitions initiales
2196 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2197 if not PlatformInfo.isIterable( __xserie ):
2198 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# A missing or invalid worker count falls back to an elided default.
2200 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2203 __mpWorkers = int(_mpWorkers)
2205 import multiprocessing
# Parallel evaluation path: map the function over the jobs in a Pool.
2216 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2217 import multiprocessing
2218 with multiprocessing.Pool(__mpWorkers) as pool:
2219 __multiHX = pool.map( _sFunction, _jobs )
2222 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential evaluation path: call the function once per input, forwarding
# the extra arguments positionally (list/tuple/map) or by keyword (dict).
2224 # logging.debug("MULTF Internal monoprocessing calculation begin")
2226 if _extraArguments is None:
2227 for __xvalue in __xserie:
2228 __multiHX.append( _sFunction( __xvalue ) )
2229 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2230 for __xvalue in __xserie:
2231 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2232 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2233 for __xvalue in __xserie:
2234 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
# NOTE(review): "unkown" in these error messages is a typo ("unknown") —
# runtime strings are left untouched in this documentation pass.
2236 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2237 # logging.debug("MULTF Internal monoprocessing calculation end")
2239 # logging.debug("MULTF Internal multifonction calculations end")
2242 # ==============================================================================
2243 def CostFunction3D(_x,
2244 _Hm = None, # To simulate Hm(x): HO["Direct"].appliedTo
2245 _HmX = None, # Already-computed simulation of Hm(x)
2246 _arg = None, # Extra arguments for Hm, given as a tuple
2251 _SIV = False, # To be phased out for 8.0
2252 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2253 _nPS = 0, # nbPreviousSteps
2254 _QM = "DA", # QualityMeasure
2255 _SSV = {}, # Input and/or output: self.StoredVariables
2256 _fRt = False, # Whether to return the extended output
2257 _sSc = True, # Whether to store the SSC
# NOTE(review): mutable defaults (_SSC = [], _SSV = {}) are shared across
# calls — here _SSV is deliberately used as an in/out store, so do not
# change without checking every caller.
2260 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2261 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2262 DFO, QuantileRegression
# Ensure each requested storage key behaves like a Persistence object by
# aliasing "append" to "store" where available.
2268 for k in ["CostFunctionJ",
2274 "SimulatedObservationAtCurrentOptimum",
2275 "SimulatedObservationAtCurrentState",
2279 if hasattr(_SSV[k],"store"):
2280 _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
# Normalize the evaluation point to a column matrix.
2282 _X = numpy.asmatrix(numpy.ravel( _x )).T
2283 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2284 _SSV["CurrentState"].append( _X )
# Reuse a precomputed simulation if given, otherwise call the operator.
2286 if _HmX is not None:
2290 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2294 _HX = _Hm( _X, *_arg )
2295 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2297 if "SimulatedObservationAtCurrentState" in _SSC or \
2298 "SimulatedObservationAtCurrentOptimum" in _SSC:
2299 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# A NaN in the simulation invalidates the whole cost evaluation.
2301 if numpy.any(numpy.isnan(_HX)):
2302 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2304 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
# Quality measure dispatch: Jb is the background term, Jo the observation
# term, weighted by the inverse covariances _BI / _RI when applicable.
2305 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2306 if _BI is None or _RI is None:
2307 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2308 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2309 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2310 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2311 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2313 raise ValueError("Observation error covariance matrix has to be properly defined!")
2315 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2316 elif _QM in ["LeastSquares", "LS", "L2"]:
2318 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2319 elif _QM in ["AbsoluteValue", "L1"]:
2321 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2322 elif _QM in ["MaximumError", "ME"]:
2324 Jo = numpy.max( numpy.abs(_Y - _HX) )
2325 elif _QM in ["QR", "Null"]:
2329 raise ValueError("Unknown asked quality measure!")
# Total cost is the sum of both terms.
2331 J = float( Jb ) + float( Jo )
2334 _SSV["CostFunctionJb"].append( Jb )
2335 _SSV["CostFunctionJo"].append( Jo )
2336 _SSV["CostFunctionJ" ].append( J )
# Track the running optimum over the stored cost history (offset by the
# number of previous steps _nPS).
2338 if "IndexOfOptimum" in _SSC or \
2339 "CurrentOptimum" in _SSC or \
2340 "SimulatedObservationAtCurrentOptimum" in _SSC:
2341 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2342 if "IndexOfOptimum" in _SSC:
2343 _SSV["IndexOfOptimum"].append( IndexMin )
2344 if "CurrentOptimum" in _SSC:
2345 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2346 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2347 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2352 if _QM in ["QR"]: # For QuantileRegression
# Self-test entry point: print a banner when the module is run directly
# (further diagnostic lines are elided in this listing).
2358 if __name__ == "__main__":
2359 print('\n AUTODIAGNOSTIC\n')