1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
class CacheManager(object):
    """
    General management class for a cache of computations.

    Each cache entry stores the flattened input point x, the computed value
    Hx, the norm of x and the operator name, so that a previously computed
    result can be returned instead of being recomputed.
    """
    # NOTE(review): the following are constructor keyword parameters; the
    # "def __init__(self, ...)" signature line is not visible in this excerpt.
    #  - toleranceInRedundancy : relative tolerance used to decide that two
    #    input points are identical (redundancy detection)
    #  - lenghtOfRedundancy : maximum number of cached entries; -1 means the
    #    size is chosen automatically at first storage ("lenght" spelling is
    #    part of the public keyword name and must not be changed)
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy = -1,
        """
        The tolerance characteristics can be modified at creation time.
        """
        self.__tolerBP = float(toleranceInRedundancy)  # identity tolerance
        self.__lenghtOR = int(lenghtOfRedundancy)      # max cache length (-1 = auto)
        self.__initlnOR = self.__lenghtOR              # initial length, kept for reset

    def wasCalculatedIn(self, xValue, oName="" ):
        "Check whether a computation matching the given value already exists in the cache"
        # Scan the cache backwards, most recent entries first
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if not hasattr(xValue, 'size'):
            elif (str(oName) != self.__listOPCV[i][3]):
            elif (xValue.size != self.__listOPCV[i][0].size):
            # Cheap first-component test for quick rejection before the full norm test
            elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
            # Full test: distance below the relative tolerance => cache hit
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                __HxV = self.__listOPCV[i][1]

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Store, for an operator o, a computation Hx matching the value x"
        if self.__lenghtOR < 0:
            # Automatic sizing at first storage
            self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list for a new operator name
            self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR += self.__lenghtOR
            self.__seenNames.append(str(oName))
        # Drop the oldest entries while the cache exceeds its maximum length
        while len(self.__listOPCV) > self.__lenghtOR:
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)), # 0 Previous point
            copy.copy(HxValue), # 1 Previous value
            numpy.linalg.norm(xValue), # 2 Norm
            str(oName), # 3 Operator name
        # NOTE(review): the following assignments appear to belong to
        # disable()/enable() methods whose "def" lines are not visible here.
        # disable(): remember the current length and mark the cache inactive
        self.__initlnOR = self.__lenghtOR
        self.__enabled = False
        # enable(): restore the initial length and mark the cache active
        self.__lenghtOR = self.__initlnOR
        self.__enabled = True
113 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator, defined either by a
    Python (multi-)function or by a matrix.
    """
    # NOTE(review): the following are constructor keyword parameters; the
    # "def __init__(self, ...)" signature line is not visible in this excerpt.
    name = "GenericOperator",
    avoidingRedundancy = True,
    inputAsMultiFunction = False,
    enableMultiProcess = False,
    extraArguments = None,
        """
        An object of this type is built by providing, through one of the two
        keywords, either a Python function or multi-function, or a matrix:
        - name : operator name
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant computations
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        """
        self.__name = str(name)
        # Local (per-instance) call counters: matrix calls, method calls, cache hits
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # The user function is already a multi-function: use it directly
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap the mono-function into a multi-function adapter
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type = "Method"
        elif fromMatrix is not None:
            # NOTE(review): numpy.float was deprecated in NumPy 1.20 and
            # removed in NumPy >= 1.24; this line requires an older NumPy.
            self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
            self.__Type = "Matrix"

    def disableAvoidingRedundancy(self):
        "Globally disable the computation cache"
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # NOTE(review): as visible, this method named "enable..." calls
        # disable(); presumably an "if self.__AvoidRC: Operator.CM.enable()"
        # branch precedes this fall-back line — to be confirmed.
        Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to a series of arguments
        xValue. This method only applies the operator; each argument is
        expected to already be of the proper type.
        Arguments, as a series:
        - xValue : argument suitable for applying the operator
        - HValue : precomputed value of the operator at this point
        - argsAsSerie : tells whether the arguments are mono- or multi-valued
        """
        if HValue is not None:
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        #
        if _HValue is not None:
            # Precomputed values are provided: store them instead of computing
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
                Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
            for i, xv in enumerate(_xValue):
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                __alreadyCalculated = False
                #
                if __alreadyCalculated:
                    # Cache hit: count it and reuse the stored value
                    self.__addOneCacheCall()
                    if self.__Matrix is not None:
                        # Linear case: direct matrix-vector product
                        self.__addOneMatrixCall()
                        _xv = numpy.matrix(numpy.ravel(xv)).T
                        _hv = self.__Matrix * _xv
                        self.__addOneMethodCall()
                _HxValue.append( _hv )
            #
            # Evaluate all not-yet-computed points in one multi-function call
            if len(_xserie)>0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # MF computation
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
                if not hasattr(_hserie, "pop"):
                    raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
                Operator.CM.storeValueInX(_xv,_hv,self.__name)
        #
        if returnSerieAsArrayMatrix:
            # Stack the series of results column-wise into a single 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to pairs (xValue, uValue).
        This method only applies the operator; its argument is expected to
        already be of the proper type. If uValue is None, the operator is
        assumed to apply to xValue only.
        Arguments:
        - paires : the arguments as pairs are:
          - xValue : X argument suitable for applying the operator
          - uValue : U argument suitable for applying the operator
        - argsAsSerie : tells whether the argument is mono- or multi-valued
        """
        if argsAsSerie: _xuValue = paires
        else: _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        #
        if self.__Matrix is not None:
            # Linear case: apply the matrix to each X (U is ignored)
            for paire in _xuValue:
                _xValue, _uValue = paire
                _xValue = numpy.matrix(numpy.ravel(_xValue)).T
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix * _xValue )
            # Function case: build the argument list, with or without U
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # MF computation
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
        #
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to a series of arguments
        xValue, knowing that the operator is valid at xNominal. This method
        only applies the operator; its argument is expected to already be of
        the proper type. If the operator is linear (it is a matrix), then it
        is valid at any nominal point and xNominal may be arbitrary. There is
        a single pair by default, and argsAsSerie indicates that the argument
        is multi-pair.
        Arguments:
        - paires : the arguments as pairs are:
          - xNominal : series of arguments giving the point where the
            operator is built, to be applied afterwards
          - xValue : series of arguments suitable for applying the operator
        - argsAsSerie : tells whether the argument is mono- or multi-valued
        """
        if argsAsSerie: _nxValue = paires
        else: _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        #
        if self.__Matrix is not None:
            # Linear case: the nominal point is irrelevant, apply the matrix
            for paire in _nxValue:
                _xNominal, _xValue = paire
                _xValue = numpy.matrix(numpy.ravel(_xValue)).T
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix * _xValue )
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # MF computation
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
        #
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Return the operator in matrix form
        """
        if self.__Matrix is not None:
            # Already a matrix: return it directly
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
            # Function case: the matrix form requires an operating point
            self.__addOneMethodCall( len(ValueForMethodForm) )
            for _vfmf in ValueForMethodForm:
                mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        #
        if argsAsSerie: return mValue
        else: return mValue[-1]

        # NOTE(review): the method signature ("def shape(self):" presumably)
        # for the following lines is not visible in this excerpt.
        """
        Return the shape, in the numpy sense, when the operator is available
        in matrix form
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the numbers of operator evaluations: local (per instance)
        counters first, then global (class-wide) counters.
        """
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls
        else: return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one call performed as a matrix product"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self, nb = 1):
        "Count one (or nb) call(s) performed as a function evaluation"
        self.__NbCallsAsMethod += nb # Local count
        Operator.NbCallsAsMethod += nb # Global count

    def __addOneCacheCall(self):
        "Count one call answered from the cache"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
405 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator
    (Direct, Tangent-Linear, Adjoint)
    """
    # NOTE(review): the following are constructor keyword parameters; the
    # "def __init__(self, ...)" signature line is not visible in this excerpt.
    name = "GenericFullOperator",
    asOneFunction = None, # 1 function
    asThreeFunctions = None, # 3 functions in a dictionary
    asScript = None, # 1 or 3 function(s) given by script
    asDict = None, # Parameters
    extraArguments = None,
    inputAsMF = False,# Function(s) as multi-functions
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments
        #
        # Collect the user parameters, if any
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        #
        # Operator given through a user script
        if asScript is not None:
            __Matrix, __Function = None, None
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                    "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            # Operator given directly as function(s)
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                # Full Tangent/Adjoint pair given explicitly
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                # Only the Direct function given: use approximated derivatives
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        #
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        #
        # Normalize the "applied in X" information into a dictionary
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        # Build the three operators from an approximated-derivative Direct function
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Fill in the finite-difference defaults before building the FDA object
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["CenteredFiniteDifference"],
                increment = __Function["DifferentialIncrement"],
                dX = __Function["withdX"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
                mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers = __Function["NumberOfProcesses"],
                mfEnabled = __Function["withmfEnabled"],
            self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        # Build the three operators from explicit Direct/Tangent/Adjoint functions
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        # Build the three operators from a matrix (Adjoint = transpose)
        elif asMatrix is not None:
            # NOTE(review): numpy.float was removed in NumPy >= 1.24; this
            # line requires an older NumPy.
            __matrice = numpy.matrix( __Matrix, numpy.float )
            self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
        #
        # Normalize each "applied in X" value into a column matrix
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # For the case where a true matrix is given
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # For the case of a vector represented as a 2D array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
            self.__FO["AppliedInX"] = None

        # NOTE(review): the "def __repr__(self):" and "def __str__(self):"
        # signature lines for the following bodies are not visible here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

        "x.__str__() <==> str(x)"
        return str(self.__FO)
577 # ==============================================================================
578 class Algorithm(object):
580 Classe générale d'interface de type algorithme
582 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
583 d'assimilation, en fournissant un container (dictionnaire) de variables
584 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
586 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
def __init__(self, name):
    """
    The present initialization builds storage variables that are generically
    available in the elementary algorithms. These storage variables are then
    kept in a dictionary internal to the object, accessed through the "get"
    method.

    The planned variables are:
    - APosterioriCorrelations : correlation matrix of the matrix A
    - APosterioriCovariance : a posteriori covariance matrix : A
    - APosterioriStandardDeviations : vector of standard deviations of the matrix A
    - APosterioriVariances : vector of variances of the matrix A
    - Analysis : analysis vector : Xa
    - BMA : Background minus Analysis : Xa - Xb
    - CostFunctionJ : global cost function, sum of the two following parts Jb and Jo
    - CostFunctionJAtCurrentOptimum : global cost function at the current optimal state during iterations
    - CostFunctionJb : background part of the cost function : Jb
    - CostFunctionJbAtCurrentOptimum : background part at the current optimal state during iterations
    - CostFunctionJo : observation part of the cost function : Jo
    - CostFunctionJoAtCurrentOptimum : observation part at the current optimal state during iterations
    - CurrentIterationNumber : current iteration number in iterative algorithms, starting from 0
    - CurrentOptimum : current optimal state during iterations
    - CurrentState : current state during iterations
    - GradientOfCostFunctionJ : gradient of the global cost function
    - GradientOfCostFunctionJb : gradient of the background part of the cost function
    - GradientOfCostFunctionJo : gradient of the observation part of the cost function
    - IndexOfOptimum : index of the current optimal state during iterations
    - Innovation : the innovation : d = Y - H(X)
    - InnovationAtCurrentState : the innovation at the current state : dn = Y - H(Xn)
    - JacobianMatrixAtBackground : Jacobian matrix at the background state
    - JacobianMatrixAtCurrentState : Jacobian matrix at the current state
    - JacobianMatrixAtOptimum : Jacobian matrix at the optimum
    - KalmanGainAtOptimum : Kalman gain at the optimum
    - MahalanobisConsistency : covariance consistency indicator
    - OMA : Observation minus Analysis : Y - Xa
    - OMB : Observation minus Background : Y - Xb
    - ForecastState : current forecast state during iterations
    - Residu : for the case of checking algorithms
    - SigmaBck2 : indicator of optimal correction of background errors
    - SigmaObs2 : indicator of optimal correction of observation errors
    - SimulatedObservationAtBackground : the observed state H(Xb) at the background
    - SimulatedObservationAtCurrentOptimum : the observed state H(X) at the current optimal state
    - SimulatedObservationAtCurrentState : the observed state H(X) at the current state
    - SimulatedObservationAtOptimum : the observed state H(Xa) at the optimum
    - SimulationQuantiles : observed states H(X) for the requested quantiles
    Additional variables to be stored can be added in the initialization of
    the elementary algorithm that inherits from this class.
    """
    logging.debug("%s Initialisation", str(name))
    self._m = PlatformInfo.SystemUsage()
    self._name = str( name )
    self._parameters = {"StoreSupplementaryCalculations":[]}
    self.__required_parameters = {}
    self.__required_inputs = {
        "RequiredInputValues":{"mandatory":(), "optional":()},
        "ClassificationTags":[],
    self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
    self.__canonical_parameter_name = {} # Mapping "lower" -> "correct"
    self.__canonical_stored_name = {} # Mapping "lower" -> "correct"
    #
    # Generic storage variables, one Persistence container per named quantity
    self.StoredVariables = {}
    self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
    self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
    self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
    self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
    self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
    self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
    self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
    self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
    self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
    self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
    self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
    self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
    self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
    self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
    self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
    self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
    self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
    self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
    self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
    self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
    self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
    self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
    self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
    self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
    self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
    self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
    self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
    self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
    self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
    self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
    self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
    self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
    self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
    self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
    self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
    self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
    self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
    self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
    self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
    #
    # Case-insensitive lookup tables for stored-variable and parameter names
    for k in self.StoredVariables:
        self.__canonical_stored_name[k.lower()] = k
    for k, v in self.__variable_names_not_public.items():
        self.__canonical_parameter_name[k.lower()] = k
    self.__canonical_parameter_name["algorithm"] = "Algorithm"
    self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
    """
    Common pre-processing of an algorithm run: set up parameters, check the
    required input vectors, covariance matrices and operators, and configure
    the optimizer verbosity according to the logging level.
    """
    logging.debug("%s Lancement", self._name)
    logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
    self._getTimeState(reset=True)
    #
    # Update the internal parameters with the content of Parameters, taking
    # the default values for all those not defined
    self.__setParameters(Parameters, reset=True)
    for k, v in self.__variable_names_not_public.items():
        if k not in self._parameters: self.__setParameters( {k:v} )
    #
    # Corrections and complements of the vectors
    def __test_vvalue(argument, variable, argname, symbol=None):
        # Check one input vector: error if mandatory and missing, debug otherwise
        if symbol is None: symbol = variable
        if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
            raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
        elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
            logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
            logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
        logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
    __test_vvalue( Xb, "Xb", "Background or initial state" )
    __test_vvalue( Y, "Y", "Observation" )
    __test_vvalue( U, "U", "Control" )
    #
    # Corrections and complements of the covariances
    def __test_cvalue(argument, variable, argname, symbol=None):
        # Check one input covariance matrix, same policy as for vectors
        if symbol is None: symbol = variable
        if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
            raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
        elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
            logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
            logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
        logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
    __test_cvalue( B, "B", "Background" )
    __test_cvalue( R, "R", "Observation" )
    __test_cvalue( Q, "Q", "Evolution" )
    #
    # Corrections and complements of the operators
    def __test_ovalue(argument, variable, argname, symbol=None):
        # Check one input operator, same policy as for vectors
        if symbol is None: symbol = variable
        if argument is None or (isinstance(argument,dict) and len(argument)==0):
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
                logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
            logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
    __test_ovalue( HO, "HO", "Observation", "H" )
    __test_ovalue( EM, "EM", "Evolution", "M" )
    __test_ovalue( CM, "CM", "Control Model", "C" )
    #
    # Bounds are kept only if given as a non-empty list/tuple
    if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
        logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
        self._parameters["Bounds"] = None
    #
    # Optimizer verbosity follows the logging level
    if logging.getLogger().level < logging.WARNING:
        self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
        if PlatformInfo.has_scipy:
            import scipy.optimize
            # NOTE(review): scipy.optimize.tnc is private in SciPy >= 1.8;
            # this access path requires an older SciPy — to be confirmed.
            self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
            self._parameters["optmessages"] = 15
        self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
        if PlatformInfo.has_scipy:
            import scipy.optimize
            self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
            self._parameters["optmessages"] = 15
    def _post_run(self,_oH=None):
        "Post-run: derive a-posteriori diagnostics and log run statistics."
        # When an a-posteriori covariance has been stored, optionally derive
        # variances, standard deviations and correlations from each matrix.
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Correlation matrix C = D^{-1/2} A D^{-1/2}, D = diag(A)
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        # Log observation-operator call/cache counters when the operator trio is given
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
805 def _toStore(self, key):
806 "True if in StoreSupplementaryCalculations, else False"
807 return key in self._parameters["StoreSupplementaryCalculations"]
809 def get(self, key=None):
811 Renvoie l'une des variables stockées identifiée par la clé, ou le
812 dictionnaire de l'ensemble des variables disponibles en l'absence de
813 clé. Ce sont directement les variables sous forme objet qui sont
814 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
815 des classes de persistance.
818 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
820 return self.StoredVariables
822 def __contains__(self, key=None):
823 "D.__contains__(k) -> True if D has a key k, else False"
824 if key is None or key.lower() not in self.__canonical_stored_name:
827 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        # NOTE(review): the enclosing `def keys(self):` line and the fallback
        # branch (when StoredVariables is absent) are not visible in this
        # excerpt; only the successful path is shown below.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        # NOTE(review): the enclosing `def pop(self, k, d):` line and the
        # branch structure between these statements are not visible in this
        # excerpt; the raise below belongs to a guard that is not shown.
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
        raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"
851 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
853 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
854 sa forme mathématique la plus naturelle possible.
856 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
        """
        Permet de définir dans l'algorithme des paramètres requis et leurs
        caractéristiques par défaut.
        """
        # NOTE(review): several lines of this method are missing from this
        # excerpt: the `name is None` guard before the raise below, and most
        # entries of the characteristics dictionary (default, message, minval,
        # maxval, listval, listadv) plus its closing brace.
        raise ValueError("A name is mandatory to define a required parameter.")
        self.__required_parameters[name] = {
            "typecast" : typecast,
        # Case-insensitive lookup: lowercased name -> canonical name
        self.__canonical_parameter_name[name.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
878 def getRequiredParameters(self, noDetails=True):
880 Renvoie la liste des noms de paramètres requis ou directement le
881 dictionnaire des paramètres requis.
884 return sorted(self.__required_parameters.keys())
886 return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Renvoie la valeur d'un paramètre requis de manière contrôlée
        """
        # Resolve the canonical parameter name and its declared characteristics
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        # NOTE(review): several structural lines of the value/default selection
        # below (the body of the first branch, `else:` lines, and the
        # try/except wrapping the typecast of an explicit value) are not
        # visible in this excerpt.
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
                __val = typecast( value )
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Bound checks apply element-wise to scalar or sequence values
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        # Allowed-values checks: listval is the public list, listadv the
        # advanced (undocumented) one; sequence values are checked per item.
        # NOTE(review): the `for v in __val:` header of the per-item loop and
        # the final `return` are not visible in this excerpt.
        if listval is not None or listadv is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                if listval is not None and v in listval: continue
                elif listadv is not None and v in listadv: continue
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
929 def requireInputArguments(self, mandatory=(), optional=()):
931 Permet d'imposer des arguments de calcul requis en entrée.
933 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
934 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
936 def getInputArguments(self):
938 Permet d'obtenir les listes des arguments de calcul requis en entrée.
940 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
942 def setAttributes(self, tags=()):
944 Permet d'adjoindre des attributs comme les tags de classification.
945 Renvoie la liste actuelle dans tous les cas.
947 self.__required_inputs["ClassificationTags"].extend( tags )
948 return self.__required_inputs["ClassificationTags"]
    def __setParameters(self, fromDico={}, reset=False):
        """
        Permet de stocker les paramètres reçus dans le dictionnaire interne.
        """
        # NOTE(review): `fromDico={}` is a mutable default argument; it appears
        # to be read-only here, but this should be confirmed and replaced by a
        # None sentinel in a behavioral change.
        self._parameters.update( fromDico )
        # Map each canonical parameter name back to the user-supplied spelling
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        # NOTE(review): the `else:` before the defaulted setParameterValue call
        # and the guard before the logging call are not visible in this excerpt.
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
970 def _getTimeState(self, reset=False):
972 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
975 self.__initial_cpu_time = time.process_time()
976 self.__initial_elapsed_time = time.perf_counter()
979 self.__cpu_time = time.process_time() - self.__initial_cpu_time
980 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
981 return self.__cpu_time, self.__elapsed_time
983 def _StopOnTimeLimit(self, X=None, withReason=False):
984 "Stop criteria on time limit: True/False [+ Reason]"
985 c, e = self._getTimeState()
986 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
987 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
988 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
989 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
991 __SC, __SR = False, ""
997 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    Classe générale d'interface d'action pour l'algorithme et ses paramètres
    """
    # NOTE(review): the `def __init__(...)` line and most of its signature
    # (asAlgorithm, asDict, asScript parameters) are not visible in this
    # excerpt; only selected body lines are shown below.
        name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        self.updateParameters( asDict, asScript )
        # Algorithm choice: from a script, or given directly
        # NOTE(review): the `else:` between the two assignments below is not
        # visible in this excerpt.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
    def updateParameters(self,
        # NOTE(review): the remaining signature lines (asDict / asScript
        # parameters) and the `else: __Dict = asDict` branch are not visible
        # in this excerpt.
        "Mise a jour des parametres"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Permet de lancer le calcul d'assimilation"
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each case object through getO() when available, otherwise use
        # the raw entry (covariances B/R/Q are taken as given).
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # Check size/shape consistency before launching the calculation
        self.__shape_validate()
        # NOTE(review): the other keyword arguments of the run() call (Xb, Y,
        # U, HO, EM, CM, R, B, Q) and its closing parenthesis are not visible
        # in this excerpt.
        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Permet de lancer le calcul d'assimilation"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        __file = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
        # The SALOME/YACS/ADAO environment is mandatory for this execution path
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        # NOTE(review): the imports of `pilot` and `loader`, the `try:` lines
        # around catalog loading and schema loading, and the intermediate
        # error-handling/exit lines are not visible in this excerpt.
        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
        p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        # Report parsing and validity problems before any execution attempt
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        # NOTE(review): the actual execution call (e.g. e.RunW(p, ...)) between
        # the executor creation and the final state check is not visible here.
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
1140 def get(self, key = None):
1141 "Vérifie l'existence d'une clé de variable ou de paramètres"
1142 if key in self.__algorithm:
1143 return self.__algorithm.get( key )
1144 elif key in self.__P:
1145 return self.__P[key]
1147 allvariables = self.__P
1148 for k in self.__variable_names_not_public: allvariables.pop(k, None)
1151 def pop(self, k, d):
1152 "Necessaire pour le pickling"
1153 return self.__algorithm.pop(k, d)
1155 def getAlgorithmRequiredParameters(self, noDetails=True):
1156 "Renvoie la liste des paramètres requis selon l'algorithme"
1157 return self.__algorithm.getRequiredParameters(noDetails)
1159 def getAlgorithmInputArguments(self):
1160 "Renvoie la liste des entrées requises selon l'algorithme"
1161 return self.__algorithm.getInputArguments()
1163 def getAlgorithmAttributes(self):
1164 "Renvoie la liste des attributs selon l'algorithme"
1165 return self.__algorithm.setAttributes()
    def setObserver(self, __V, __O, __I, __S):
        # Attach the observer function __O to stored variable __V, with info
        # __I; __S presumably carries scheduling information — TODO confirm.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        # NOTE(review): the other keyword arguments of setDataObserver (e.g.
        # the hook function and scheduler) and the call's closing parenthesis
        # are not visible in this excerpt.
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach observer __O from stored variable __V; __A presumably means
        # "remove all observers" — TODO confirm against the callee signature.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        # NOTE(review): the arguments of removeDataObserver and the call's
        # closing parenthesis are not visible in this excerpt.
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1194 def hasObserver(self, __V):
1195 if self.__algorithm is None \
1196 or isinstance(self.__algorithm, dict) \
1197 or not hasattr(self.__algorithm,"StoredVariables"):
1199 if __V not in self.__algorithm:
1201 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # NOTE(review): the enclosing `def keys(self):` line is not visible in
        # this excerpt. Returns all variable and parameter names, excluding
        # the non-public ones.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1209 def __contains__(self, key=None):
1210 "D.__contains__(k) -> True if D has a key k, else False"
1211 return key in self.__algorithm or key in self.__P
        # NOTE(review): the `def __repr__(self):` line is not visible here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)
        # NOTE(review): the `def __str__(self):` line is not visible here.
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
        d'assimilation. L'argument est un champ caractère se rapportant au nom
        d'un algorithme réalisant l'opération sur les arguments fixes.
        """
        # NOTE(review): the `if choice is None:` guard before the raise below,
        # the `module_path = None` initialisation, and the `try:` around the
        # import section are not visible in this excerpt.
        raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        # Explicitly search for the complete algorithm file
        # -------------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        # Import the complete file as a module
        # ------------------------------------
        sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        # Instantiate an object of the file's elementary type
        # ---------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        """
        Validation de la correspondance correcte des tailles des variables et
        des matrices s'il y en a.
        """
        # Normalise the shape of each input: (0,) / (0,0) stands for "absent";
        # `shape` may be an attribute or a callable depending on the object.
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        # Conditions checking
        # -------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        # Covariance and evolution matrices must be square
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        # Cross-compatibility of the observation operator with X, Y, B, R
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        # Special case: EnsembleBlue accepts a background given as an ensemble,
        # reshaped into a series of member vectors.
        # NOTE(review): numpy.matrix is legacy and numpy.float was removed in
        # NumPy >= 1.24; the store() call below will fail on modern NumPy
        # (use numpy.asarray and the builtin float in a behavioral change).
        # NOTE(review): the `else:` between the reshaping branch and the raise
        # below is not visible in this excerpt.
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
                __Xb_shape = min(__B_shape)
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        # The number of bound pairs, when given, must match the state size
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1376 # ==============================================================================
class RegulationAndParameters(object):
    """
    Classe générale d'interface d'action pour la régulation et ses paramètres
    """
    # NOTE(review): the `def __init__(...)` line and most of its signature
    # (asAlgorithm, asDict, asScript parameters) are not visible in this
    # excerpt, nor are the `else:` lines between the paired assignments below.
        name = "GenericRegulation",
        self.__name = str(name)
        # Regulation algorithm and parameters: from a script, or given directly
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )
    def get(self, key = None):
        "Vérifie l'existence d'une clé de variable ou de paramètres"
        # NOTE(review): the branch returning the whole dictionary when no key
        # matches is not visible in this excerpt.
            return self.__P[key]
1415 # ==============================================================================
class DataObserver(object):
    """
    Classe générale d'interface de type observer
    """
    # NOTE(review): the `def __init__(...)` line and most of its signature
    # (onVariable, withInfo, asTemplate, asString, asScript, asObsObject,
    # withAlgo, scheduledBy parameters) are not visible in this excerpt, nor
    # are several `else:` branches of the body.
        name = "GenericObserver",
        self.__name = str(name)
        # Normalise the observed variable name(s) and the associated info
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
                self.__I = (str(withInfo),)
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        # Observer callable: given directly, or built from a template/string/script
        if asObsObject is not None:
            self.__O = asObsObject
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        # Attach the observer to each requested variable of the algorithm
        # NOTE(review): the lines defining `ename`/`einfo` inside the loop are
        # not visible in this excerpt.
        for k in range(len(self.__V)):
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
    # NOTE(review): the `def __repr__(self):` / `def __str__(self):` lines are
    # not visible in this excerpt.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1478 # ==============================================================================
1479 class UserScript(object):
1481 Classe générale d'interface de type texte de script utilisateur
1484 name = "GenericUserScript",
1491 self.__name = str(name)
1493 if asString is not None:
1495 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1496 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1497 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1498 self.__F = Templates.ObserverTemplates[asTemplate]
1499 elif asScript is not None:
1500 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1505 "x.__repr__() <==> repr(x)"
1506 return repr(self.__F)
1509 "x.__str__() <==> str(x)"
1510 return str(self.__F)
1512 # ==============================================================================
1513 class ExternalParameters(object):
1515 Classe générale d'interface de type texte de script utilisateur
1518 name = "GenericExternalParameters",
1524 self.__name = str(name)
1527 self.updateParameters( asDict, asScript )
1529 def updateParameters(self,
1533 "Mise a jour des parametres"
1534 if asDict is None and asScript is not None:
1535 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1539 if __Dict is not None:
1540 self.__P.update( dict(__Dict) )
1542 def get(self, key = None):
1544 return self.__P[key]
1546 return list(self.__P.keys())
1549 return list(self.__P.keys())
1551 def pop(self, k, d):
1552 return self.__P.pop(k, d)
1555 return self.__P.items()
1557 def __contains__(self, key=None):
1558 "D.__contains__(k) -> True if D has a key k, else False"
1559 return key in self.__P
1561 # ==============================================================================
# NOTE(review): this excerpt is a numbered listing with lines missing (the
# fused leading numbers are original-file line numbers); comments below only
# describe what the visible lines establish.
1562 class State(object):
1564 Classe générale d'interface de type état
# Generic state-vector wrapper: accepts a single vector, a series of
# vectors, a script variable, or data-file columns as input.
1567 name = "GenericVector",
1569 asPersistentVector = None,
1575 toBeChecked = False,
1578 Permet de définir un vecteur :
1579 - asVector : entrée des données, comme un vecteur compatible avec le
1580 constructeur de numpy.matrix, ou "True" si entrée par script.
1581 - asPersistentVector : entrée des données, comme une série de vecteurs
1582 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1583 type Persistence, ou "True" si entrée par script.
1584 - asScript : si un script valide est donné contenant une variable
1585 nommée "name", la variable est de type "asVector" (par défaut) ou
1586 "asPersistentVector" selon que l'une de ces variables est placée à
1588 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1589 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1590 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1591 nommée "name"), on récupère les colonnes et on les range ligne après
1592 ligne (colMajor=False, par défaut) ou colonne après colonne
1593 (colMajor=True). La variable résultante est de type "asVector" (par
1594 défaut) ou "asPersistentVector" selon que l'une de ces variables est
# --- initialisation of name/check flags and internal type markers ---
1597 self.__name = str(name)
1598 self.__check = bool(toBeChecked)
1602 self.__is_vector = False
1603 self.__is_series = False
# Input source selection: script beats data file beats direct arguments.
1605 if asScript is not None:
1606 __Vector, __Series = None, None
1607 if asPersistentVector:
1608 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1610 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1611 elif asDataFile is not None:
1612 __Vector, __Series = None, None
1613 if asPersistentVector:
1614 if colNames is not None:
1615 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1617 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already stored column-major, hence the inverted transpose
# logic for that format.
1618 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1619 __Series = numpy.transpose(__Series)
1620 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1621 __Series = numpy.transpose(__Series)
1623 if colNames is not None:
1624 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1626 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1628 __Vector = numpy.ravel(__Vector, order = "F")
1630 __Vector = numpy.ravel(__Vector, order = "C")
1632 __Vector, __Series = asVector, asPersistentVector
# --- normalisation into a column numpy.matrix, or a Persistence series ---
# NOTE(review): numpy.matrix and numpy.float are deprecated/removed in
# recent NumPy — flagged for modernisation, not changed here.
1634 if __Vector is not None:
1635 self.__is_vector = True
1636 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1637 self.shape = self.__V.shape
1638 self.size = self.__V.size
1639 elif __Series is not None:
1640 self.__is_series = True
1641 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1642 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string series is a code-injection risk if the
# string can come from an untrusted source — confirm callers.
1643 if isinstance(__Series, str): __Series = eval(__Series)
1644 for member in __Series:
1645 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1648 if isinstance(self.__V.shape, (tuple, list)):
1649 self.shape = self.__V.shape
1651 self.shape = self.__V.shape()
# A 1-D shape is promoted to a column shape (n,1).
1652 if len(self.shape) == 1:
1653 self.shape = (self.shape[0],1)
1654 self.size = self.shape[0] * self.shape[1]
1656 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
# Optional scheduler attached to the state.
1658 if scheduledBy is not None:
1659 self.__T = scheduledBy
1661 def getO(self, withScheduler=False):
# Returns the stored value, with the scheduler when requested.
1663 return self.__V, self.__T
1664 elif self.__T is None:
# Internal-type predicates (is it a single vector / a series?).
1670 "Vérification du type interne"
1671 return self.__is_vector
1674 "Vérification du type interne"
1675 return self.__is_series
# Debug/display helpers delegating to the wrapped value.
1678 "x.__repr__() <==> repr(x)"
1679 return repr(self.__V)
1682 "x.__str__() <==> str(x)"
1683 return str(self.__V)
1685 # ==============================================================================
# NOTE(review): numbered listing with missing lines (def headers, else
# branches); code kept byte-identical, only review comments added.
1686 class Covariance(object):
1688 Classe générale d'interface de type covariance
# Generic covariance-matrix interface. The matrix can be given as a full
# matrix, a scalar multiplying identity, a diagonal vector, or an opaque
# object exposing the required linear-algebra methods.
1691 name = "GenericCovariance",
1692 asCovariance = None,
1693 asEyeByScalar = None,
1694 asEyeByVector = None,
1697 toBeChecked = False,
1700 Permet de définir une covariance :
1701 - asCovariance : entrée des données, comme une matrice compatible avec
1702 le constructeur de numpy.matrix
1703 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1704 multiplicatif d'une matrice de corrélation identité, aucune matrice
1705 n'étant donc explicitement à donner
1706 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1707 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1708 n'étant donc explicitement à donner
1709 - asCovObject : entrée des données comme un objet python, qui a les
1710 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1711 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1712 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1713 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1714 pleine doit être vérifié
1716 self.__name = str(name)
1717 self.__check = bool(toBeChecked)
# Exactly one of these representation flags ends up True.
1720 self.__is_scalar = False
1721 self.__is_vector = False
1722 self.__is_matrix = False
1723 self.__is_object = False
# Script input overrides the direct keyword arguments.
1725 if asScript is not None:
1726 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1728 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1730 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1732 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1734 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1736 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# --- dispatch on representation: scalar > vector > matrix > object ---
1738 if __Scalar is not None:
1739 if numpy.matrix(__Scalar).size != 1:
1740 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1741 self.__is_scalar = True
# abs() forces a non-negative variance multiplier.
1742 self.__C = numpy.abs( float(__Scalar) )
1745 elif __Vector is not None:
1746 self.__is_vector = True
1747 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1748 self.shape = (self.__C.size,self.__C.size)
1749 self.size = self.__C.size**2
1750 elif __Matrix is not None:
1751 self.__is_matrix = True
1752 self.__C = numpy.matrix( __Matrix, float )
1753 self.shape = self.__C.shape
1754 self.size = self.__C.size
1755 elif __Object is not None:
1756 self.__is_object = True
# Duck-type check: the object must provide the mandatory algebra methods.
1758 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1759 if not hasattr(self.__C,at):
1760 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1761 if hasattr(self.__C,"shape"):
1762 self.shape = self.__C.shape
1765 if hasattr(self.__C,"size"):
1766 self.size = self.__C.size
1771 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Sanity checks: squareness and (optionally) SPD-ness via Cholesky.
1775 def __validate(self):
1777 if self.__C is None:
1778 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1779 if self.ismatrix() and min(self.shape) != max(self.shape):
1780 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1781 if self.isobject() and min(self.shape) != max(self.shape):
1782 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1783 if self.isscalar() and self.__C <= 0:
1784 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1785 if self.isvector() and (self.__C <= 0).any():
1786 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# Cholesky attempted when a check is requested or logging is verbose;
# failure (lines elided here) is reported as non-SPD.
1787 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1789 L = numpy.linalg.cholesky( self.__C )
1791 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1792 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1794 L = self.__C.cholesky()
1796 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Representation predicates.
1799 "Vérification du type interne"
1800 return self.__is_scalar
1803 "Vérification du type interne"
1804 return self.__is_vector
1807 "Vérification du type interne"
1808 return self.__is_matrix
1811 "Vérification du type interne"
1812 return self.__is_object
# Inverse: each representation inverts in its own cheapest form.
1817 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1818 elif self.isvector():
1819 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1820 elif self.isscalar():
1821 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1822 elif self.isobject():
1823 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1825 return None # Indispensable
# Transpose (diagonal/scalar forms are symmetric, returned unchanged).
1830 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1831 elif self.isvector():
1832 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1833 elif self.isscalar():
1834 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1835 elif self.isobject():
1836 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
# Cholesky factor: sqrt of the diagonal for vector/scalar forms.
1839 "Décomposition de Cholesky"
1841 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1842 elif self.isvector():
1843 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1844 elif self.isscalar():
1845 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1846 elif self.isobject() and hasattr(self.__C,"cholesky"):
1847 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1849 def choleskyI(self):
1850 "Inversion de la décomposition de Cholesky"
1852 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1853 elif self.isvector():
1854 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1855 elif self.isscalar():
1856 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1857 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1858 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
# Matrix square root (scipy.linalg.sqrtm on the full-matrix form).
1861 "Racine carrée matricielle"
1864 return Covariance(self.__name+"C", asCovariance = scipy.linalg.sqrtm(self.__C) )
1865 elif self.isvector():
1866 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1867 elif self.isscalar():
1868 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1869 elif self.isobject() and hasattr(self.__C,"sqrt"):
1870 return Covariance(self.__name+"C", asCovObject = self.__C.sqrt() )
1873 "Inversion de la racine carrée matricielle"
1876 return Covariance(self.__name+"H", asCovariance = scipy.linalg.sqrtm(self.__C).I )
1877 elif self.isvector():
1878 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1879 elif self.isscalar():
1880 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1881 elif self.isobject() and hasattr(self.__C,"sqrtI"):
1882 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtI() )
# Diagonal extraction; msize is required for the scalar form only.
1884 def diag(self, msize=None):
1885 "Diagonale de la matrice"
1887 return numpy.diag(self.__C)
1888 elif self.isvector():
1890 elif self.isscalar():
1892 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1894 return self.__C * numpy.ones(int(msize))
1895 elif self.isobject():
1896 return self.__C.diag()
# Expansion into an explicit dense matrix.
1898 def asfullmatrix(self, msize=None):
1902 elif self.isvector():
1903 return numpy.matrix( numpy.diag(self.__C), float )
1904 elif self.isscalar():
1906 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1908 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1909 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1910 return self.__C.asfullmatrix()
# Trace; scalar form needs msize to know the implicit dimension.
1912 def trace(self, msize=None):
1913 "Trace de la matrice"
1915 return numpy.trace(self.__C)
1916 elif self.isvector():
1917 return float(numpy.sum(self.__C))
1918 elif self.isscalar():
1920 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1922 return self.__C * int(msize)
1923 elif self.isobject():
1924 return self.__C.trace()
1930 "x.__repr__() <==> repr(x)"
1931 return repr(self.__C)
1934 "x.__str__() <==> str(x)"
1935 return str(self.__C)
# Arithmetic operators. For vector/scalar forms, the strided slice
# [::_A.shape[1]+1] addresses the diagonal of the flattened operand.
1937 def __add__(self, other):
1938 "x.__add__(y) <==> x+y"
1939 if self.ismatrix() or self.isobject():
1940 return self.__C + numpy.asmatrix(other)
1941 elif self.isvector() or self.isscalar():
1942 _A = numpy.asarray(other)
1943 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1944 return numpy.asmatrix(_A)
1946 def __radd__(self, other):
1947 "x.__radd__(y) <==> y+x"
1948 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1950 def __sub__(self, other):
1951 "x.__sub__(y) <==> x-y"
1952 if self.ismatrix() or self.isobject():
1953 return self.__C - numpy.asmatrix(other)
1954 elif self.isvector() or self.isscalar():
1955 _A = numpy.asarray(other)
1956 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1957 return numpy.asmatrix(_A)
1959 def __rsub__(self, other):
1960 "x.__rsub__(y) <==> y-x"
1961 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1964 "x.__neg__() <==> -x"
# Multiplication: distinguishes vector-like from matrix-like operands by
# comparing sizes against self.shape before choosing the product form.
1967 def __mul__(self, other):
1968 "x.__mul__(y) <==> x*y"
1969 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1970 return self.__C * other
1971 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1972 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1973 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1974 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1975 return self.__C * numpy.asmatrix(other)
1977 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1978 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1979 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1980 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1981 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1982 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1984 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1985 elif self.isscalar() and isinstance(other,numpy.matrix):
1986 return self.__C * other
1987 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1988 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1989 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1991 return self.__C * numpy.asmatrix(other)
1992 elif self.isobject():
1993 return self.__C.__mul__(other)
1995 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# Right multiplication mirrors __mul__ with the operand on the left.
1997 def __rmul__(self, other):
1998 "x.__rmul__(y) <==> y*x"
1999 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2000 return other * self.__C
2001 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2002 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2003 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2004 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2005 return numpy.asmatrix(other) * self.__C
2007 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2008 elif self.isvector() and isinstance(other,numpy.matrix):
2009 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2010 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2011 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2012 return numpy.asmatrix(numpy.array(other) * self.__C)
2014 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2015 elif self.isscalar() and isinstance(other,numpy.matrix):
2016 return other * self.__C
2017 elif self.isobject():
2018 return self.__C.__rmul__(other)
2020 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2023 "x.__len__() <==> len(x)"
2024 return self.shape[0]
2026 # ==============================================================================
# Builds an observer callback from its source text; the stored body is
# presumably exec'd/compiled in func (execution lines elided from this
# excerpt — TODO confirm against the full source).
2027 class Observer2Func(object):
2029 Creation d'une fonction d'observateur a partir de son texte
2031 def __init__(self, corps=""):
2032 self.__corps = corps
2033 def func(self,var,info):
2034 "Fonction d'observation"
2037 "Restitution du pointeur de fonction dans l'objet"
2040 # ==============================================================================
# Records the sequence of case-creation commands and replays/dumps them
# through pluggable viewers (dump) and loaders (load).
2041 class CaseLogger(object):
2043 Conservation des commandes de creation d'un cas
2045 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2046 self.__name = str(__name)
2047 self.__objname = str(__objname)
2048 self.__logSerie = []
2049 self.__switchoff = False
# Default output formats (viewers) ...
2051 "TUI" :Interfaces._TUIViewer,
2052 "SCD" :Interfaces._SCDViewer,
2053 "YACS":Interfaces._YACSViewer,
# ... and default input formats (loaders).
2056 "TUI" :Interfaces._TUIViewer,
2057 "COM" :Interfaces._COMViewer,
2059 if __addViewers is not None:
2060 self.__viewers.update(dict(__addViewers))
2061 if __addLoaders is not None:
2062 self.__loaders.update(dict(__addLoaders))
2064 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2065 "Enregistrement d'une commande individuelle"
# A command is logged only when fully described and logging is enabled;
# "self" is stripped from the recorded keyword list.
2066 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2067 if "self" in __keys: __keys.remove("self")
2068 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2070 self.__switchoff = True
2072 self.__switchoff = False
2074 def dump(self, __filename=None, __format="TUI", __upa=""):
2075 "Restitution normalisée des commandes"
2076 if __format in self.__viewers:
2077 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2079 raise ValueError("Dumping as \"%s\" is not available"%__format)
2080 return __formater.dump(__filename, __upa)
2082 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2083 "Chargement normalisé des commandes"
2084 if __format in self.__loaders:
2085 __formater = self.__loaders[__format]()
2087 raise ValueError("Loading as \"%s\" is not available"%__format)
2088 return __formater.load(__filename, __content, __object)
2090 # ==============================================================================
# NOTE(review): the "def MultiFonction(__xserie, ..." header line falls in a
# gap of this excerpt; the parameters and body below belong to it.
# Applies _sFunction to each vector of an ordered input series, either via
# a multiprocessing pool or sequentially, and returns the output series.
2093 _extraArguments = None,
2094 _sFunction = lambda x: x,
2099 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2100 correspondante de valeurs de la fonction en argument
2102 # Vérifications et définitions initiales
2103 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2104 if not PlatformInfo.isIterable( __xserie ):
2105 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count: non-positive or None falls back to a default (elided).
2107 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2110 __mpWorkers = int(_mpWorkers)
2112 import multiprocessing
# --- multiprocessing branch: build the job list then map over a Pool ---
2123 if _extraArguments is None:
2125 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2126 for __xvalue in __xserie:
2127 _jobs.append( [__xvalue, ] + list(_extraArguments) )
2129 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2130 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2131 import multiprocessing
2132 with multiprocessing.Pool(__mpWorkers) as pool:
2133 __multiHX = pool.map( _sFunction, _jobs )
2136 # logging.debug("MULTF Internal multiprocessing calculation end")
# --- sequential branch: plain loop, extra args passed as *args/**kwargs ---
2138 # logging.debug("MULTF Internal monoprocessing calculation begin")
2140 if _extraArguments is None:
2141 for __xvalue in __xserie:
2142 __multiHX.append( _sFunction( __xvalue ) )
2143 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2144 for __xvalue in __xserie:
2145 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2146 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2147 for __xvalue in __xserie:
2148 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2150 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2151 # logging.debug("MULTF Internal monoprocessing calculation end")
2153 # logging.debug("MULTF Internal multifonction calculations end")
2156 # ==============================================================================
# General-purpose cost function J(x) = Jb + Jo for static/3D assimilation
# algorithms; the quality measure _QM selects the norm used for Jo.
2157 def CostFunction3D(_x,
2158 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2159 _HmX = None, # Simulation déjà faite de Hm(x)
2160 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2165 _SIV = False, # A résorber pour la 8.0
2166 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2167 _nPS = 0, # nbPreviousSteps
2168 _QM = "DA", # QualityMeasure
2169 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2170 _fRt = False, # Restitue ou pas la sortie étendue
2171 _sSc = True, # Stocke ou pas les SSC
# NOTE(review): mutable defaults (_SSC=[], _SSV={}) are shared across
# calls — here _SSV is deliberately used as an in/out store; confirm
# callers always pass their own dict.
2174 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2175 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2176 DFO, QuantileRegression
# Alias "append" -> "store" on each Persistence-like stored variable.
2182 for k in ["CostFunctionJ",
2188 "SimulatedObservationAtCurrentOptimum",
2189 "SimulatedObservationAtCurrentState",
2193 if hasattr(_SSV[k],"store"):
2194 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
# Current state as a column matrix, optionally stored.
2196 _X = numpy.asmatrix(numpy.ravel( _x )).T
2197 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2198 _SSV["CurrentState"].append( _X )
# Observation simulation: reuse _HmX when provided, else apply _Hm.
2200 if _HmX is not None:
2204 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2208 _HX = _Hm( _X, *_arg )
2209 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2211 if "SimulatedObservationAtCurrentState" in _SSC or \
2212 "SimulatedObservationAtCurrentOptimum" in _SSC:
2213 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# NaN in the simulation propagates NaN costs instead of raising.
2215 if numpy.any(numpy.isnan(_HX)):
2216 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2218 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
# Quality-measure dispatch: DA/AWLS uses both background (B) and
# observation (R) terms; the others use observation-only norms.
2219 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2220 if _BI is None or _RI is None:
2221 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2222 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2223 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2224 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2225 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2227 raise ValueError("Observation error covariance matrix has to be properly defined!")
2229 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2230 elif _QM in ["LeastSquares", "LS", "L2"]:
2232 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2233 elif _QM in ["AbsoluteValue", "L1"]:
2235 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2236 elif _QM in ["MaximumError", "ME"]:
2238 Jo = numpy.max( numpy.abs(_Y - _HX) )
2239 elif _QM in ["QR", "Null"]:
2243 raise ValueError("Unknown asked quality measure!")
2245 J = float( Jb ) + float( Jo )
# Store the cost components, then the optimum-tracking quantities.
2248 _SSV["CostFunctionJb"].append( Jb )
2249 _SSV["CostFunctionJo"].append( Jo )
2250 _SSV["CostFunctionJ" ].append( J )
2252 if "IndexOfOptimum" in _SSC or \
2253 "CurrentOptimum" in _SSC or \
2254 "SimulatedObservationAtCurrentOptimum" in _SSC:
# argmin over the steps of this run only (offset by _nPS previous steps).
2255 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2256 if "IndexOfOptimum" in _SSC:
2257 _SSV["IndexOfOptimum"].append( IndexMin )
2258 if "CurrentOptimum" in _SSC:
2259 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2260 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2261 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
# QR quality measure returns an extended output (return lines elided).
2266 if _QM in ["QR"]: # Pour le QuantileRegression
2271 # ==============================================================================
2272 if __name__ == "__main__":
2273 print('\n AUTODIAGNOSTIC\n')