1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
class CacheManager(object):
    General class for managing a cache of calculations
        toleranceInRedundancy = 1.e-18,
        lenghtOfRedundancy = -1,
        The tolerance characteristics can be modified at creation time.
        # NOTE: "lenght" is a historical misspelling kept for interface compatibility
        self.__tolerBP = float(toleranceInRedundancy)  # relative tolerance for duplicate detection
        self.__lenghtOR = int(lenghtOfRedundancy)  # cache capacity (-1: sized later from the state dimension)
        self.__initlnOR = self.__lenghtOR  # remember the initial capacity so enable() can restore it
        self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator
        # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
    def wasCalculatedIn(self, xValue, oName="" ): #, info="" ):
        "Check whether a calculation corresponding to the given value already exists"
        # Walk the cache from the most recent entry backwards, bounded by the capacity
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            # Reject entries stored for another operator name or with an incompatible size
            if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
                # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
            # Cache hit when ||x - xi|| < tolerance * ||xi|| (relative comparison)
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __HxV = self.__listOPCV[i][1]  # cached operator result for this point
                # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Store, for an operator o, a calculation Hx corresponding to the value x"
        if self.__lenghtOR < 0:
            # First storage ever: size the cache from the state dimension
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if the name is new
            self.__lenghtOR += 2 * xValue.size + 2
            self.__initlnOR += self.__lenghtOR
            self.__seenNames.append(str(oName))
        while len(self.__listOPCV) > self.__lenghtOR:
            # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
            self.__listOPCV.pop(0)  # FIFO eviction of the oldest cached case
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            numpy.linalg.norm(xValue),
        self.__initlnOR = self.__lenghtOR
        self.__enabled = False  # disable(): lookups are bypassed
        self.__lenghtOR = self.__initlnOR
        self.__enabled = True  # enable(): restore the initial capacity and allow lookups
111 # ==============================================================================
112 class Operator(object):
114 Classe générale d'interface de type opérateur simple
122 name = "GenericOperator",
125 avoidingRedundancy = True,
126 inputAsMultiFunction = False,
127 enableMultiProcess = False,
128 extraArguments = None,
131 On construit un objet de ce type en fournissant, à l'aide de l'un des
132 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
135 - name : nom d'opérateur
136 - fromMethod : argument de type fonction Python
137 - fromMatrix : argument adapté au constructeur numpy.matrix
138 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
139 - inputAsMultiFunction : booléen indiquant une fonction explicitement
140 définie (ou pas) en multi-fonction
141 - extraArguments : arguments supplémentaires passés à la fonction de
142 base et ses dérivées (tuple ou dictionnaire)
144 self.__name = str(name)
145 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
146 self.__AvoidRC = bool( avoidingRedundancy )
147 self.__inputAsMF = bool( inputAsMultiFunction )
148 self.__mpEnabled = bool( enableMultiProcess )
149 self.__extraArgs = extraArguments
150 if fromMethod is not None and self.__inputAsMF:
151 self.__Method = fromMethod # logtimer(fromMethod)
153 self.__Type = "Method"
154 elif fromMethod is not None and not self.__inputAsMF:
155 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
157 self.__Type = "Method"
158 elif fromMatrix is not None:
160 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
161 self.__Type = "Matrix"
167 def disableAvoidingRedundancy(self):
169 Operator.CM.disable()
171 def enableAvoidingRedundancy(self):
176 Operator.CM.disable()
182 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
184 Permet de restituer le résultat de l'application de l'opérateur à une
185 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
186 argument devant a priori être du bon type.
188 - les arguments par série sont :
189 - xValue : argument adapté pour appliquer l'opérateur
190 - HValue : valeur précalculée de l'opérateur en ce point
191 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
198 if HValue is not None:
202 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
204 if _HValue is not None:
205 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
207 for i in range(len(_HValue)):
208 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
210 Operator.CM.storeValueInX(_xValue[i],HxValue[-1],self.__name)
215 for i, xv in enumerate(_xValue):
217 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
219 __alreadyCalculated = False
221 if __alreadyCalculated:
222 self.__addOneCacheCall()
225 if self.__Matrix is not None:
226 self.__addOneMatrixCall()
227 _hv = self.__Matrix * xv
229 self.__addOneMethodCall()
233 HxValue.append( _hv )
235 if len(_xserie)>0 and self.__Matrix is None:
236 if self.__extraArgs is None:
237 _hserie = self.__Method( _xserie ) # Calcul MF
239 _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
240 if not hasattr(_hserie, "pop"):
241 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
247 Operator.CM.storeValueInX(_xv,_hv,self.__name)
249 if argsAsSerie: return HxValue
250 else: return HxValue[-1]
252 def appliedControledFormTo(self, paires, argsAsSerie = False):
254 Permet de restituer le résultat de l'application de l'opérateur à des
255 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
256 argument devant a priori être du bon type. Si la uValue est None,
257 on suppose que l'opérateur ne s'applique qu'à xValue.
259 - paires : les arguments par paire sont :
260 - xValue : argument X adapté pour appliquer l'opérateur
261 - uValue : argument U adapté pour appliquer l'opérateur
262 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
264 if argsAsSerie: _xuValue = paires
265 else: _xuValue = (paires,)
266 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
268 if self.__Matrix is not None:
270 for paire in _xuValue:
271 _xValue, _uValue = paire
272 self.__addOneMatrixCall()
273 HxValue.append( self.__Matrix * _xValue )
276 for paire in _xuValue:
278 _xValue, _uValue = paire
279 if _uValue is not None:
280 _xuValue.append( paire )
282 _xuValue.append( _xValue )
283 self.__addOneMethodCall( len(_xuValue) )
284 if self.__extraArgs is None:
285 HxValue = self.__Method( _xuValue ) # Calcul MF
287 HxValue = self.__Method( _xuValue, self.__extraArgs ) # Calcul MF
289 if argsAsSerie: return HxValue
290 else: return HxValue[-1]
292 def appliedInXTo(self, paires, argsAsSerie = False):
294 Permet de restituer le résultat de l'application de l'opérateur à une
295 série d'arguments xValue, sachant que l'opérateur est valable en
296 xNominal. Cette méthode se contente d'appliquer, son argument devant a
297 priori être du bon type. Si l'opérateur est linéaire car c'est une
298 matrice, alors il est valable en tout point nominal et xNominal peut
299 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
300 permet d'indiquer que l'argument est multi-paires.
302 - paires : les arguments par paire sont :
303 - xNominal : série d'arguments permettant de donner le point où
304 l'opérateur est construit pour être ensuite appliqué
305 - xValue : série d'arguments adaptés pour appliquer l'opérateur
306 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
308 if argsAsSerie: _nxValue = paires
309 else: _nxValue = (paires,)
310 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
312 if self.__Matrix is not None:
314 for paire in _nxValue:
315 _xNominal, _xValue = paire
316 self.__addOneMatrixCall()
317 HxValue.append( self.__Matrix * _xValue )
319 self.__addOneMethodCall( len(_nxValue) )
320 if self.__extraArgs is None:
321 HxValue = self.__Method( _nxValue ) # Calcul MF
323 HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
325 if argsAsSerie: return HxValue
326 else: return HxValue[-1]
328 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
330 Permet de renvoyer l'opérateur sous la forme d'une matrice
332 if self.__Matrix is not None:
333 self.__addOneMatrixCall()
334 mValue = [self.__Matrix,]
335 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
338 self.__addOneMethodCall( len(ValueForMethodForm) )
339 for _vfmf in ValueForMethodForm:
340 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
342 self.__addOneMethodCall()
343 mValue = self.__Method(((ValueForMethodForm, None),))
345 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
347 if argsAsSerie: return mValue
348 else: return mValue[-1]
352 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
353 la forme d'une matrice
355 if self.__Matrix is not None:
356 return self.__Matrix.shape
358 raise ValueError("Matrix form of the operator is not available, nor the shape")
360 def nbcalls(self, which=None):
362 Renvoie les nombres d'évaluations de l'opérateur
365 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
366 self.__NbCallsAsMatrix,
367 self.__NbCallsAsMethod,
368 self.__NbCallsOfCached,
369 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
370 Operator.NbCallsAsMatrix,
371 Operator.NbCallsAsMethod,
372 Operator.NbCallsOfCached,
374 if which is None: return __nbcalls
375 else: return __nbcalls[which]
377 def __addOneMatrixCall(self):
378 "Comptabilise un appel"
379 self.__NbCallsAsMatrix += 1 # Decompte local
380 Operator.NbCallsAsMatrix += 1 # Decompte global
382 def __addOneMethodCall(self, nb = 1):
383 "Comptabilise un appel"
384 self.__NbCallsAsMethod += nb # Decompte local
385 Operator.NbCallsAsMethod += nb # Decompte global
387 def __addOneCacheCall(self):
388 "Comptabilise un appel"
389 self.__NbCallsOfCached += 1 # Decompte local
390 Operator.NbCallsOfCached += 1 # Decompte global
392 # ==============================================================================
393 class FullOperator(object):
395 Classe générale d'interface de type opérateur complet
396 (Direct, Linéaire Tangent, Adjoint)
399 name = "GenericFullOperator",
401 asOneFunction = None, # 1 Fonction
402 asThreeFunctions = None, # 3 Fonctions in a dictionary
403 asScript = None, # 1 or 3 Fonction(s) by script
404 asDict = None, # Parameters
406 extraArguments = None,
408 inputAsMF = False,# Fonction(s) as Multi-Functions
413 self.__name = str(name)
414 self.__check = bool(toBeChecked)
415 self.__extraArgs = extraArguments
420 if (asDict is not None) and isinstance(asDict, dict):
421 __Parameters.update( asDict )
422 # Priorité à EnableMultiProcessingInDerivatives=True
423 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
424 __Parameters["EnableMultiProcessingInDerivatives"] = True
425 __Parameters["EnableMultiProcessingInEvaluation"] = False
426 if "EnableMultiProcessingInDerivatives" not in __Parameters:
427 __Parameters["EnableMultiProcessingInDerivatives"] = False
428 if __Parameters["EnableMultiProcessingInDerivatives"]:
429 __Parameters["EnableMultiProcessingInEvaluation"] = False
430 if "EnableMultiProcessingInEvaluation" not in __Parameters:
431 __Parameters["EnableMultiProcessingInEvaluation"] = False
432 if "withIncrement" in __Parameters: # Temporaire
433 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
435 if asScript is not None:
436 __Matrix, __Function = None, None
438 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
440 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
441 __Function.update({"useApproximatedDerivatives":True})
442 __Function.update(__Parameters)
443 elif asThreeFunctions:
445 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
446 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
447 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
449 __Function.update(__Parameters)
452 if asOneFunction is not None:
453 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
454 if asOneFunction["Direct"] is not None:
455 __Function = asOneFunction
457 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
459 __Function = { "Direct":asOneFunction }
460 __Function.update({"useApproximatedDerivatives":True})
461 __Function.update(__Parameters)
462 elif asThreeFunctions is not None:
463 if isinstance(asThreeFunctions, dict) and \
464 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
465 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
466 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
467 __Function = asThreeFunctions
468 elif isinstance(asThreeFunctions, dict) and \
469 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
470 __Function = asThreeFunctions
471 __Function.update({"useApproximatedDerivatives":True})
473 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
474 if "Direct" not in asThreeFunctions:
475 __Function["Direct"] = asThreeFunctions["Tangent"]
476 __Function.update(__Parameters)
480 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
481 # for k in ("Direct", "Tangent", "Adjoint"):
482 # if k in __Function and hasattr(__Function[k],"__class__"):
483 # if type(__Function[k]) is type(self.__init__):
484 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
486 if appliedInX is not None and isinstance(appliedInX, dict):
487 __appliedInX = appliedInX
488 elif appliedInX is not None:
489 __appliedInX = {"HXb":appliedInX}
493 if scheduledBy is not None:
494 self.__T = scheduledBy
496 if isinstance(__Function, dict) and \
497 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
498 ("Direct" in __Function) and (__Function["Direct"] is not None):
499 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
500 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
501 if "withdX" not in __Function: __Function["withdX"] = None
502 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
503 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
504 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
505 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
506 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
507 from daCore import NumericObjects
508 FDA = NumericObjects.FDApproximation(
510 Function = __Function["Direct"],
511 centeredDF = __Function["CenteredFiniteDifference"],
512 increment = __Function["DifferentialIncrement"],
513 dX = __Function["withdX"],
514 avoidingRedundancy = __Function["withAvoidingRedundancy"],
515 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
516 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
517 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
518 mpWorkers = __Function["NumberOfProcesses"],
519 mfEnabled = __Function["withmfEnabled"],
521 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
522 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
523 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
524 elif isinstance(__Function, dict) and \
525 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
526 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
527 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
528 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
529 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
530 elif asMatrix is not None:
531 __matrice = numpy.matrix( __Matrix, numpy.float )
532 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
533 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
534 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
537 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
539 if __appliedInX is not None:
540 self.__FO["AppliedInX"] = {}
541 for key in list(__appliedInX.keys()):
542 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
543 # Pour le cas où l'on a une vraie matrice
544 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
545 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
546 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
547 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
549 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
551 self.__FO["AppliedInX"] = None
557 "x.__repr__() <==> repr(x)"
558 return repr(self.__FO)
561 "x.__str__() <==> str(x)"
562 return str(self.__FO)
564 # ==============================================================================
class Algorithm(object):
    General interface class for algorithm types
    It provides a framework for writing an elementary data assimilation
    algorithm class, by supplying a container (dictionary) of initialized
    persistent variables, and access methods for these stored variables.
    An elementary algorithm class must implement the "run" method.
    def __init__(self, name):
        The present initialization makes it possible to build storage
        variables available generically in the elementary algorithms. These
        storage variables are then kept in a dictionary internal to the
        object, accessed through the "get" method.
        The expected variables are:
        - APosterioriCorrelations : correlation matrix of the A matrix
        - APosterioriCovariance : a posteriori covariance matrix : A
        - APosterioriStandardDeviations : vector of standard deviations of the A matrix
        - APosterioriVariances : vector of variances of the A matrix
        - Analysis : analysis vector : Xa
        - BMA : Background minus Analysis : Xa - Xb
        - CostFunctionJ : global cost function, sum of the two following parts Jb and Jo
        - CostFunctionJAtCurrentOptimum : global cost function at the current optimal state during iterations
        - CostFunctionJb : background part of the cost function : Jb
        - CostFunctionJbAtCurrentOptimum : background part at the current optimal state during iterations
        - CostFunctionJo : observation part of the cost function : Jo
        - CostFunctionJoAtCurrentOptimum : observation part at the current optimal state during iterations
        - CurrentIterationNumber : current iteration number in iterative algorithms, starting from 0
        - CurrentOptimum : current optimal state during iterations
        - CurrentState : current state during iterations
        - GradientOfCostFunctionJ : gradient of the global cost function
        - GradientOfCostFunctionJb : gradient of the background part of the cost function
        - GradientOfCostFunctionJo : gradient of the observation part of the cost function
        - IndexOfOptimum : index of the current optimal state during iterations
        - Innovation : the innovation : d = Y - H(X)
        - InnovationAtCurrentState : the innovation at the current state : dn = Y - H(Xn)
        - JacobianMatrixAtBackground : Jacobian matrix at the background state
        - JacobianMatrixAtCurrentState : Jacobian matrix at the current state
        - JacobianMatrixAtOptimum : Jacobian matrix at the optimum
        - KalmanGainAtOptimum : Kalman gain at the optimum
        - MahalanobisConsistency : covariance consistency indicator
        - OMA : Observation minus Analysis : Y - Xa
        - OMB : Observation minus Background : Y - Xb
        - ForecastState : current forecast state during iterations
        - Residu : residue, in the case of checking algorithms
        - SigmaBck2 : indicator of optimal correction of background errors
        - SigmaObs2 : indicator of optimal correction of observation errors
        - SimulatedObservationAtBackground : observed state H(Xb) at the background
        - SimulatedObservationAtCurrentOptimum : observed state H(X) at the current optimal state
        - SimulatedObservationAtCurrentState : observed state H(X) at the current state
        - SimulatedObservationAtOptimum : observed state H(Xa) at the optimum
        - SimulationQuantiles : observed states H(X) for the requested quantiles
        Additional variables to store can be added in the initialization of
        the elementary algorithm that will inherit from this class
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__required_parameters = {}
        self.__required_inputs = {
            "RequiredInputValues":{"mandatory":(), "optional":()},
            "ClassificationTags":[],
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
        self.__canonical_parameter_name = {} # Mapping "lower"->"correct"
        self.__canonical_stored_name = {} # Mapping "lower"->"correct"
        self.StoredVariables = {}
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
        self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
        self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
        self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        # Build the case-insensitive lookup tables for stored and parameter names
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k
        for k, v in self.__variable_names_not_public.items():
            self.__canonical_parameter_name[k.lower()] = k
        self.__canonical_parameter_name["algorithm"] = "Algorithm"
        self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
    def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
        # Common pre-run phase: logging, parameter setup and input validation.
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
        self._getTimeState(reset=True)
        # Update the internal parameters with the content of Parameters,
        # taking the default values for all those that are not defined
        self.__setParameters(Parameters, reset=True)
        for k, v in self.__variable_names_not_public.items():
            if k not in self._parameters: self.__setParameters( {k:v} )
        # Corrections and complements of the vectors
        def __test_vvalue(argument, variable, argname, symbol=None):
            # Check/log the presence of an input vector according to its requirement level
            if symbol is None: symbol = variable
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
            logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
            logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        __test_vvalue( U, "U", "Control" )
        # Corrections and complements of the covariances
        def __test_cvalue(argument, variable, argname, symbol=None):
            # Check/log the presence of an error covariance matrix according to its requirement level
            if symbol is None: symbol = variable
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
            logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
            logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( Q, "Q", "Evolution" )
        # Corrections and complements of the operators
        def __test_ovalue(argument, variable, argname, symbol=None):
            # Check/log the presence of an operator according to its requirement level
            if symbol is None: symbol = variable
            if argument is None or (isinstance(argument,dict) and len(argument)==0):
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
                logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
            logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
        __test_ovalue( HO, "HO", "Observation", "H" )
        __test_ovalue( EM, "EM", "Evolution", "M" )
        __test_ovalue( CM, "CM", "Control Model", "C" )
        # Normalize the optional "Bounds" parameter to a usable value or None
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
            self._parameters["Bounds"] = None
        # Propagate the logging verbosity level to the optimizer options
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
            self._parameters["optmessages"] = 15
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
            self._parameters["optmessages"] = 15
def _post_run(self,_oH=None):
    """
    Common post-processing run by every algorithm after its calculation:
    derives a posteriori variances/standard deviations/correlations from the
    stored a posteriori covariances, and logs operator call counters and
    resource usage.  (Docstring line was lost in extraction — reconstructed.)
    """
    if ("StoreSupplementaryCalculations" in self._parameters) and \
        "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
        for _A in self.StoredVariables["APosterioriCovariance"]:
            # Each derived quantity is only stored if explicitly requested
            if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
            if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
            if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                # Normalize covariance by inverse standard deviations: C = D^-1 A D^-1
                _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                self.StoredVariables["APosterioriCorrelations"].store( _C )
    if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
        # nbcalls(0): total evaluations; nbcalls(3): cache hits (per SOURCE usage)
        logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
        logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
    logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
    logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
    logging.debug("%s Terminé", self._name)
def _toStore(self, key):
    "True if in StoreSupplementaryCalculations, else False"
    # Simple membership test used by algorithms to decide whether a
    # supplementary quantity must be computed and stored.
    return key in self._parameters["StoreSupplementaryCalculations"]
def get(self, key=None):
    """
    Return one stored variable identified by its (case-insensitive) key, or
    the whole dictionary of stored variables when no key is given.  Variables
    are returned as persistence objects, so individual access methods are
    those of the persistence classes.
    """
    if key is not None:
        # Keys are canonicalized through the lowercase lookup table
        return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
    else:
        return self.StoredVariables
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    if key is None or key.lower() not in self.__canonical_stored_name:
        # Unknown or missing key: not contained (reconstructed elided branch)
        return False
    else:
        return self.__canonical_stored_name[key.lower()] in self.StoredVariables
def keys(self):
    "D.keys() -> list of D's keys"
    if hasattr(self, "StoredVariables"):
        return self.StoredVariables.keys()
    else:
        # Before initialization there are no stored variables
        # (reconstructed elided branch)
        return []
def pop(self, k, d):
    "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
    if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
        return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
    else:
        # NOTE(review): the tail of this method was lost in extraction;
        # reconstructed to mimic dict.pop semantics — confirm against upstream.
        try:
            msg = "'%s'"%k
        except Exception:
            raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"
        try:
            return d
        except Exception:
            raise KeyError(msg)
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
    """
    Must implement the elementary assimilation calculation in its most
    natural mathematical form.  Subclasses override this method.
    """
    raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
    """
    Declare a required algorithm parameter and its default characteristics:
    default value, typecast callable, log message, min/max bounds and the
    list of allowed values.  (Elided dictionary entries reconstructed from
    the keys read back in setParameterValue.)
    """
    if name is None:
        raise ValueError("A name is mandatory to define a required parameter.")
    #
    self.__required_parameters[name] = {
        "default"  : default,
        "typecast" : typecast,
        "minval"   : minval,
        "maxval"   : maxval,
        "listval"  : listval,
        "message"  : message,
        }
    # Case-insensitive lookup: canonical name indexed by lowercase key
    self.__canonical_parameter_name[name.lower()] = name
    logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
    """
    Return the sorted list of required parameter names, or the full
    dictionary of required parameters when noDetails is False.
    """
    if noDetails:
        return sorted(self.__required_parameters.keys())
    else:
        return self.__required_parameters
def setParameterValue(self, name=None, value=None):
    """
    Return the value of a required parameter in a controlled way: apply the
    declared typecast, then validate against minval/maxval bounds and the
    allowed-value list.  Raises ValueError on any violation.
    """
    __k = self.__canonical_parameter_name[name.lower()]
    default  = self.__required_parameters[__k]["default"]
    typecast = self.__required_parameters[__k]["typecast"]
    minval   = self.__required_parameters[__k]["minval"]
    maxval   = self.__required_parameters[__k]["maxval"]
    listval  = self.__required_parameters[__k]["listval"]
    #
    if value is None and default is None:
        __val = None
    elif value is None and default is not None:
        if typecast is None: __val = default
        else:                __val = typecast( default )
    else:
        if typecast is None: __val = value
        else:
            try:
                __val = typecast( value )
            except Exception:
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
    #
    # Bounds are checked element-wise so sequences are supported as well
    if minval is not None and (numpy.array(__val, float) < minval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
    if maxval is not None and (numpy.array(__val, float) > maxval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
    if listval is not None:
        if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
            for v in __val:
                if v not in listval:
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
        elif __val not in listval:
            raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    #
    return __val
def requireInputArguments(self, mandatory=(), optional=()):
    """
    Declare which calculation input arguments are mandatory or optional.
    """
    self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
    self.__required_inputs["RequiredInputValues"]["optional"]  = tuple( optional )
def getInputArguments(self):
    """
    Return the (mandatory, optional) tuples of required input argument names.
    """
    return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
def setAttributes(self, tags=()):
    """
    Append classification tags to the algorithm attributes, and return the
    current tag list in every case (also used as a getter with no argument).
    """
    self.__required_inputs["ClassificationTags"].extend( tags )
    return self.__required_inputs["ClassificationTags"]
def __setParameters(self, fromDico=None, reset=False):
    """
    Store the received parameters in the internal dictionary, resolving
    case-insensitive key aliases, and re-evaluate every required parameter
    either from the supplied dictionary or (on reset) from its default.
    """
    # Fixed mutable default argument (was fromDico={}): use a fresh dict
    if fromDico is None: fromDico = {}
    self._parameters.update( fromDico )
    # Map canonical parameter names back to the caller-supplied spellings
    __inverse_fromDico_keys = {}
    for k in fromDico.keys():
        if k.lower() in self.__canonical_parameter_name:
            __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
    #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
    __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
    for k in self.__required_parameters.keys():
        if k in __canonic_fromDico_keys:
            self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
        elif reset:
            # NOTE(review): branch reconstructed from elided lines — on reset,
            # required parameters fall back to their declared defaults.
            self._parameters[k] = self.setParameterValue(k)
        if k in self._parameters:
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
def _getTimeState(self, reset=False):
    """
    Initialize (reset=True) or return the calculation time (cpu/elapsed)
    in seconds since the last reset.
    """
    if reset:
        # NOTE(review): reset branch reconstructed from elided lines
        self.__initial_cpu_time     = time.process_time()
        self.__initial_elapsed_time = time.perf_counter()
        return 0., 0.
    else:
        self.__cpu_time     = time.process_time() - self.__initial_cpu_time
        self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
        return self.__cpu_time, self.__elapsed_time
def _StopOnTimeLimit(self, X=None, withReason=False):
    "Stop criteria on time limit: True/False [+ Reason]"
    c, e = self._getTimeState()
    if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
        __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
    elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
        __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
    else:
        __SC, __SR = False, ""
    # Reconstructed tail: return reason only when requested
    if withReason:
        return __SC, __SR
    else:
        return __SC
980 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General interface class driving an assimilation algorithm and its
    parameters: selection/import of the algorithm module, parameter storage,
    execution (Python or YACS scheme) and shape validation of all inputs.

    NOTE(review): this copy was damaged by extraction (fused line numbers,
    dropped lines); elided lines were reconstructed from the visible
    structure and are marked where the reconstruction is uncertain.
    """
    def __init__(self,
                 name        = "GenericAlgorithm",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None,
                ):
        """
        Store the algorithm choice and parameters, from a direct dictionary,
        a script, or both.
        """
        self.__name = str(name)
        self.__A    = None
        self.__P    = {}
        #
        self.__algorithm     = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        #
        self.updateParameters( asDict, asScript )
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            # NOTE(review): the algorithm module is only loaded when a choice
            # is given — confirm placement of this call against upstream.
            self.__setAlgorithm( self.__A )
        #
        self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm

    def updateParameters(self,
                 asDict   = None,
                 asScript = None,
                ):
        "Mise a jour des parametres"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def executePythonScheme(self, asDictAO = None):
        "Permet de lancer le calcul d'assimilation"
        Operator.CM.clearCache()
        #
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Each input is unwrapped from its container object when available
        if   hasattr(asDictAO["Background"],"getO"):        self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"):     self.__Xb = asDictAO["CheckingPoint"].getO()
        else:                                               self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"):         self.__Y  = asDictAO["Observation"].getO()
        else:                                               self.__Y  = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"):        self.__U  = asDictAO["ControlInput"].getO()
        else:                                               self.__U  = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else:                                               self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"):      self.__EM = asDictAO["EvolutionModel"].getO()
        else:                                               self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"):        self.__CM = asDictAO["ControlModel"].getO()
        else:                                               self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        #
        self.__shape_validate()
        #
        self.__algorithm.run(
            Xb         = self.__Xb,
            Y          = self.__Y,
            U          = self.__U,
            HO         = self.__HO,
            EM         = self.__EM,
            CM         = self.__CM,
            R          = self.__R,
            B          = self.__B,
            Q          = self.__Q,
            Parameters = self.__P,
            )

    def executeYACSScheme(self, FileName=None):
        "Permet de lancer le calcul d'assimilation"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        else:
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        #
        # NOTE(review): the SALOME import sequence below was partially elided;
        # reconstructed following standard YACS driver usage.
        import pilot
        import SALOMERuntime
        import loader
        SALOMERuntime.RuntimeSALOME_setRuntime()
        #
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        try:
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)
        except Exception:
            pass
        #
        try:
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        #
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
        #
        if not p.isValid():
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        #
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        #
        e = pilot.ExecutorSwig()
        e.RunW(p)
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())

    def get(self, key = None):
        "Return one algorithm variable or parameter, or all of them without key"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
        else:
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
            return allvariables

    def pop(self, k, d):
        "Necessaire pour le pickling"
        return self.__algorithm.pop(k, d)

    def getAlgorithmRequiredParameters(self, noDetails=True):
        "Renvoie la liste des paramètres requis selon l'algorithme"
        return self.__algorithm.getRequiredParameters(noDetails)

    def getAlgorithmInputArguments(self):
        "Renvoie la liste des entrées requises selon l'algorithme"
        return self.__algorithm.getInputArguments()

    def getAlgorithmAttributes(self):
        "Renvoie la liste des attributs selon l'algorithme"
        return self.__algorithm.setAttributes()

    def setObserver(self, __V, __O, __I, __S):
        "Attach an observer function __O with info __I on variable __V"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        else:
            self.__algorithm.StoredVariables[ __V ].setDataObserver(
                Scheduler      = __S,
                HookFunction   = __O,
                HookParameters = __I,
                )

    def removeObserver(self, __V, __O, __A = False):
        "Remove observer __O (or all if __A) from variable __V"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        else:
            return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
                HookFunction = __O,
                AllObservers = __A,
                )

    def hasObserver(self, __V):
        "True if variable __V currently has a data observer"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            return False
        if __V not in self.__algorithm:
            return False
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()

    def keys(self):
        "D.keys() -> list of D's keys"
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__algorithm or key in self.__P

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)

    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm used for the assimilation study.  The argument
        is the name of an algorithm module found in a `daAlgorithms`
        directory on sys.path; the module must define ElementaryAlgorithm.
        """
        if choice is None:
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Recherche explicitement le fichier complet
        # ------------------------------------------
        module_path = None
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Importe le fichier complet comme un module
        # ------------------------------------------
        try:
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instancie un objet du type élémentaire du fichier
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()

    def __shape_validate(self):
        """
        Validate that the sizes of all vectors, operators and covariance
        matrices are mutually consistent before running the algorithm.
        """
        if self.__Xb is None:                      __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"):            __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else:                                  __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        #
        if self.__Y is None:                      __Y_shape = (0,)
        elif hasattr(self.__Y,"size"):            __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else:                                 __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        #
        if self.__U is None:                      __U_shape = (0,)
        elif hasattr(self.__U,"size"):            __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else:                                 __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        #
        if self.__B is None:                      __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else:                                 __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        #
        if self.__R is None:                      __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else:                                 __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        #
        if self.__Q is None:                      __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else:                                 __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        #
        if len(self.__HO) == 0:                              __HO_shape = (0,0)
        elif isinstance(self.__HO, dict):                    __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else:                                            __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        #
        if len(self.__EM) == 0:                              __EM_shape = (0,0)
        elif isinstance(self.__EM, dict):                    __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else:                                            __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        #
        if len(self.__CM) == 0:                              __CM_shape = (0,0)
        elif isinstance(self.__CM, dict):                    __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else:                                            __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Vérification des conditions
        # ---------------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        #
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        #
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        #
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            if self.__algorithmName in ["EnsembleBlue",]:
                # EnsembleBlue accepts a series of backgrounds: reshape into
                # a persistent vector of members
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    # numpy.float was removed in NumPy 1.24: use builtin float
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), float ).T )
                __Xb_shape = min(__B_shape)
            else:
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        #
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        #
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        #
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        #
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1359 # ==============================================================================
class RegulationAndParameters(object):
    """
    General interface class for a regulation scheme and its parameters,
    stored as a plain dictionary filled from a dict or a script.
    """
    def __init__(self,
                 name        = "GenericRegulation",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None,
                ):
        self.__name = str(name)
        self.__P    = {}
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Return the value for a parameter key, or all parameters without key"
        if key in self.__P:
            return self.__P[key]
        else:
            return self.__P
1398 # ==============================================================================
class DataObserver(object):
    """
    General observer interface: attaches a callable (given directly, as a
    template, a string or a script) to one or several algorithm variables.

    NOTE(review): the constructor signature was partially elided in this
    copy; parameter order reconstructed from the visible positional
    UserScript('Observer', asTemplate, asString, asScript) call — callers
    should use keyword arguments.
    """
    def __init__(self,
                 name        = "GenericObserver",
                 onVariable  = None,
                 asTemplate  = None,
                 asString    = None,
                 asScript    = None,
                 asObsObject = None,
                 withInfo    = None,
                 scheduledBy = None,
                 withAlgo    = None,
                ):
        self.__name = str(name)
        self.__V    = None
        self.__O    = None
        self.__I    = None
        #
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = self.__V
            else:
                # One identical info string per observed variable
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            else:
                self.__I = (str(withInfo),)
        else:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        if asObsObject is not None:
            self.__O = asObsObject
        else:
            # Build the observer function from a template/string/script
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            ename = self.__V[k]
            einfo = self.__I[k]
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            else:
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1461 # ==============================================================================
class UserScript(object):
    """
    General interface for a user-supplied script text, taken (in priority
    order) from a direct string, a named template, or a script file;
    defaults to the empty string.
    """
    def __init__(self,
                 name       = "GenericUserScript",
                 asTemplate = None,
                 asString   = None,
                 asScript   = None,
                ):
        self.__name = str(name)
        #
        if asString is not None:
            self.__F = asString
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        else:
            # Nothing supplied: empty script (reconstructed elided branch)
            self.__F = ""

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__F)
1495 # ==============================================================================
class ExternalParameters(object):
    """
    General interface for user-supplied external parameters, stored as a
    dictionary filled from a dict or a script.
    """
    def __init__(self,
                 name     = "GenericExternalParameters",
                 asDict   = None,
                 asScript = None,
                ):
        self.__name = str(name)
        self.__P    = {}
        #
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
                 asDict   = None,
                 asScript = None,
                ):
        "Mise a jour des parametres"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        # Unknown key: return the list of available keys
        # (reconstructed elided branch)
        if key in self.__P:
            return self.__P[key]
        else:
            return list(self.__P.keys())

    def keys(self):
        "D.keys() -> list of D's keys"
        return list(self.__P.keys())

    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the value"
        return self.__P.pop(k, d)

    def items(self):
        "D.items() -> view of D's (key, value) pairs"
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1544 # ==============================================================================
1545 class State(object):
1547 Classe générale d'interface de type état
1550 name = "GenericVector",
1552 asPersistentVector = None,
1558 toBeChecked = False,
1561 Permet de définir un vecteur :
1562 - asVector : entrée des données, comme un vecteur compatible avec le
1563 constructeur de numpy.matrix, ou "True" si entrée par script.
1564 - asPersistentVector : entrée des données, comme une série de vecteurs
1565 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1566 type Persistence, ou "True" si entrée par script.
        - asScript : si un script valide est donné contenant une variable
          nommée "name", la variable est de type "asVector" (par défaut) ou
          "asPersistentVector" selon que l'une de ces variables est placée à
        - asDataFile : si un ou plusieurs fichiers valides sont donnés
          contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
          (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
          nommée "name"), on récupère les colonnes et on les range ligne après
          ligne (colMajor=False, par défaut) ou colonne après colonne
          (colMajor=True). La variable résultante est de type "asVector" (par
          défaut) ou "asPersistentVector" selon que l'une de ces variables est
        # NOTE(review): this extract is missing several source lines
        # (docstring delimiters and some "else:" branches); comments below
        # describe only the statements that are visible.
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # Internal type flags: exactly one is set to True below
        self.__is_vector = False
        self.__is_series = False
        if asScript is not None:
            # Load the named variable from a user script, as a single vector
            # or as a persistent series depending on the requested kind
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                # NOTE(review): "else:" line missing in extract before next line
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            # Load column data from file(s); colNames selects columns, else
            # the variable name itself is used as the column name
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                # NOTE(review): "else:" line missing in extract before next line
                __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # Transpose so the requested major order matches the storage
                # order of the file format (npz is already column-major here)
                if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                if colNames is not None:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                # NOTE(review): "else:" line missing in extract before next line
                __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # Flatten in Fortran (column) or C (row) order per colMajor;
                # NOTE(review): the surrounding "if bool(colMajor):"/"else:"
                # lines are missing in this extract
                __Vector = numpy.ravel(__Vector, order = "F")
                __Vector = numpy.ravel(__Vector, order = "C")
        # NOTE(review): final "else:" branch header missing in extract
        __Vector, __Series = asVector, asPersistentVector
        if __Vector is not None:
            self.__is_vector = True
            # NOTE(review): numpy.float was removed in NumPy 1.24; this needs
            # plain float (or numpy.float64) on modern NumPy
            self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # NOTE(review): eval() on a user-supplied string is a code
                # injection risk if the input is untrusted — flagging only
                if isinstance(__Series, str): __Series = eval(__Series)
                for member in __Series:
                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
            # shape may be an attribute (tuple) or a callable depending on the
            # underlying persistence object
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
            # NOTE(review): "else:" line missing in extract before next line
            self.shape = self.__V.shape()
            if len(self.shape) == 1:
                # Promote 1D shapes to column-vector shape (n,1)
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
        # NOTE(review): "else:" line missing in extract before next line
        raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
    def getO(self, withScheduler=False):
        # Return the stored object, optionally together with the scheduler.
        # NOTE(review): the "if withScheduler:" line and the tail of this
        # method are missing from this extract; only visible lines are kept.
            return self.__V, self.__T
        elif self.__T is None:
        # NOTE(review): "def isvector(self):" header missing in extract;
        # the pair below is the body of the internal-type predicate.
        "Vérification du type interne"
        return self.__is_vector
        # NOTE(review): "def isseries(self):" header missing in extract
        "Vérification du type interne"
        return self.__is_series
        # NOTE(review): "def __repr__(self):" header missing in extract
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)
        # NOTE(review): "def __str__(self):" header missing in extract
        "x.__str__() <==> str(x)"
        return str(self.__V)
1668 # ==============================================================================
class Covariance(object):
    # NOTE(review): docstring delimiters and the "def __init__(self," header
    # are missing from this extract; visible tokens are kept unchanged.
    Classe générale d'interface de type covariance
                 name          = "GenericCovariance",
                 asCovariance  = None,
                 asEyeByScalar = None,
                 asEyeByVector = None,
                 toBeChecked   = False,
        Permet de définir une covariance :
        - asCovariance : entrée des données, comme une matrice compatible avec
          le constructeur de numpy.matrix
        - asEyeByScalar : entrée des données comme un seul scalaire de variance,
          multiplicatif d'une matrice de corrélation identité, aucune matrice
          n'étant donc explicitement à donner
        - asEyeByVector : entrée des données comme un seul vecteur de variance,
          à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
          n'étant donc explicitement à donner
        - asCovObject : entrée des données comme un objet python, qui a les
          methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
          "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
          "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
        - toBeChecked : booléen indiquant si le caractère SDP de la matrice
          pleine doit être vérifié
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # Internal storage-type flags: at most one becomes True below
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        if asScript is not None:
            # NOTE(review): the "elif asEyeByScalar/...:" dispatch lines are
            # missing from this extract; each getvalue() call below belongs
            # to one branch of that dispatch
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
            __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        # NOTE(review): "else:" line missing in extract before next line
        __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        if __Scalar is not None:
            # Scalar form: one variance multiplying an implicit identity
            if numpy.matrix(__Scalar).size != 1:
                raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
        elif __Vector is not None:
            # Vector form: the diagonal of variances, stored as a 1D array
            self.__is_vector = True
            self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            # Full matrix form
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            # Duck-typed covariance object: verify the mandatory interface
            self.__is_object = True
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            # shape/size are optional on the object; fall back when present
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
        # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
    def __validate(self):
        # Internal consistency checks of the stored covariance; raises when
        # the content is undefined, non-square, or not positive-definite.
        # NOTE(review): the original "try:/except:" lines around the two
        # Cholesky factorisations are missing from this extract, which makes
        # the raises below look unconditional; they guard factorisation
        # failure in the full source.
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # Full SPD check only when explicitly requested or when logging is
        # verbose (level below WARNING)
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
            L = numpy.linalg.cholesky( self.__C )
            raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
            L = self.__C.cholesky()
            raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        # NOTE(review): the four "def isscalar/isvector/ismatrix/isobject"
        # headers are missing from this extract; each docstring/return pair
        # below is the body of one predicate reporting the storage type.
        "Vérification du type interne"
        return self.__is_scalar
        "Vérification du type interne"
        return self.__is_vector
        "Vérification du type interne"
        return self.__is_matrix
        "Vérification du type interne"
        return self.__is_object
            # NOTE(review): "def getI(self):" header and the leading
            # "if self.ismatrix():" line are missing from this extract.
            # getI returns the inverse wrapped in a new Covariance, using the
            # cheapest representation-specific inverse.
            return Covariance(self.__name+"I", asCovariance = self.__C.I )
        elif self.isvector():
            # Diagonal storage: elementwise reciprocal of the variances
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject():
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
        # NOTE(review): "else:" line missing in extract before next line
        return None # Indispensable
            # NOTE(review): "def getT(self):" header and leading "if" line
            # are missing. getT returns the transpose; diagonal and scalar
            # forms are symmetric, so their content is returned unchanged.
            return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject():
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
        # NOTE(review): "def cholesky(self):" header and leading "if" line
        # are missing from this extract.
        "Décomposition de Cholesky"
            # Full matrix: lower-triangular Cholesky factor
            return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            # Diagonal: the factor is simply the elementwise square root
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
    #
    def choleskyI(self):
        "Inversion de la décomposition de Cholesky"
        # NOTE(review): "if self.ismatrix():" line missing in extract before
        # the next line.
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
        # NOTE(review): "def sqrtm(self):" header and leading "if" line are
        # missing from this extract.
        "Racine carrée matricielle"
            # Full matrix: true matrix square root via scipy
            return Covariance(self.__name+"C", asCovariance = scipy.linalg.sqrtm(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"sqrt"):
            return Covariance(self.__name+"C", asCovObject = self.__C.sqrt() )
        # NOTE(review): "def sqrtmI(self):" header and leading "if" line are
        # missing from this extract.
        "Inversion de la racine carrée matricielle"
            return Covariance(self.__name+"H", asCovariance = scipy.linalg.sqrtm(self.__C).I )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"sqrtI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.sqrtI() )
    def diag(self, msize=None):
        "Diagonale de la matrice"
        # NOTE(review): several "if/else" lines of this method and of
        # asfullmatrix/trace below are missing from this extract; the raises
        # belong to the "msize is None" guards of the scalar branches.
            return numpy.diag(self.__C)
        elif self.isvector():
        elif self.isscalar():
            # Scalar form has no intrinsic size: the caller must supply it
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return self.__C * numpy.ones(int(msize))
        elif self.isobject():
            return self.__C.diag()
    #
    def asfullmatrix(self, msize=None):
        # Materialise the covariance as a dense numpy.matrix
        elif self.isvector():
            return numpy.matrix( numpy.diag(self.__C), float )
        elif self.isscalar():
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()
    #
    def trace(self, msize=None):
        "Trace de la matrice"
            return numpy.trace(self.__C)
        elif self.isvector():
            # Diagonal storage: trace is the sum of the variances
            return float(numpy.sum(self.__C))
        elif self.isscalar():
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # Scalar times the (caller-supplied) dimension
            return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
        # NOTE(review): "def __repr__(self):" and "def __str__(self):"
        # headers are missing from this extract; both delegate to the
        # underlying storage self.__C.
        "x.__repr__() <==> repr(x)"
        return repr(self.__C)
        "x.__str__() <==> str(x)"
        return str(self.__C)
1920 def __add__(self, other):
1921 "x.__add__(y) <==> x+y"
1922 if self.ismatrix() or self.isobject():
1923 return self.__C + numpy.asmatrix(other)
1924 elif self.isvector() or self.isscalar():
1925 _A = numpy.asarray(other)
1926 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1927 return numpy.asmatrix(_A)
1929 def __radd__(self, other):
1930 "x.__radd__(y) <==> y+x"
1931 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1933 def __sub__(self, other):
1934 "x.__sub__(y) <==> x-y"
1935 if self.ismatrix() or self.isobject():
1936 return self.__C - numpy.asmatrix(other)
1937 elif self.isvector() or self.isscalar():
1938 _A = numpy.asarray(other)
1939 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1940 return numpy.asmatrix(_A)
1942 def __rsub__(self, other):
1943 "x.__rsub__(y) <==> y-x"
1944 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
        # NOTE(review): "def __neg__(self):" header and its return line are
        # missing from this extract; only the docstring is visible.
        "x.__neg__() <==> -x"
    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        # Left multiplication, dispatched on storage type and operand kind;
        # flat sequences the size of one row are treated as column vectors.
        # NOTE(review): three "else:" lines are missing from this extract
        # (before each broadcast-error raise and the final raise).
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vecteur
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
                return self.__C * numpy.asmatrix(other)
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            # Diagonal storage: elementwise multiply per row/column
            if numpy.ravel(other).size == self.shape[1]: # Vecteur
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            # Any flat shape is promoted to a column vector before scaling
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            # Delegate entirely to the duck-typed covariance object
            return self.__C.__mul__(other)
        raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        # Right multiplication, mirror of __mul__ with the operand on the
        # left; flat sequences are treated as row vectors here.
        # NOTE(review): three "else:" lines are missing from this extract
        # (before each broadcast-error raise and the final raise).
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vecteur
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
                return numpy.asmatrix(other) * self.__C
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            # Diagonal storage: elementwise multiply against rows of "other"
            if numpy.ravel(other).size == self.shape[0]: # Vecteur
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
                return numpy.asmatrix(numpy.array(other) * self.__C)
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            # Delegate entirely to the duck-typed covariance object
            return self.__C.__rmul__(other)
        raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
        # NOTE(review): "def __len__(self):" header is missing from this
        # extract; the length is the first dimension of the covariance.
        "x.__len__() <==> len(x)"
        return self.shape[0]
2009 # ==============================================================================
class Observer2Func(object):
    # Builds an observer callable from its source text.
    # NOTE(review): the docstring delimiters, the body of func() (which
    # presumably executes self.__corps) and the "def getfunc" header with its
    # return are missing from this extract; visible tokens kept unchanged.
    Creation d'une fonction d'observateur a partir de son texte
    def __init__(self, corps=""):
        # Source text of the observer body
        self.__corps = corps
    def func(self,var,info):
        "Fonction d'observation"
        "Restitution du pointeur de fonction dans l'objet"
2023 # ==============================================================================
class CaseLogger(object):
    # Records the sequence of commands used to build a case, and can replay
    # or serialise it through pluggable viewers/loaders.
    # NOTE(review): docstring delimiters, the "self.__viewers = {"/"
    # "self.__loaders = {" dict-literal openings/closings, and two method
    # headers (switchon/switchoff-like) are missing from this extract.
    Conservation des commandes de creation d'un cas
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name     = str(__name)
        self.__objname  = str(__objname)
        self.__logSerie = []            # Ordered list of registered commands
        self.__switchoff = False        # When True, register() is a no-op
        # Built-in output formats (viewers)
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
        # Built-in input formats (loaders)
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
        # User-provided formats extend the built-in registries
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Enregistrement d'une commande individuelle"
        # A command is recorded only when fully described and logging is on
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            # NOTE(review): the two assignments below belong to separate
            # switch methods whose "def" lines are missing in this extract
            self.__switchoff = True
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Restitution normalisée des commandes"
        # Serialise the recorded commands through the requested viewer
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        # NOTE(review): "else:" line missing in extract before next line
        raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Chargement normalisé des commandes"
        # Rebuild a case from file/content/object through the loader
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        # NOTE(review): "else:" line missing in extract before next line
        raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
2073 # ==============================================================================
        # NOTE(review): the "def MultiFonction(__xserie," header line and a
        # number of branch lines ("else:", worker-count logic, _jobs
        # initialisation, multiprocessing guards) are missing from this
        # extract; visible tokens kept unchanged.
        _extraArguments = None,
        _sFunction      = lambda x: x,
    """
    Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
    correspondante de valeurs de la fonction en argument
    """
    # Vérifications et définitions initiales
    # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
    if not PlatformInfo.isIterable( __xserie ):
        # NOTE(review): "unkown" typo in the runtime message (left unchanged
        # here; a behavioural fix would correct it to "unknown")
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    # Worker-count normalisation: non-positive/None means "use default"
    if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
        __mpWorkers = int(_mpWorkers)
    import multiprocessing
    # Build the job list: bare values, or value + extra positional arguments
    if _extraArguments is None:
    elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
        for __xvalue in __xserie:
            _jobs.append( [__xvalue, ] + list(_extraArguments) )
    # NOTE(review): "else:" line missing in extract before next line
    raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
    # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
    # Parallel evaluation over the worker pool
    import multiprocessing
    with multiprocessing.Pool(__mpWorkers) as pool:
        __multiHX = pool.map( _sFunction, _jobs )
    # logging.debug("MULTF Internal multiprocessing calculation end")
    # logging.debug("MULTF Internal monoprocessing calculation begin")
    # Sequential fallback: dispatch on the kind of extra arguments
    if _extraArguments is None:
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue ) )
    elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
    elif _extraArguments is not None and isinstance(_extraArguments, dict):
        for __xvalue in __xserie:
            __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
    # NOTE(review): "else:" line missing in extract before next line
    raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
    # logging.debug("MULTF Internal monoprocessing calculation end")
    # logging.debug("MULTF Internal multifonction calculations end")
2139 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
                   _HmX = None, # Simulation déjà faite de Hm(x)
                   _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
                   _SIV = False, # A résorber pour la 8.0
                   _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0, # nbPreviousSteps
                   _QM  = "DA", # QualityMeasure
                   _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
                   _fRt = False, # Restitue ou pas la sortie étendue
                   _sSc = True, # Stocke ou pas les SSC
    # NOTE(review): the parameter lines for _Y, _Xb, _BI, _RI and several
    # control-flow lines ("else:", "try:", returns) are missing from this
    # extract; visible tokens are kept unchanged.
    # NOTE(review): _SSC=[] and _SSV={} are MUTABLE DEFAULT ARGUMENTS, shared
    # across calls — a known Python pitfall; they should default to None.
    """
    Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
    et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
    DFO, QuantileRegression
    """
    # Alias "store" as "append" on the storage objects so both APIs work
    for k in ["CostFunctionJ",
              "SimulatedObservationAtCurrentOptimum",
              "SimulatedObservationAtCurrentState",
        if hasattr(_SSV[k],"store"):
            _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
    # Current state as a column matrix
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    # Use the precomputed simulation when given, otherwise apply the operator
    if _HmX is not None:
        raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        _HX = _Hm( _X, *_arg )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    # NaNs in the simulation propagate to a NaN cost
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        # Quality measure dispatch: each branch defines Jb/Jo for one metric
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb  = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jo  = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jo  = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            # NOTE(review): "else:" line missing in extract before next line
            raise ValueError("Unknown asked quality measure!")
        # Total cost is the sum of the background and observation terms
        J   = float( Jb ) + float( Jo )
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    # Optimum tracking over the stored cost history (offset by _nPS)
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
        if "IndexOfOptimum" in _SSC:
            _SSV["IndexOfOptimum"].append( IndexMin )
        if "CurrentOptimum" in _SSC:
            _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
        if "SimulatedObservationAtCurrentOptimum" in _SSC:
            _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    if _QM in ["QR"]: # Pour le QuantileRegression
2254 # ==============================================================================
if __name__ == "__main__":
    # Minimal self-identification banner when the module is run directly
    __banner = '\n AUTODIAGNOSTIC\n'
    print(__banner)