# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2020 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
import copy
import logging
import numpy

from functools import partial
from daCore import Persistence, PlatformInfo, Interfaces
from daCore import Templates
38 # ==============================================================================
class CacheManager(object):
    """
    General management class for a cache of computations.

    Each cached entry stores the input point (flattened), the computed result,
    the input norm and the operator name. Redundancy is detected by comparing
    the distance between input points against a relative tolerance.
    """
    def __init__(self,
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy    = -1,
                 ):
        """
        The tolerance characteristics can be modified at creation.

        - toleranceInRedundancy : relative tolerance used to declare two input
          points identical
        - lenghtOfRedundancy : maximal number of cached entries; a negative
          value lets the cache size itself at first storage
        """
        self.__tolerBP   = float(toleranceInRedundancy)
        self.__lenghtOR  = int(lenghtOfRedundancy)
        self.__initlnOR  = self.__lenghtOR
        self.__seenNames = []
        self.__enabled   = True
        self.clearCache()

    def clearCache(self):
        "Empty the cache"
        self.__listOPCV  = [] # Previous Calculated Points, Results, Point Norms, Operator
        self.__seenNames = []

    def wasCalculatedIn(self, xValue, oName="" ):
        "Check whether a computation matching the given value already exists"
        __alc = False
        __HxV = None
        if self.__enabled:
            # Scan from the most recent cached entry backwards
            for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
                if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
                    # Different operator name or incompatible sizes: no match
                    pass
                elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                    __alc = True
                    __HxV = self.__listOPCV[i][1]
                    break
        return __alc, __HxV

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Store, for an operator o, a computation Hx matching the value x"
        if self.__lenghtOR < 0:
            # First storage: size the cache from the point dimension
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if a new operator appears
            self.__lenghtOR += 2 * xValue.size + 2
            self.__initlnOR += self.__lenghtOR
            self.__seenNames.append(str(oName))
        while len(self.__listOPCV) > self.__lenghtOR:
            # Shrink the cache by dropping the oldest entries first
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            copy.copy(HxValue),
            numpy.linalg.norm(xValue),
            str(oName),
            ) )

    def disable(self):
        "Deactivate the cache"
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = 0
        self.__enabled  = False

    def enable(self):
        "Activate the cache"
        self.__lenghtOR = self.__initlnOR
        self.__enabled  = True
110 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator.

    The operator is defined either from a Python (multi-)function or from a
    matrix. Calls are counted per instance and globally, and results can be
    cached through the shared CacheManager.
    """

    # Global call counters, shared by every Operator instance
    NbCallsAsMatrix = 0
    NbCallsAsMethod = 0
    NbCallsOfCached = 0
    # Shared cache of already computed points (see CacheManager)
    CM = CacheManager()

    def __init__(self,
                 name                 = "GenericOperator",
                 fromMethod           = None,
                 fromMatrix           = None,
                 avoidingRedundancy   = True,
                 inputAsMultiFunction = False,
                 enableMultiProcess   = False,
                 extraArguments       = None,
                 ):
        """
        Build the operator, giving either a Python function/multi-function or
        a matrix, through one of the two dedicated keywords.

        - name : operator name
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : boolean, avoid (or not) redundant computations
        - inputAsMultiFunction : boolean, the given function is explicitly
          defined (or not) as a multi-function
        - enableMultiProcess : boolean, allow parallel evaluation of the
          mono-function wrapped as a multi-function
        - extraArguments : extra arguments (tuple or dictionary) passed to the
          basic function and its derivatives
        """
        self.__name = str(name)
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC   = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap the user mono-function into the multi-function interface
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            # BUGFIX: "numpy.float" was removed in NumPy 1.24, use builtin float
            self.__Matrix = numpy.matrix( fromMatrix, float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None

    def disableAvoidingRedundancy(self):
        "Deactivate the computation cache"
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        "Activate the computation cache, if allowed for this operator"
        if self.__AvoidRC:
            Operator.CM.enable()
        else:
            Operator.CM.disable()

    def isType(self):
        "Return the operator type (\"Method\", \"Matrix\" or None)"
        return self.__Type

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
        """
        Return the result of applying the operator to a series of arguments
        xValue. Each argument is applied as is, so it must be of the proper
        type beforehand.

        Serial arguments are:
        - xValue : argument suitable for applying the operator
        - HValue : pre-computed value of the operator at this point
        - argsAsSerie : indicates whether the arguments are mono or multi-valued
        """
        if argsAsSerie:
            _xValue = xValue
            _HValue = HValue
        else:
            _xValue = (xValue,)
            if HValue is not None:
                _HValue = (HValue,)
            else:
                _HValue = HValue
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        #
        if _HValue is not None:
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            HxValue = []
            for i in range(len(_HValue)):
                HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
                if self.__AvoidRC:
                    Operator.CM.storeValueInX(_xValue[i],HxValue[-1],self.__name)
        else:
            HxValue = []
            _xserie = []
            _hindex = []
            for i, xv in enumerate(_xValue):
                if self.__AvoidRC:
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                else:
                    __alreadyCalculated = False
                #
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    _hv = __HxV
                else:
                    if self.__Matrix is not None:
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix * xv
                    else:
                        self.__addOneMethodCall()
                        # Defer functional evaluations: collect them to call
                        # the multi-function once on the whole series
                        _xserie.append( xv )
                        _hindex.append(  i )
                        _hv = None
                HxValue.append( _hv )
            #
            if len(_xserie)>0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # MF computation
                else:
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
                if not hasattr(_hserie, "pop"):
                    raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
                for i in _hindex:
                    _xv = _xserie.pop(0)
                    _hv = _hserie.pop(0)
                    HxValue[i] = _hv
                    if self.__AvoidRC:
                        Operator.CM.storeValueInX(_xv,_hv,self.__name)
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False):
        """
        Return the result of applying the operator to (xValue, uValue) pairs.
        Each argument is applied as is, so it must be of the proper type. When
        uValue is None, the operator is assumed to apply to xValue only.

        Serial arguments are:
        - paires : the argument pairs, each being:
          - xValue : argument X suitable for applying the operator
          - uValue : argument U suitable for applying the operator
        - argsAsSerie : indicates whether the argument is mono or multi-valued
        """
        if argsAsSerie: _xuValue = paires
        else:           _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        #
        if self.__Matrix is not None:
            HxValue = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                HxValue.append( self.__Matrix * _xValue )
        else:
            # Build the argument series in a separate list: appending to the
            # iterated sequence itself would be wrong (and fails on tuples)
            _xuArgs = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                else:
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                HxValue = self.__Method( _xuArgs ) # MF computation
            else:
                HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False):
        """
        Return the result of applying the operator to a series of arguments
        xValue, the operator being valid at xNominal. Each argument is applied
        as is, so it must be of the proper type. If the operator is linear
        (a matrix), it is valid at any nominal point and xNominal can be
        anything. There is a single pair by default, and argsAsSerie indicates
        that the argument is multi-pair.

        Serial arguments are:
        - paires : the argument pairs, each being:
          - xNominal : the point at which the operator is built before being
            applied
          - xValue : argument suitable for applying the operator
        - argsAsSerie : indicates whether the argument is mono or multi-valued
        """
        if argsAsSerie: _nxValue = paires
        else:           _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        #
        if self.__Matrix is not None:
            HxValue = []
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                HxValue.append( self.__Matrix * _xValue )
        else:
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                HxValue = self.__Method( _nxValue ) # MF computation
            else:
                HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
        #
        if argsAsSerie: return HxValue
        else:           return HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Return the operator in matrix form. For a functional operator, an
        operating point (ValueForMethodForm) has to be given.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        # BUGFIX: the original used `is not "UnknownVoidValue"`, an identity
        # comparison on a string literal which is implementation-dependent;
        # the sentinel is a string on purpose (do not use "None" here)
        elif not (isinstance(ValueForMethodForm, str) and ValueForMethodForm == "UnknownVoidValue"):
            mValue = []
            if argsAsSerie:
                self.__addOneMethodCall( len(ValueForMethodForm) )
                for _vfmf in ValueForMethodForm:
                    mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
            else:
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
        else:
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        #
        if argsAsSerie: return mValue
        else:           return mValue[-1]

    def shape(self):
        """
        Return the numpy shape when the operator is available in matrix form.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        else:
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the numbers of operator evaluations, as a tuple of local then
        global counters (total, matrix, method, cached), or one item of it
        when "which" is given.
        """
        __nbcalls = (
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
            )
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one matrix application"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self, nb = 1):
        "Count one (or nb) functional application(s)"
        self.__NbCallsAsMethod += nb # Local count
        Operator.NbCallsAsMethod += nb # Global count

    def __addOneCacheCall(self):
        "Count one cache hit"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
391 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator
    (Direct, Tangent-Linear, Adjoint).

    The three elementary Operator objects are built from a matrix, a single
    function with approximated derivatives, or a triplet of functions, given
    directly or through a script, and are made available through "getO".
    """
    def __init__(self,
                 name             = "GenericFullOperator",
                 asMatrix         = None,
                 asOneFunction    = None, # 1 Function
                 asThreeFunctions = None, # 3 Functions in a dictionary
                 asScript         = None, # 1 or 3 Function(s) by script
                 asDict           = None, # Parameters
                 appliedInX       = None,
                 extraArguments   = None,
                 avoidRC          = True,
                 inputAsMF        = False, # Function(s) as Multi-Functions
                 scheduledBy      = None,
                 toBeChecked      = False,
                 ):
        """
        Build the full operator from one of the mutually exclusive input
        forms (asMatrix / asOneFunction / asThreeFunctions / asScript),
        with optional parameters (asDict) and a pre-computed application
        point (appliedInX).
        """
        self.__name      = str(name)
        self.__check     = bool(toBeChecked)
        self.__extraArgs = extraArguments
        #
        self.__FO = {}
        #
        __Parameters = {}
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority given to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        #
        if asScript is not None:
            # The script provides either the matrix or the function(s)
            __Matrix, __Function = None, None
            if asMatrix:
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            elif asOneFunction:
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                __Function = {
                    "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                    }
                __Function.update(__Parameters)
        else:
            __Matrix = asMatrix
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                    else:
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                else:
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                else:
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Fall back on the tangent as direct operator
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
            else:
                __Function = None
        #
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        #
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        else:
            __appliedInX = None
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Direct function with finite-difference approximated derivatives
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function              = __Function["Direct"],
                centeredDF            = __Function["CenteredFiniteDifference"],
                increment             = __Function["DifferentialIncrement"],
                dX                    = __Function["withdX"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy    = __Function["withLenghtOfRedundancy"],
                mpEnabled             = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers             = __Function["NumberOfProcesses"],
                mfEnabled             = __Function["withmfEnabled"],
                )
            self.__FO["Direct"]  = Operator( name = self.__name,           fromMethod = FDA.DirectOperator,  avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # The three user functions are given explicitly
            self.__FO["Direct"]  = Operator( name = self.__name,           fromMethod = __Function["Direct"],  avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # Linear operator: the adjoint is the transposed matrix
            # BUGFIX: "numpy.float" was removed in NumPy 1.24, use builtin float
            __matrice = numpy.matrix( __Matrix, float )
            self.__FO["Direct"]  = Operator( name = self.__name,           fromMatrix = __matrice,   avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice,   avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
        else:
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
        #
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # Case of a true matrix
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Case of a vector given as a 2-dimensional array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), float ).T
                else:
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], float ).T
        else:
            self.__FO["AppliedInX"] = None

    def getO(self):
        "Return the dictionary of the elementary operators"
        return self.__FO

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__FO)
563 # ==============================================================================
564 class Algorithm(object):
566 Classe générale d'interface de type algorithme
568 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
569 d'assimilation, en fournissant un container (dictionnaire) de variables
570 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
572 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
574 def __init__(self, name):
576 L'initialisation présente permet de fabriquer des variables de stockage
577 disponibles de manière générique dans les algorithmes élémentaires. Ces
578 variables de stockage sont ensuite conservées dans un dictionnaire
579 interne à l'objet, mais auquel on accède par la méthode "get".
581 Les variables prévues sont :
582 - APosterioriCorrelations : matrice de corrélations de la matrice A
583 - APosterioriCovariance : matrice de covariances a posteriori : A
584 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
585 - APosterioriVariances : vecteur des variances de la matrice A
586 - Analysis : vecteur d'analyse : Xa
587 - BMA : Background moins Analysis : Xa - Xb
588 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
589 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
590 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
591 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
592 - CostFunctionJo : partie observations de la fonction-coût : Jo
593 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
594 - CurrentOptimum : état optimal courant lors d'itérations
595 - CurrentState : état courant lors d'itérations
596 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
597 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
598 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
599 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
600 - Innovation : l'innovation : d = Y - H(X)
601 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
602 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
603 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
604 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
605 - KalmanGainAtOptimum : gain de Kalman à l'optimum
606 - MahalanobisConsistency : indicateur de consistance des covariances
607 - OMA : Observation moins Analyse : Y - Xa
608 - OMB : Observation moins Background : Y - Xb
609 - PredictedState : état prédit courant lors d'itérations
610 - Residu : dans le cas des algorithmes de vérification
611 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
612 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
613 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
614 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
615 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
616 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
617 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
618 On peut rajouter des variables à stocker dans l'initialisation de
619 l'algorithme élémentaire qui va hériter de cette classe
621 logging.debug("%s Initialisation", str(name))
622 self._m = PlatformInfo.SystemUsage()
624 self._name = str( name )
625 self._parameters = {"StoreSupplementaryCalculations":[]}
626 self.__required_parameters = {}
627 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
628 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
629 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
630 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
632 self.StoredVariables = {}
633 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
634 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
635 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
636 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
637 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
638 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
639 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
640 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
641 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
642 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
643 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
644 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
645 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
646 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
647 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
648 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
649 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
650 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
651 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
652 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
653 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
654 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
655 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
656 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
657 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
658 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
659 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
660 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
661 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
662 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
663 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
664 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
665 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
666 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
667 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
668 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
669 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
670 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
672 for k in self.StoredVariables:
673 self.__canonical_stored_name[k.lower()] = k
675 for k, v in self.__variable_names_not_public.items():
676 self.__canonical_parameter_name[k.lower()] = k
677 self.__canonical_parameter_name["algorithm"] = "Algorithm"
678 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
680 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
682 logging.debug("%s Lancement", self._name)
683 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
685 # Mise a jour des paramètres internes avec le contenu de Parameters, en
686 # reprenant les valeurs par défauts pour toutes celles non définies
687 self.__setParameters(Parameters, reset=True)
688 for k, v in self.__variable_names_not_public.items():
689 if k not in self._parameters: self.__setParameters( {k:v} )
691 # Corrections et compléments
692 def __test_vvalue(argument, variable, argname):
694 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
695 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
696 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
697 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
699 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
701 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
703 __test_vvalue( Xb, "Xb", "Background or initial state" )
704 __test_vvalue( Y, "Y", "Observation" )
706 def __test_cvalue(argument, variable, argname):
708 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
709 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
710 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
711 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
713 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
715 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
717 __test_cvalue( R, "R", "Observation" )
718 __test_cvalue( B, "B", "Background" )
719 __test_cvalue( Q, "Q", "Evolution" )
721 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
722 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
724 self._parameters["Bounds"] = None
726 if logging.getLogger().level < logging.WARNING:
727 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
728 if PlatformInfo.has_scipy:
729 import scipy.optimize
730 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
732 self._parameters["optmessages"] = 15
734 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
735 if PlatformInfo.has_scipy:
736 import scipy.optimize
737 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
739 self._parameters["optmessages"] = 15
# Post-run bookkeeping for an algorithm: derives the a posteriori variances,
# standard deviations and correlations from every stored APosterioriCovariance
# matrix, then logs operator call counts and memory usage.
# NOTE(review): this extract is missing original lines (e.g. 744, 756); in
# particular the logging calls below presumably sit under a guard such as
# "if _oH is not None:" — confirm against the full file.
743 def _post_run(self,_oH=None):
745 if ("StoreSupplementaryCalculations" in self._parameters) and \
746 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
747 for _A in self.StoredVariables["APosterioriCovariance"]:
# Diagonal of the covariance gives the variances; sqrt gives std deviations.
748 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
749 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
750 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
751 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
752 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation matrix C = D^-1/2 * A * D^-1/2 with D = diag(A).
753 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
754 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
755 self.StoredVariables["APosterioriCorrelations"].store( _C )
757 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
758 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
759 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
760 logging.debug("%s Terminé", self._name)
763 def _toStore(self, key):
764 "True if in StoreSupplementaryCalculations, else False"
765 return key in self._parameters["StoreSupplementaryCalculations"]
# Accessor: returns one stored variable (case-insensitive lookup through the
# canonical-name map) or, when no key is given, the whole StoredVariables
# container. Returned objects are the Persistence objects themselves.
# NOTE(review): the docstring triple-quote delimiters (orig. 768/774) and the
# "if key is not None:" / "else:" lines (orig. 775/777) are missing from this
# extract; code kept verbatim.
767 def get(self, key=None):
769 Renvoie l'une des variables stockées identifiée par la clé, ou le
770 dictionnaire de l'ensemble des variables disponibles en l'absence de
771 clé. Ce sont directement les variables sous forme objet qui sont
772 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
773 des classes de persistance.
776 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
778 return self.StoredVariables
# Dict-protocol membership test over stored variables, case-insensitive.
# NOTE(review): the early "return False" branch (orig. 783-784) is missing
# from this extract.
780 def __contains__(self, key=None):
781 "D.__contains__(k) -> True if D has a key k, else False"
782 if key is None or key.lower() not in self.__canonical_stored_name:
785 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
# Fragment of the "keys" method: the "def keys(self):" line (orig. 787) and
# the fallback branch (orig. 791+) are missing from this extract.
788 "D.keys() -> list of D's keys"
789 if hasattr(self, "StoredVariables"):
790 return self.StoredVariables.keys()
# Fragment of the "pop" method (def line orig. 794 and the not-found branch
# orig. 798-801 are missing). Removes and returns a stored variable by its
# case-insensitive canonical name, with dict.pop-like default semantics.
795 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
796 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
797 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
802 raise TypeError("pop expected at least 1 arguments, got 0")
803 "If key is not found, d is returned if given, otherwise KeyError is raised"
# Abstract entry point: concrete algorithms must override this with the
# actual assimilation computation. Raises by design when not overridden.
# NOTE(review): docstring triple-quote delimiters (orig. 810/813) missing.
809 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
811 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
812 sa forme mathématique la plus naturelle possible.
814 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# Declares a required algorithm parameter with its default value, typecast,
# bounds and allowed values, and registers its lower-cased canonical name.
# NOTE(review): several dict-entry lines of the parameter record (orig.
# 825, 827-831) are missing from this extract; code kept verbatim.
816 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
818 Permet de définir dans l'algorithme des paramètres requis et leurs
819 caractéristiques par défaut.
822 raise ValueError("A name is mandatory to define a required parameter.")
824 self.__required_parameters[name] = {
826 "typecast" : typecast,
832 self.__canonical_parameter_name[name.lower()] = name
# Lazy %-style logging; setParameterValue(name) evaluates the default.
833 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# Returns either the sorted parameter names (noDetails=True) or the full
# parameter-description dictionary.
# NOTE(review): the "if noDetails:" / "else:" lines (orig. 840/842) are
# missing from this extract.
835 def getRequiredParameters(self, noDetails=True):
837 Renvoie la liste des noms de paramètres requis ou directement le
838 dictionnaire des paramètres requis.
841 return sorted(self.__required_parameters.keys())
843 return self.__required_parameters
# Validates and returns the value of a declared parameter: applies the
# declared typecast, then checks minval/maxval bounds and listval membership.
# NOTE(review): this extract is missing the try/except around the typecast
# (orig. 863-866 partially), the else branches, and the final return
# (orig. 880-882); code kept verbatim.
845 def setParameterValue(self, name=None, value=None):
847 Renvoie la valeur d'un paramètre requis de manière contrôlée
# Case-insensitive resolution of the canonical parameter name.
849 __k = self.__canonical_parameter_name[name.lower()]
850 default = self.__required_parameters[__k]["default"]
851 typecast = self.__required_parameters[__k]["typecast"]
852 minval = self.__required_parameters[__k]["minval"]
853 maxval = self.__required_parameters[__k]["maxval"]
854 listval = self.__required_parameters[__k]["listval"]
856 if value is None and default is None:
858 elif value is None and default is not None:
859 if typecast is None: __val = default
860 else: __val = typecast( default )
862 if typecast is None: __val = value
865 __val = typecast( value )
867 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Bound checks work element-wise so vector-valued parameters are covered too.
869 if minval is not None and (numpy.array(__val, float) < minval).any():
870 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
871 if maxval is not None and (numpy.array(__val, float) > maxval).any():
872 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
873 if listval is not None:
# For sequence-valued parameters every element must belong to listval.
874 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
877 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
878 elif __val not in listval:
879 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
# Records which run() inputs (Xb, Y, R, ...) are mandatory vs optional for
# this algorithm; consumed later by the input-validation helpers.
883 def requireInputArguments(self, mandatory=(), optional=()):
885 Permet d'imposer des arguments requises en entrée
887 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
888 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
# Merges user-supplied parameters into the internal dictionary, resolving
# names case-insensitively against the declared canonical names, then
# (re)validates every declared parameter through setParameterValue.
# NOTE(review): "fromDico={}" is a mutable default argument — callers must
# not mutate it; also the else/guard lines (orig. 904, 906-907) are missing
# from this extract.
890 def __setParameters(self, fromDico={}, reset=False):
892 Permet de stocker les paramètres reçus dans le dictionnaire interne.
894 self._parameters.update( fromDico )
# Map canonical parameter name -> key actually used by the caller.
895 __inverse_fromDico_keys = {}
896 for k in fromDico.keys():
897 if k.lower() in self.__canonical_parameter_name:
898 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
899 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
900 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
901 for k in self.__required_parameters.keys():
902 if k in __canonic_fromDico_keys:
903 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
905 self._parameters[k] = self.setParameterValue(k)
908 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
910 # ==============================================================================
# Facade tying together a chosen algorithm and its parameters: resolves the
# algorithm name (directly or from a script), stores parameters, and
# instantiates the algorithm via __setAlgorithm.
# NOTE(review): the constructor signature and several initialisations
# (orig. 915, 917-922, 924-926, 930, 935-937, 941, 943) are missing from
# this extract; code kept verbatim.
911 class AlgorithmAndParameters(object):
913 Classe générale d'interface d'action pour l'algorithme et ses paramètres
916 name = "GenericAlgorithm",
923 self.__name = str(name)
927 self.__algorithm = {}
928 self.__algorithmFile = None
929 self.__algorithmName = None
931 self.updateParameters( asDict, asScript )
# The algorithm name may come from a script file instead of being given.
933 if asAlgorithm is None and asScript is not None:
934 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
938 if __Algo is not None:
939 self.__A = str(__Algo)
940 self.__P.update( {"Algorithm":self.__A} )
942 self.__setAlgorithm( self.__A )
# Internal bookkeeping keys that must never appear in public listings.
944 self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
# Updates the parameter dictionary, either from an explicit dict or from a
# "Parameters" variable read in a script file.
# NOTE(review): parameter list lines (orig. 947-949) and the else branch
# (orig. 953-955) are missing from this extract.
946 def updateParameters(self,
950 "Mise a jour des parametres"
951 if asDict is None and asScript is not None:
952 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
956 if __Dict is not None:
957 self.__P.update( dict(__Dict) )
# Runs the assimilation in pure Python: unwraps every input object (via its
# getO() accessor when present), validates shape consistency, then calls the
# algorithm's run() method. Clears the operator evaluation cache first so no
# stale results from a previous study are reused.
# NOTE(review): the keyword-argument lines of the run() call (orig. 985-993,
# 995-996) are missing from this extract; code kept verbatim.
959 def executePythonScheme(self, asDictAO = None):
960 "Permet de lancer le calcul d'assimilation"
961 Operator.CM.clearCache()
963 if not isinstance(asDictAO, dict):
964 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Each entry may be a wrapper exposing getO(), or already the raw object.
965 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
966 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
967 else: self.__Xb = None
968 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
969 else: self.__Y = asDictAO["Observation"]
970 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
971 else: self.__U = asDictAO["ControlInput"]
972 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
973 else: self.__HO = asDictAO["ObservationOperator"]
974 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
975 else: self.__EM = asDictAO["EvolutionModel"]
976 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
977 else: self.__CM = asDictAO["ControlModel"]
978 self.__B = asDictAO["BackgroundError"]
979 self.__R = asDictAO["ObservationError"]
980 self.__Q = asDictAO["EvolutionError"]
982 self.__shape_validate()
984 self.__algorithm.run(
994 Parameters = self.__P,
# Runs the assimilation through a SALOME/YACS XML scheme: checks the file and
# the SALOME/YACS/ADAO environment, loads and validates the XML scheme, then
# executes it and reports errors.
# NOTE(review): several try/except headers and return statements (orig.
# 1011-1012, 1014, 1016, 1020, 1023-1026, 1030, 1035-1036, 1039, 1045, 1047,
# 1050-1052) are missing from this extract; code kept verbatim.
998 def executeYACSScheme(self, FileName=None):
999 "Permet de lancer le calcul d'assimilation"
1000 if FileName is None or not os.path.exists(FileName):
1001 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1003 __file = os.path.abspath(FileName)
1004 logging.debug("The YACS file name is \"%s\"."%__file)
# YACS execution is only possible inside a complete SALOME environment.
1005 if not PlatformInfo.has_salome or \
1006 not PlatformInfo.has_yacs or \
1007 not PlatformInfo.has_adao:
1008 raise ImportError("\n\n"+\
1009 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1010 "Please load the right environnement before trying to use it.\n")
1013 import SALOMERuntime
1015 SALOMERuntime.RuntimeSALOME_setRuntime()
1017 r = pilot.getRuntime()
1018 xmlLoader = loader.YACSLoader()
1019 xmlLoader.registerProcCataLoader()
1021 catalogAd = r.loadCatalog("proc", __file)
1022 r.addCatalog(catalogAd)
1027 p = xmlLoader.load(__file)
1028 except IOError as ex:
1029 print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Parser-level, validity and coherence checks before execution.
1031 logger = p.getLogger("parser")
1032 if not logger.isEmpty():
1033 print("The imported YACS XML schema has errors on parsing:")
1034 print(logger.getStr())
1037 print("The YACS XML schema is not valid and will not be executed:")
1038 print(p.getErrorReport())
1040 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1041 p.checkConsistency(info)
1042 if info.areWarningsOrErrors():
1043 print("The YACS XML schema is not coherent and will not be executed:")
1044 print(info.getGlobalRepr())
1046 e = pilot.ExecutorSwig()
1048 if p.getEffectiveState() != pilot.DONE:
1049 print(p.getErrorReport())
# Accessor: a stored algorithm variable takes priority over a parameter of
# the same name; with no key, returns all public parameters.
# NOTE(review): the else/return lines (orig. 1059, 1062-1063) are missing.
# Also note that "allvariables = self.__P" aliases (does not copy) the
# parameter dict, so the pop() below mutates self.__P — looks intentional
# for hiding non-public keys, but confirm against the full file.
1053 def get(self, key = None):
1054 "Vérifie l'existence d'une clé de variable ou de paramètres"
1055 if key in self.__algorithm:
1056 return self.__algorithm.get( key )
1057 elif key in self.__P:
1058 return self.__P[key]
1060 allvariables = self.__P
1061 for k in self.__variable_names_not_public: allvariables.pop(k, None)
1064 def pop(self, k, d):
1065 "Necessaire pour le pickling"
1066 return self.__algorithm.pop(k, d)
1068 def getAlgorithmRequiredParameters(self, noDetails=True):
# Delegates to the chosen algorithm: names only (noDetails=True) or the
# full parameter-description dictionary.
1069 "Renvoie la liste des paramètres requis selon l'algorithme"
1070 return self.__algorithm.getRequiredParameters(noDetails)
# Attaches a data observer __O (with hook info __I and scheduler __S) to the
# stored variable named __V, after checking an algorithm exists and exposes
# that variable.
# NOTE(review): lines orig. 1079, 1081-1082, 1084-1085 (remaining keyword
# arguments of setDataObserver) are missing from this extract.
1072 def setObserver(self, __V, __O, __I, __S):
1073 if self.__algorithm is None \
1074 or isinstance(self.__algorithm, dict) \
1075 or not hasattr(self.__algorithm,"StoredVariables"):
1076 raise ValueError("No observer can be build before choosing an algorithm.")
1077 if __V not in self.__algorithm:
1078 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1080 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1083 HookParameters = __I,
# Detaches observer __O from the stored variable named __V (mirror of
# setObserver, with the same pre-condition checks).
# NOTE(review): lines orig. 1093, 1095-1098 (remaining arguments of
# removeDataObserver) are missing from this extract.
1086 def removeObserver(self, __V, __O, __A = False):
1087 if self.__algorithm is None \
1088 or isinstance(self.__algorithm, dict) \
1089 or not hasattr(self.__algorithm,"StoredVariables"):
1090 raise ValueError("No observer can be removed before choosing an algorithm.")
1091 if __V not in self.__algorithm:
1092 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1094 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# True when the stored variable named __V currently has a data observer.
# NOTE(review): the two early "return False" lines (orig. 1103, 1105) are
# missing from this extract.
1099 def hasObserver(self, __V):
1100 if self.__algorithm is None \
1101 or isinstance(self.__algorithm, dict) \
1102 or not hasattr(self.__algorithm,"StoredVariables"):
1104 if __V not in self.__algorithm:
1106 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# Fragment of the "keys" method (its def line, orig. 1108, is missing):
# merges algorithm variable names and parameter names, filtering out the
# non-public bookkeeping keys.
1109 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1110 for k in self.__variable_names_not_public:
1111 if k in __allvariables: __allvariables.remove(k)
1112 return __allvariables
1114 def __contains__(self, key=None):
1115 "D.__contains__(k) -> True if D has a key k, else False"
1116 return key in self.__algorithm or key in self.__P
# Fragment of __repr__ (its def line, orig. 1117-1118, is missing): shows
# the algorithm name and the parameter dictionary.
1119 "x.__repr__() <==> repr(x)"
1120 return repr(self.__A)+", "+repr(self.__P)
# Fragment of __str__ (its def line, orig. 1122, is missing): mirrors
# __repr__ using str() conversions.
1123 "x.__str__() <==> str(x)"
1124 return str(self.__A)+", "+str(self.__P)
# Resolves the algorithm name to a module file under "daAlgorithms" on
# sys.path, imports it (temporarily prepending its directory to sys.path),
# checks it defines ElementaryAlgorithm, and instantiates it. An algorithm
# may only be chosen once per object.
# NOTE(review): several lines are missing from this extract (orig. 1127,
# 1131-1132, 1137, 1140, 1146, 1149, 1158, 1162-1163), including the "if
# choice is None:" guard and the initial "module_path = None"; code kept
# verbatim.
1126 def __setAlgorithm(self, choice = None ):
1128 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1129 d'assimilation. L'argument est un champ caractère se rapportant au nom
1130 d'un algorithme réalisant l'opération sur les arguments fixes.
1133 raise ValueError("Error: algorithm choice has to be given")
1134 if self.__algorithmName is not None:
1135 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1136 daDirectory = "daAlgorithms"
1138 # Recherche explicitement le fichier complet
1139 # ------------------------------------------
1141 for directory in sys.path:
1142 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1143 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1144 if module_path is None:
1145 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1147 # Importe le fichier complet comme un module
1148 # ------------------------------------------
# sys.path is mutated then restored; an exception between the two lines
# would leave it modified (no try/finally visible in this extract).
1150 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1151 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1152 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1153 raise ImportError("this module does not define a valid elementary algorithm.")
1154 self.__algorithmName = str(choice)
1155 sys.path = sys_path_tmp ; del sys_path_tmp
1156 except ImportError as e:
1157 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1159 # Instancie un objet du type élémentaire du fichier
1160 # -------------------------------------------------
1161 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# Cross-checks the shapes of all assimilation inputs (Xb, Y, U, B, R, Q, HO,
# EM, CM): normalises each object's shape (attribute or callable), verifies
# covariance matrices are square, and verifies operator/vector/matrix
# compatibility. Raises ValueError/TypeError on any inconsistency.
# NOTE(review): docstring delimiters and a few guard/return lines (orig.
# 1165, 1168, 1175, 1182, ..., 1278-1280) are missing from this extract.
1164 def __shape_validate(self):
1166 Validation de la correspondance correcte des tailles des variables et
1167 des matrices s'il y en a.
# Shape normalisation: None -> degenerate shape, else size/shape attribute,
# else shape() callable (Persistence-like objects).
1169 if self.__Xb is None: __Xb_shape = (0,)
1170 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1171 elif hasattr(self.__Xb,"shape"):
1172 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1173 else: __Xb_shape = self.__Xb.shape()
1174 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1176 if self.__Y is None: __Y_shape = (0,)
1177 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1178 elif hasattr(self.__Y,"shape"):
1179 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1180 else: __Y_shape = self.__Y.shape()
1181 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1183 if self.__U is None: __U_shape = (0,)
1184 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1185 elif hasattr(self.__U,"shape"):
1186 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1187 else: __U_shape = self.__U.shape()
1188 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1190 if self.__B is None: __B_shape = (0,0)
1191 elif hasattr(self.__B,"shape"):
1192 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1193 else: __B_shape = self.__B.shape()
1194 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1196 if self.__R is None: __R_shape = (0,0)
1197 elif hasattr(self.__R,"shape"):
1198 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1199 else: __R_shape = self.__R.shape()
1200 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1202 if self.__Q is None: __Q_shape = (0,0)
1203 elif hasattr(self.__Q,"shape"):
1204 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1205 else: __Q_shape = self.__Q.shape()
1206 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1208 if len(self.__HO) == 0: __HO_shape = (0,0)
1209 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1210 elif hasattr(self.__HO["Direct"],"shape"):
1211 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1212 else: __HO_shape = self.__HO["Direct"].shape()
1213 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1215 if len(self.__EM) == 0: __EM_shape = (0,0)
1216 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1217 elif hasattr(self.__EM["Direct"],"shape"):
1218 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1219 else: __EM_shape = self.__EM["Direct"].shape()
1220 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1222 if len(self.__CM) == 0: __CM_shape = (0,0)
1223 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1224 elif hasattr(self.__CM["Direct"],"shape"):
1225 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1226 else: __CM_shape = self.__CM["Direct"].shape()
1227 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1229 # Vérification des conditions
1230 # ---------------------------
1231 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1232 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1233 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1234 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
# Covariance matrices must be square.
1236 if not( min(__B_shape) == max(__B_shape) ):
1237 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1238 if not( min(__R_shape) == max(__R_shape) ):
1239 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1240 if not( min(__Q_shape) == max(__Q_shape) ):
1241 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1242 if not( min(__EM_shape) == max(__EM_shape) ):
1243 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
# Operator/vector compatibility: H maps state space to observation space.
1245 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1246 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1247 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1248 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1249 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1250 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1251 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1252 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# Special case: EnsembleBlue accepts a background given as an ensemble, so
# Xb is reshaped into a series of vectors matching B.
1254 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1255 if self.__algorithmName in ["EnsembleBlue",]:
1256 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1257 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1258 for member in asPersistentVector:
# NOTE(review): "numpy.float" was removed in NumPy 1.24 and "numpy.matrix"
# is deprecated — this line fails on modern NumPy; use float/ndarray.
1259 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1260 __Xb_shape = min(__B_shape)
1262 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1264 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1265 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1267 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1268 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1270 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1271 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1273 if ("Bounds" in self.__P) \
1274 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1275 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1276 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1277 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1281 # ==============================================================================
# Lightweight facade for a regulation algorithm: collects an algorithm name
# and a parameter dictionary, either given directly or read from a script.
# NOTE(review): constructor signature lines (orig. 1285-1293, 1295-1296),
# else branches (orig. 1299, 1304-1306) and the get() fallback (orig.
# 1315, 1317-1319) are missing from this extract; code kept verbatim.
1282 class RegulationAndParameters(object):
1284 Classe générale d'interface d'action pour la régulation et ses paramètres
1287 name = "GenericRegulation",
1294 self.__name = str(name)
1297 if asAlgorithm is None and asScript is not None:
1298 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1300 __Algo = asAlgorithm
1302 if asDict is None and asScript is not None:
1303 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1307 if __Dict is not None:
1308 self.__P.update( dict(__Dict) )
1310 if __Algo is not None:
1311 self.__P.update( {"Algorithm":__Algo} )
1313 def get(self, key = None):
1314 "Vérifie l'existence d'une clé de variable ou de paramètres"
1316 return self.__P[key]
1320 # ==============================================================================
# Observer facade: normalises the target variable name(s) and info strings,
# builds the observer callable (from a string, a template, a script or a
# ready-made object), then registers it on each variable of the algorithm.
# NOTE(review): constructor signature lines (orig. 1327-1337, 1339-1342),
# several else/guard lines (orig. 1348-1349, 1355, 1357, 1359, 1366-1367,
# 1369, 1372, 1374, 1376-1377, 1380) and the __repr__/__str__ def lines
# (orig. 1383, 1387) are missing from this extract; code kept verbatim.
1321 class DataObserver(object):
1323 Classe générale d'interface de type observer
1326 name = "GenericObserver",
1338 self.__name = str(name)
# Normalise the observed variable name(s) into tuples __V / __I.
1343 if onVariable is None:
1344 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1345 elif type(onVariable) in (tuple, list):
1346 self.__V = tuple(map( str, onVariable ))
1347 if withInfo is None:
1350 self.__I = (str(withInfo),)*len(self.__V)
1351 elif isinstance(onVariable, str):
1352 self.__V = (onVariable,)
1353 if withInfo is None:
1354 self.__I = (onVariable,)
1356 self.__I = (str(withInfo),)
1358 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# Build the observer function from whichever source was provided.
1360 if asString is not None:
1361 __FunctionText = asString
1362 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1363 __FunctionText = Templates.ObserverTemplates[asTemplate]
1364 elif asScript is not None:
1365 __FunctionText = Interfaces.ImportFromScript(asScript).getstring()
1368 __Function = ObserverF(__FunctionText)
1370 if asObsObject is not None:
1371 self.__O = asObsObject
1373 self.__O = __Function.getfunc()
1375 for k in range(len(self.__V)):
1378 if ename not in withAlgo:
1379 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1381 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1384 "x.__repr__() <==> repr(x)"
1385 return repr(self.__V)+"\n"+repr(self.__O)
1388 "x.__str__() <==> str(x)"
1389 return str(self.__V)+"\n"+str(self.__O)
1391 # ==============================================================================
# State/vector facade: builds either a single vector or a series of vectors
# (Persistence object) from direct data, a script variable, or data files,
# and exposes .shape/.size plus type predicates and getO() access.
# NOTE(review): constructor signature lines (orig. 1398, 1400-1404, 1406),
# docstring delimiters, else branches (orig. 1439, 1446, 1452, 1455, 1457,
# 1459, 1461, 1463), the series else-branch (orig. 1476-1477, 1480, 1485),
# and the getO()/predicate/def lines (orig. 1492, 1495-1499, 1502-1503,
# 1506-1507, 1510-1511) are missing from this extract; code kept verbatim.
1392 class State(object):
1394 Classe générale d'interface de type état
1397 name = "GenericVector",
1399 asPersistentVector = None,
1405 toBeChecked = False,
1408 Permet de définir un vecteur :
1409 - asVector : entrée des données, comme un vecteur compatible avec le
1410 constructeur de numpy.matrix, ou "True" si entrée par script.
1411 - asPersistentVector : entrée des données, comme une série de vecteurs
1412 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1413 type Persistence, ou "True" si entrée par script.
1414 - asScript : si un script valide est donné contenant une variable
1415 nommée "name", la variable est de type "asVector" (par défaut) ou
1416 "asPersistentVector" selon que l'une de ces variables est placée à
1418 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1419 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1420 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1421 nommée "name"), on récupère les colonnes et on les range ligne après
1422 ligne (colMajor=False, par défaut) ou colonne après colonne
1423 (colMajor=True). La variable résultante est de type "asVector" (par
1424 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1427 self.__name = str(name)
1428 self.__check = bool(toBeChecked)
1432 self.__is_vector = False
1433 self.__is_series = False
# Input resolution: script, data file(s), or direct values.
1435 if asScript is not None:
1436 __Vector, __Series = None, None
1437 if asPersistentVector:
1438 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1440 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1441 elif asDataFile is not None:
1442 __Vector, __Series = None, None
1443 if asPersistentVector:
1444 if colNames is not None:
1445 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1447 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already column-major, hence the inverted transpose logic.
1448 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1449 __Series = numpy.transpose(__Series)
1450 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1451 __Series = numpy.transpose(__Series)
1453 if colNames is not None:
1454 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1456 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1458 __Vector = numpy.ravel(__Vector, order = "F")
1460 __Vector = numpy.ravel(__Vector, order = "C")
1462 __Vector, __Series = asVector, asPersistentVector
1464 if __Vector is not None:
1465 self.__is_vector = True
# NOTE(review): "numpy.float" was removed in NumPy 1.24 and "numpy.matrix"
# is deprecated — these constructions fail on modern NumPy.
1466 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1467 self.shape = self.__V.shape
1468 self.size = self.__V.size
1469 elif __Series is not None:
1470 self.__is_series = True
1471 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1472 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string input — acceptable only for trusted
# user scripts; never feed untrusted data through this path.
1473 if isinstance(__Series, str): __Series = eval(__Series)
1474 for member in __Series:
1475 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1478 if isinstance(self.__V.shape, (tuple, list)):
1479 self.shape = self.__V.shape
1481 self.shape = self.__V.shape()
# Normalise to a 2D column shape and derive the total size.
1482 if len(self.shape) == 1:
1483 self.shape = (self.shape[0],1)
1484 self.size = self.shape[0] * self.shape[1]
1486 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1488 if scheduledBy is not None:
1489 self.__T = scheduledBy
1491 def getO(self, withScheduler=False):
1493 return self.__V, self.__T
1494 elif self.__T is None:
1500 "Vérification du type interne"
1501 return self.__is_vector
1504 "Vérification du type interne"
1505 return self.__is_series
1508 "x.__repr__() <==> repr(x)"
1509 return repr(self.__V)
1512 "x.__str__() <==> str(x)"
1513 return str(self.__V)
1515 # ==============================================================================
# Covariance facade: accepts a full matrix, a scalar (sigma^2 * identity), a
# diagonal vector, or a duck-typed covariance object, and records which
# representation is in use (__is_scalar/__is_vector/__is_matrix/__is_object)
# together with .shape/.size.
# NOTE(review): constructor signature lines (orig. 1517, 1519-1520,
# 1525-1526, 1528), docstring delimiters, the asScript dispatch conditions
# (orig. 1557, 1559, 1561, 1563, 1565, 1567), scalar shape/size lines
# (orig. 1573-1574), object-branch lines (orig. 1587, 1593-1594,
# 1597-1600, 1602-1604) are missing from this extract; code kept verbatim.
1516 class Covariance(object):
1518 Classe générale d'interface de type covariance
1521 name = "GenericCovariance",
1522 asCovariance = None,
1523 asEyeByScalar = None,
1524 asEyeByVector = None,
1527 toBeChecked = False,
1530 Permet de définir une covariance :
1531 - asCovariance : entrée des données, comme une matrice compatible avec
1532 le constructeur de numpy.matrix
1533 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1534 multiplicatif d'une matrice de corrélation identité, aucune matrice
1535 n'étant donc explicitement à donner
1536 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1537 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1538 n'étant donc explicitement à donner
1539 - asCovObject : entrée des données comme un objet python, qui a les
1540 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1541 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1542 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1543 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1544 pleine doit être vérifié
1546 self.__name = str(name)
1547 self.__check = bool(toBeChecked)
1550 self.__is_scalar = False
1551 self.__is_vector = False
1552 self.__is_matrix = False
1553 self.__is_object = False
# Script input: the value read is routed to the right representation.
1555 if asScript is not None:
1556 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1558 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1560 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1562 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1564 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1566 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1568 if __Scalar is not None:
1569 if numpy.matrix(__Scalar).size != 1:
1570 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1571 self.__is_scalar = True
1572 self.__C = numpy.abs( float(__Scalar) )
1575 elif __Vector is not None:
1576 self.__is_vector = True
# NOTE(review): "numpy.matrix" is deprecated in modern NumPy — confirm
# the supported NumPy range before upgrading.
1577 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1578 self.shape = (self.__C.size,self.__C.size)
1579 self.size = self.__C.size**2
1580 elif __Matrix is not None:
1581 self.__is_matrix = True
1582 self.__C = numpy.matrix( __Matrix, float )
1583 self.shape = self.__C.shape
1584 self.size = self.__C.size
1585 elif __Object is not None:
1586 self.__is_object = True
# Duck-typing check: the object must provide the full matrix protocol.
1588 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1589 if not hasattr(self.__C,at):
1590 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1591 if hasattr(self.__C,"shape"):
1592 self.shape = self.__C.shape
1595 if hasattr(self.__C,"size"):
1596 self.size = self.__C.size
1601 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Validates the covariance: set, square, and positive-definite. For a full
# matrix or an object, definiteness is only checked on demand (toBeChecked)
# or when debug logging is active, via a Cholesky factorisation attempt.
# NOTE(review): try/except lines around the Cholesky calls (orig. 1618,
# 1620, 1623, 1625) are missing from this extract; code kept verbatim.
1605 def __validate(self):
1607 if self.__C is None:
1608 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1609 if self.ismatrix() and min(self.shape) != max(self.shape):
1610 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1611 if self.isobject() and min(self.shape) != max(self.shape):
1612 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1613 if self.isscalar() and self.__C <= 0:
1614 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1615 if self.isvector() and (self.__C <= 0).any():
1616 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1617 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
# Cholesky succeeds iff the matrix is symmetric positive-definite.
1619 L = numpy.linalg.cholesky( self.__C )
1621 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1622 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1624 L = self.__C.cholesky()
1626 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1629 "Vérification du type interne"
1630 return self.__is_scalar
1633 "Vérification du type interne"
1634 return self.__is_vector
1637 "Vérification du type interne"
1638 return self.__is_matrix
1641 "Vérification du type interne"
1642 return self.__is_object
1647 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1648 elif self.isvector():
1649 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1650 elif self.isscalar():
1651 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1652 elif self.isobject():
1653 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1655 return None # Indispensable
1660 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1661 elif self.isvector():
1662 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1663 elif self.isscalar():
1664 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1665 elif self.isobject():
1666 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1669 "Décomposition de Cholesky"
1671 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1672 elif self.isvector():
1673 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1674 elif self.isscalar():
1675 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1676 elif self.isobject() and hasattr(self.__C,"cholesky"):
1677 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1679 def choleskyI(self):
1680 "Inversion de la décomposition de Cholesky"
1682 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1683 elif self.isvector():
1684 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1685 elif self.isscalar():
1686 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1687 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1688 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1690 def diag(self, msize=None):
1691 "Diagonale de la matrice"
1693 return numpy.diag(self.__C)
1694 elif self.isvector():
1696 elif self.isscalar():
1698 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1700 return self.__C * numpy.ones(int(msize))
1701 elif self.isobject():
1702 return self.__C.diag()
1704 def asfullmatrix(self, msize=None):
1708 elif self.isvector():
1709 return numpy.matrix( numpy.diag(self.__C), float )
1710 elif self.isscalar():
1712 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1714 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1715 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1716 return self.__C.asfullmatrix()
1718 def trace(self, msize=None):
1719 "Trace de la matrice"
1721 return numpy.trace(self.__C)
1722 elif self.isvector():
1723 return float(numpy.sum(self.__C))
1724 elif self.isscalar():
1726 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1728 return self.__C * int(msize)
1729 elif self.isobject():
1730 return self.__C.trace()
1736 "x.__repr__() <==> repr(x)"
1737 return repr(self.__C)
1740 "x.__str__() <==> str(x)"
1741 return str(self.__C)
1743 def __add__(self, other):
1744 "x.__add__(y) <==> x+y"
1745 if self.ismatrix() or self.isobject():
1746 return self.__C + numpy.asmatrix(other)
1747 elif self.isvector() or self.isscalar():
1748 _A = numpy.asarray(other)
1749 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1750 return numpy.asmatrix(_A)
1752 def __radd__(self, other):
1753 "x.__radd__(y) <==> y+x"
1754 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1756 def __sub__(self, other):
1757 "x.__sub__(y) <==> x-y"
1758 if self.ismatrix() or self.isobject():
1759 return self.__C - numpy.asmatrix(other)
1760 elif self.isvector() or self.isscalar():
1761 _A = numpy.asarray(other)
1762 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1763 return numpy.asmatrix(_A)
1765 def __rsub__(self, other):
1766 "x.__rsub__(y) <==> y-x"
1767 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1770 "x.__neg__() <==> -x"
1773 def __mul__(self, other):
1774 "x.__mul__(y) <==> x*y"
1775 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1776 return self.__C * other
1777 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1778 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1779 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1780 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1781 return self.__C * numpy.asmatrix(other)
1783 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1784 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1785 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1786 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1787 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1788 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1790 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1791 elif self.isscalar() and isinstance(other,numpy.matrix):
1792 return self.__C * other
1793 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1794 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1795 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1797 return self.__C * numpy.asmatrix(other)
1798 elif self.isobject():
1799 return self.__C.__mul__(other)
1801 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1803 def __rmul__(self, other):
1804 "x.__rmul__(y) <==> y*x"
1805 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1806 return other * self.__C
1807 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1808 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1809 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1810 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1811 return numpy.asmatrix(other) * self.__C
1813 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1814 elif self.isvector() and isinstance(other,numpy.matrix):
1815 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1816 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1817 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1818 return numpy.asmatrix(numpy.array(other) * self.__C)
1820 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1821 elif self.isscalar() and isinstance(other,numpy.matrix):
1822 return other * self.__C
1823 elif self.isobject():
1824 return self.__C.__rmul__(other)
1826 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1829 "x.__len__() <==> len(x)"
1830 return self.shape[0]
1832 # ==============================================================================
1833 class ObserverF(object):
1835 Creation d'une fonction d'observateur a partir de son texte
1837 def __init__(self, corps=""):
1838 self.__corps = corps
1839 def func(self,var,info):
1840 "Fonction d'observation"
1843 "Restitution du pointeur de fonction dans l'objet"
1846 # ==============================================================================
1847 class CaseLogger(object):
1849 Conservation des commandes de creation d'un cas
1851 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
1852 self.__name = str(__name)
1853 self.__objname = str(__objname)
1854 self.__logSerie = []
1855 self.__switchoff = False
1857 "TUI" :Interfaces._TUIViewer,
1858 "SCD" :Interfaces._SCDViewer,
1859 "YACS":Interfaces._YACSViewer,
1862 "TUI" :Interfaces._TUIViewer,
1863 "COM" :Interfaces._COMViewer,
1865 if __addViewers is not None:
1866 self.__viewers.update(dict(__addViewers))
1867 if __addLoaders is not None:
1868 self.__loaders.update(dict(__addLoaders))
1870 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
1871 "Enregistrement d'une commande individuelle"
1872 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
1873 if "self" in __keys: __keys.remove("self")
1874 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
1876 self.__switchoff = True
1878 self.__switchoff = False
1880 def dump(self, __filename=None, __format="TUI", __upa=""):
1881 "Restitution normalisée des commandes"
1882 if __format in self.__viewers:
1883 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
1885 raise ValueError("Dumping as \"%s\" is not available"%__format)
1886 return __formater.dump(__filename, __upa)
1888 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
1889 "Chargement normalisé des commandes"
1890 if __format in self.__loaders:
1891 __formater = self.__loaders[__format]()
1893 raise ValueError("Loading as \"%s\" is not available"%__format)
1894 return __formater.load(__filename, __content, __object)
1896 # ==============================================================================
1899 _extraArguments = None,
1900 _sFunction = lambda x: x,
1905 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
1906 correspondante de valeurs de la fonction en argument
1908 # Vérifications et définitions initiales
1909 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
1910 if not PlatformInfo.isIterable( __xserie ):
1911 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
1913 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
1916 __mpWorkers = int(_mpWorkers)
1918 import multiprocessing
1929 if _extraArguments is None:
1931 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
1932 for __xvalue in __xserie:
1933 _jobs.append( [__xvalue, ] + list(_extraArguments) )
1935 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
1936 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
1937 import multiprocessing
1938 with multiprocessing.Pool(__mpWorkers) as pool:
1939 __multiHX = pool.map( _sFunction, _jobs )
1942 # logging.debug("MULTF Internal multiprocessing calculation end")
1944 # logging.debug("MULTF Internal monoprocessing calculation begin")
1946 if _extraArguments is None:
1947 for __xvalue in __xserie:
1948 __multiHX.append( _sFunction( __xvalue ) )
1949 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
1950 for __xvalue in __xserie:
1951 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
1952 elif _extraArguments is not None and isinstance(_extraArguments, dict):
1953 for __xvalue in __xserie:
1954 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
1956 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
1957 # logging.debug("MULTF Internal monoprocessing calculation end")
1959 # logging.debug("MULTF Internal multifonction calculations end")
1962 # ==============================================================================
1963 def CostFunction3D(_x,
1964 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
1965 _HmX = None, # Simulation déjà faite de Hm(x)
1966 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
1971 _SIV = False, # A résorber pour la 8.0
1972 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
1973 _nPS = 0, # nbPreviousSteps
1974 _QM = "DA", # QualityMeasure
1975 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
1976 _fRt = False, # Restitue ou pas la sortie étendue
1977 _sSc = True, # Stocke ou pas les SSC
1980 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
1981 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
1982 DFO, QuantileRegression
1988 for k in ["CostFunctionJ",
1994 "SimulatedObservationAtCurrentOptimum",
1995 "SimulatedObservationAtCurrentState",
1999 if hasattr(_SSV[k],"store"):
2000 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
2002 _X = numpy.asmatrix(numpy.ravel( _x )).T
2003 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2004 _SSV["CurrentState"].append( _X )
2006 if _HmX is not None:
2010 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2014 _HX = _Hm( _X, *_arg )
2015 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2017 if "SimulatedObservationAtCurrentState" in _SSC or \
2018 "SimulatedObservationAtCurrentOptimum" in _SSC:
2019 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
2021 if numpy.any(numpy.isnan(_HX)):
2022 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2024 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2025 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2026 if _BI is None or _RI is None:
2027 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2028 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2029 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2030 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2031 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2033 raise ValueError("Observation error covariance matrix has to be properly defined!")
2035 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2036 elif _QM in ["LeastSquares", "LS", "L2"]:
2038 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2039 elif _QM in ["AbsoluteValue", "L1"]:
2041 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2042 elif _QM in ["MaximumError", "ME"]:
2044 Jo = numpy.max( numpy.abs(_Y - _HX) )
2045 elif _QM in ["QR", "Null"]:
2049 raise ValueError("Unknown asked quality measure!")
2051 J = float( Jb ) + float( Jo )
2054 _SSV["CostFunctionJb"].append( Jb )
2055 _SSV["CostFunctionJo"].append( Jo )
2056 _SSV["CostFunctionJ" ].append( J )
2058 if "IndexOfOptimum" in _SSC or \
2059 "CurrentOptimum" in _SSC or \
2060 "SimulatedObservationAtCurrentOptimum" in _SSC:
2061 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2062 if "IndexOfOptimum" in _SSC:
2063 _SSV["IndexOfOptimum"].append( IndexMin )
2064 if "CurrentOptimum" in _SSC:
2065 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2066 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2067 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2072 if _QM in ["QR"]: # Pour le QuantileRegression
2077 # ==============================================================================
# Manual self-test entry point: executed only when the module is run directly.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')