1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2019 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence
36 from daCore import PlatformInfo
37 from daCore import Interfaces
38 from daCore import Templates
39 from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
42 class CacheManager(object):
# NOTE(review): this numbered listing is incomplete — the original line
# numbers skip (42→44→47, …), so docstring delimiters, the "def __init__"
# header, "return"/"else:" lines and two method headers are not visible.
# Comments below state only what the visible lines establish.
44 General class managing a cache of computations
# Visible keyword defaults of the (hidden) __init__ signature:
47 toleranceInRedundancy = 1.e-18,
48 lenghtOfRedundancy = -1,
51 The tolerance characteristics can be modified at creation time.
53 self.__tolerBP = float(toleranceInRedundancy)
54 self.__lenghtOR = int(lenghtOfRedundancy)
55 self.__initlnOR = self.__lenghtOR
60 self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
61 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
63 def wasCalculatedIn(self, xValue ): #, info="" ):
64 "Check whether a computation matching the given value exists in the cache"
# Scan the cache backwards (most recent first), bounded by the cache length.
67 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# A size mismatch means this cached point cannot match; the branch body
# (presumably a skip/continue) is not visible in this listing — TODO confirm.
68 if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
69 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
# Relative closeness test: ||x - x_i|| < tolerance * ||x_i||
# (entry [2] of each cache tuple stores the point norm).
71 if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
73 __HxV = self.__listOPCV[i][1]
74 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
78 def storeValueInX(self, xValue, HxValue ):
79 "Store a computed result for the given value"
# A negative length means "auto-size on first store": 2*size+2 entries.
80 if self.__lenghtOR < 0:
81 self.__lenghtOR = 2 * xValue.size + 2
82 self.__initlnOR = self.__lenghtOR
# FIFO eviction down to the allowed cache length.
83 while len(self.__listOPCV) > self.__lenghtOR:
84 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
85 self.__listOPCV.pop(0)
# Append the (point, result, norm) tuple; the closing lines are missing here.
86 self.__listOPCV.append( (
87 copy.copy(numpy.ravel(xValue)),
89 numpy.linalg.norm(xValue),
# Fragments of two further methods whose "def" headers are missing —
# apparently a save and a restore of the cache-length setting:
94 self.__initlnOR = self.__lenghtOR
99 self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
102 class Operator(object):
# NOTE(review): incomplete listing — the class docstring delimiters, the
# class-level counters (Operator.NbCallsAsMatrix etc.), the shared cache
# instance "Operator.CM" and the "def __init__" header are not visible here.
104 General interface class for a simple operator
# Visible keyword defaults of the (hidden) __init__ signature:
114 avoidingRedundancy = True,
115 inputAsMultiFunction = False,
116 enableMultiProcess = False,
117 extraArguments = None,
120 An object of this type is built by providing, through one of the two
121 keywords, either a Python function or multi-function, or a
124 - fromMethod : Python function argument
125 - fromMatrix : argument suitable for the numpy.matrix constructor
126 - avoidingRedundancy : boolean to avoid (or not) redundant computations
127 - inputAsMultiFunction : boolean indicating a function explicitly
128 defined (or not) as a multi-function
129 - extraArguments : extra arguments passed to the base function
130 and its derivatives (tuple or dictionary)
# Per-instance call counters: matrix applications, method calls, cache hits.
132 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
133 self.__AvoidRC = bool( avoidingRedundancy )
134 self.__inputAsMF = bool( inputAsMultiFunction )
135 self.__mpEnabled = bool( enableMultiProcess )
136 self.__extraArgs = extraArguments
# Choose the backing implementation: user multi-function, wrapped
# mono-function, or explicit matrix.
137 if fromMethod is not None and self.__inputAsMF:
138 self.__Method = fromMethod # logtimer(fromMethod)
140 self.__Type = "Method"
141 elif fromMethod is not None and not self.__inputAsMF:
# Wrap a mono-function into a multi-function (MultiFonction is defined
# elsewhere in this file, not visible in this listing).
142 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
144 self.__Type = "Method"
145 elif fromMatrix is not None:
# NOTE(review): numpy.float is deprecated (removed in NumPy 1.20+);
# prefer float or numpy.float64 when this file is modernized.
147 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
148 self.__Type = "Matrix"
154 def disableAvoidingRedundancy(self):
# Force-disable the shared computation cache (docstring line missing here).
156 Operator.CM.disable()
158 def enableAvoidingRedundancy(self):
# Re-enable the cache. Only this line is visible; it is presumably the
# "else" branch that keeps the cache disabled when the instance was built
# with avoidingRedundancy=False — TODO confirm against the full file.
163 Operator.CM.disable()
169 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
171 Returns the result of applying the operator to a series of arguments
172 xValue. This method simply applies the operator, each argument being
173 expected to already be of the proper type.
175 - the serial arguments are :
176 - xValue : argument suitable for applying the operator
177 - HValue : precomputed value of the operator at this point
178 - argsAsSerie : indicates whether the arguments are mono or multi-valued
# Branch 1 — precomputed results supplied: store HValue directly,
# feeding the redundancy cache when enabled.
185 if HValue is not None:
189 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
191 if _HValue is not None:
192 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
194 for i in range(len(_HValue)):
195 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
197 Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
# Branch 2 — evaluate each point, reusing cached results when available.
202 for i, xv in enumerate(_xValue):
204 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
206 __alreadyCalculated = False
208 if __alreadyCalculated:
209 self.__addOneCacheCall()
212 if self.__Matrix is not None:
213 self.__addOneMatrixCall()
214 _hv = self.__Matrix * xv
216 self.__addOneMethodCall()
# For method-operators _hv is apparently left unset/None here and the
# point is queued in _xserie for the batched call below — the assembly
# lines are missing from this listing, TODO confirm.
220 HxValue.append( _hv )
# Evaluate all non-cached points in a single multi-function call.
222 if len(_xserie)>0 and self.__Matrix is None:
223 if self.__extraArgs is None:
224 _hserie = self.__Method( _xserie ) # Multi-function evaluation
226 _hserie = self.__Method( _xserie, self.__extraArgs ) # Multi-function evaluation
227 if not hasattr(_hserie, "pop"):
228 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
# Back-fill the computed values into HxValue and the cache.
234 Operator.CM.storeValueInX(_xv,_hv)
236 if argsAsSerie: return HxValue
237 else: return HxValue[-1]
def appliedControledFormTo(self, paires, argsAsSerie = False):
    """
    Return the result of applying the operator to (xValue, uValue)
    pairs. This method simply applies the operator, its argument being
    expected to already be of the proper type. If uValue is None, the
    operator is assumed to apply to xValue only.

    Serial arguments:
      - paires : the paired arguments are:
          - xValue : X argument suitable for applying the operator
          - uValue : U argument suitable for applying the operator
      - argsAsSerie : indicates whether the argument is mono or multi-valued
    """
    if argsAsSerie: _xuValue = paires
    else:           _xuValue = (paires,)
    PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
    #
    if self.__Matrix is not None:
        # Linear case: apply the matrix to each X, ignoring U.
        HxValue = []
        for paire in _xuValue:
            _xValue, _uValue = paire
            self.__addOneMatrixCall()
            HxValue.append( self.__Matrix * _xValue )
    else:
        # Method case: build the argument series in a SEPARATE list.
        # Fix: the previous code appended to _xuValue itself while
        # iterating it, which never terminates for a list input and
        # raises AttributeError for the tuple built above.
        _xuArgs = []
        for paire in _xuValue:
            _xValue, _uValue = paire
            if _uValue is not None:
                _xuArgs.append( paire )
            else:
                _xuArgs.append( _xValue )
        self.__addOneMethodCall( len(_xuArgs) )
        if self.__extraArgs is None:
            HxValue = self.__Method( _xuArgs ) # Multi-function evaluation
        else:
            HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Multi-function evaluation
    #
    if argsAsSerie: return HxValue
    else:           return HxValue[-1]
279 def appliedInXTo(self, paires, argsAsSerie = False):
281 Returns the result of applying the operator to a series of arguments
282 xValue, knowing that the operator is valid at xNominal. This method
283 simply applies the operator, its argument being expected to already
284 be of the proper type. If the operator is linear, i.e. a matrix,
285 then it is valid at every nominal point and xNominal can be
286 arbitrary. There is a single pair by default, and argsAsSerie
287 indicates that the argument is multi-pair.
289 - paires : the paired arguments are :
290 - xNominal : series of arguments giving the point at which
291 the operator is built before being applied
292 - xValue : series of arguments suitable for applying the operator
293 - argsAsSerie : indicates whether the argument is mono or multi-valued
295 if argsAsSerie: _nxValue = paires
296 else: _nxValue = (paires,)
297 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear case: xNominal is irrelevant, apply the matrix to each xValue.
299 if self.__Matrix is not None:
301 for paire in _nxValue:
302 _xNominal, _xValue = paire
303 self.__addOneMatrixCall()
304 HxValue.append( self.__Matrix * _xValue )
# Method case: one batched multi-function call over all pairs
# (the "else:" lines are missing from this listing).
306 self.__addOneMethodCall( len(_nxValue) )
307 if self.__extraArgs is None:
308 HxValue = self.__Method( _nxValue ) # Multi-function evaluation
310 HxValue = self.__Method( _nxValue, self.__extraArgs ) # Multi-function evaluation
312 if argsAsSerie: return HxValue
313 else: return HxValue[-1]
def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
    """
    Return the operator in matrix form.

    For a matrix operator, the stored matrix is returned directly. For a
    function/method operator, the matrix is obtained by evaluating the
    method at the given operating point(s).

    Arguments:
      - ValueForMethodForm : operating point(s); the string sentinel
        "UnknownVoidValue" means "not provided" (None cannot be the
        sentinel because None is a legitimate operating point)
      - argsAsSerie : indicates whether the argument is mono or multi-valued
    """
    if self.__Matrix is not None:
        self.__addOneMatrixCall()
        mValue = [self.__Matrix,]
    elif ValueForMethodForm != "UnknownVoidValue": # Do not use "None" as sentinel
        # Fix: was 'is not "UnknownVoidValue"' — identity comparison with
        # a str literal is implementation-dependent (SyntaxWarning in
        # CPython >= 3.8); equality is the intended test.
        mValue = []
        if argsAsSerie:
            self.__addOneMethodCall( len(ValueForMethodForm) )
            for _vfmf in ValueForMethodForm:
                mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
        else:
            self.__addOneMethodCall()
            mValue = self.__Method(((ValueForMethodForm, None),))
    else:
        raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
    #
    if argsAsSerie: return mValue
    else:           return mValue[-1]
# Fragment of the "shape" accessor — its "def" header (and a probable
# @property decorator) is missing from this listing.
339 Returns the shape in numpy form if the operator is available as
340 a matrix
342 if self.__Matrix is not None:
343 return self.__Matrix.shape
345 raise ValueError("Matrix form of the operator is not available, nor the shape")
347 def nbcalls(self, which=None):
349 Returns the numbers of operator evaluations
# Builds a tuple: local (per-instance) counters first, then the global
# (class-wide) counters. The tuple assignment/delimiter lines are not
# visible in this listing.
352 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
353 self.__NbCallsAsMatrix,
354 self.__NbCallsAsMethod,
355 self.__NbCallsOfCached,
356 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
357 Operator.NbCallsAsMatrix,
358 Operator.NbCallsAsMethod,
359 Operator.NbCallsOfCached,
# which=None returns the whole tuple; an integer selects one counter.
361 if which is None: return __nbcalls
362 else: return __nbcalls[which]
364 def __addOneMatrixCall(self):
365 "Count one matrix application"
366 self.__NbCallsAsMatrix += 1 # Local count
367 Operator.NbCallsAsMatrix += 1 # Global (class-wide) count
369 def __addOneMethodCall(self, nb = 1):
370 "Count one or more method calls"
371 self.__NbCallsAsMethod += nb # Local count
372 Operator.NbCallsAsMethod += nb # Global (class-wide) count
374 def __addOneCacheCall(self):
375 "Count one cache hit"
376 self.__NbCallsOfCached += 1 # Local count
377 Operator.NbCallsOfCached += 1 # Global (class-wide) count
379 # ==============================================================================
380 class FullOperator(object):
# NOTE(review): incomplete numbered listing — the class docstring delimiters,
# part of the __init__ signature (asMatrix, appliedInX, avoidRC, toBeChecked,
# scheduledBy, inputAsMF are referenced below but their parameter lines are
# mostly missing), and many "else:"/assignment lines are not visible.
382 General interface class for a full operator
383 (Direct, Tangent Linear, Adjoint)
386 name = "GenericFullOperator",
388 asOneFunction = None, # 1 Function
389 asThreeFunctions = None, # 3 Functions in a dictionary
390 asScript = None, # 1 or 3 Function(s) by script
391 asDict = None, # Parameters
393 extraArguments = None,
395 inputAsMF = False,# Function(s) as Multi-Functions
400 self.__name = str(name)
401 self.__check = bool(toBeChecked)
402 self.__extraArgs = extraArguments
# Merge user parameters, then normalize the multiprocessing switches.
407 if (asDict is not None) and isinstance(asDict, dict):
408 __Parameters.update( asDict )
409 # EnableMultiProcessingInDerivatives=True takes priority
410 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
411 __Parameters["EnableMultiProcessingInDerivatives"] = True
412 __Parameters["EnableMultiProcessingInEvaluation"] = False
413 if "EnableMultiProcessingInDerivatives" not in __Parameters:
414 __Parameters["EnableMultiProcessingInDerivatives"] = False
415 if __Parameters["EnableMultiProcessingInDerivatives"]:
416 __Parameters["EnableMultiProcessingInEvaluation"] = False
417 if "EnableMultiProcessingInEvaluation" not in __Parameters:
418 __Parameters["EnableMultiProcessingInEvaluation"] = False
419 if "withIncrement" in __Parameters: # Temporary
420 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Operator given through a script file: a matrix, one function, or three.
422 if asScript is not None:
423 __Matrix, __Function = None, None
425 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
427 __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
428 __Function.update({"useApproximatedDerivatives":True})
429 __Function.update(__Parameters)
430 elif asThreeFunctions:
432 "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
433 "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
434 "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
436 __Function.update(__Parameters)
# Operator given directly as Python objects.
439 if asOneFunction is not None:
440 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
441 if asOneFunction["Direct"] is not None:
442 __Function = asOneFunction
444 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
446 __Function = { "Direct":asOneFunction }
447 __Function.update({"useApproximatedDerivatives":True})
448 __Function.update(__Parameters)
449 elif asThreeFunctions is not None:
# Full Tangent/Adjoint pair supplied and approximation not requested.
450 if isinstance(asThreeFunctions, dict) and \
451 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
452 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
453 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
454 __Function = asThreeFunctions
455 elif isinstance(asThreeFunctions, dict) and \
456 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
457 __Function = asThreeFunctions
458 __Function.update({"useApproximatedDerivatives":True})
460 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
461 if "Direct" not in asThreeFunctions:
462 __Function["Direct"] = asThreeFunctions["Tangent"]
463 __Function.update(__Parameters)
467 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
468 # for k in ("Direct", "Tangent", "Adjoint"):
469 # if k in __Function and hasattr(__Function[k],"__class__"):
470 # if type(__Function[k]) is type(self.__init__):
471 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the "applied in X" precomputed values into a dictionary.
473 if appliedInX is not None and isinstance(appliedInX, dict):
474 __appliedInX = appliedInX
475 elif appliedInX is not None:
476 __appliedInX = {"HXb":appliedInX}
480 if scheduledBy is not None:
481 self.__T = scheduledBy
# Case 1: finite-difference approximated derivatives around a "Direct"
# user function (defaults filled in before building FDApproximation).
483 if isinstance(__Function, dict) and \
484 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
485 ("Direct" in __Function) and (__Function["Direct"] is not None):
486 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
487 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
488 if "withdX" not in __Function: __Function["withdX"] = None
489 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
490 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
491 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
492 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
493 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
494 from daNumerics.ApproximatedDerivatives import FDApproximation
495 FDA = FDApproximation(
496 Function = __Function["Direct"],
497 centeredDF = __Function["CenteredFiniteDifference"],
498 increment = __Function["DifferentialIncrement"],
499 dX = __Function["withdX"],
500 avoidingRedundancy = __Function["withAvoidingRedundancy"],
501 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
502 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
503 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
504 mpWorkers = __Function["NumberOfProcesses"],
505 mfEnabled = __Function["withmfEnabled"],
507 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
508 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
509 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: full user-supplied Direct/Tangent/Adjoint triple.
510 elif isinstance(__Function, dict) and \
511 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
512 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
513 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
514 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
515 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: explicit matrix form; the adjoint is the transpose.
# NOTE(review): numpy.float is deprecated (removed in NumPy 1.20+).
516 elif asMatrix is not None:
517 __matrice = numpy.matrix( __Matrix, numpy.float )
518 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
519 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
520 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
523 raise ValueError("Improperly defined operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
# Store precomputed operator values at given points as column matrices.
525 if __appliedInX is not None:
526 self.__FO["AppliedInX"] = {}
527 for key in list(__appliedInX.keys()):
528 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
529 # Case of a true matrix
530 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
531 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
532 # Case of a vector represented as a 2-dimensional array
533 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
535 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
537 self.__FO["AppliedInX"] = None
543 "x.__repr__() <==> repr(x)"
# NOTE(review): self.__V is not assigned anywhere in this class's visible
# code — likely copy/pasted from a container class; repr() would raise
# AttributeError. Confirm against the full file.
544 return repr(self.__V)
547 "x.__str__() <==> str(x)"
550 # ==============================================================================
551 class Algorithm(object):
553 Classe générale d'interface de type algorithme
555 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
556 d'assimilation, en fournissant un container (dictionnaire) de variables
557 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
559 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
561 def __init__(self, name):
563 The present initialization makes it possible to create storage
564 variables generically available in the elementary algorithms. These
565 storage variables are then kept in a dictionary internal to the
566 object, accessed through the "get" method.
568 The expected variables are :
569 - APosterioriCorrelations : correlation matrix of the A matrix
570 - APosterioriCovariance : a posteriori covariance matrix : A
571 - APosterioriStandardDeviations : vector of standard deviations of the A matrix
572 - APosterioriVariances : vector of variances of the A matrix
573 - Analysis : analysis vector : Xa
574 - BMA : Background minus Analysis : Xa - Xb
575 - CostFunctionJ : global cost function, sum of the two following parts Jb and Jo
576 - CostFunctionJAtCurrentOptimum : global cost function at the current optimal state during iterations
577 - CostFunctionJb : background part of the cost function : Jb
578 - CostFunctionJbAtCurrentOptimum : background part at the current optimal state during iterations
579 - CostFunctionJo : observation part of the cost function : Jo
580 - CostFunctionJoAtCurrentOptimum : observation part at the current optimal state during iterations
581 - CurrentOptimum : current optimal state during iterations
582 - CurrentState : current state during iterations
583 - GradientOfCostFunctionJ : gradient of the global cost function
584 - GradientOfCostFunctionJb : gradient of the background part of the cost function
585 - GradientOfCostFunctionJo : gradient of the observation part of the cost function
586 - IndexOfOptimum : index of the current optimal state during iterations
587 - Innovation : the innovation : d = Y - H(X)
588 - InnovationAtCurrentState : the innovation at the current state : dn = Y - H(Xn)
589 - JacobianMatrixAtBackground : Jacobian matrix at the background state
590 - JacobianMatrixAtCurrentState : Jacobian matrix at the current state
591 - JacobianMatrixAtOptimum : Jacobian matrix at the optimum
592 - KalmanGainAtOptimum : Kalman gain at the optimum
593 - MahalanobisConsistency : covariance consistency indicator
594 - OMA : Observation minus Analysis : Y - Xa
595 - OMB : Observation minus Background : Y - Xb
596 - PredictedState : current predicted state during iterations
597 - Residu : for the checking algorithms
598 - SigmaBck2 : optimal correction indicator for background errors
599 - SigmaObs2 : optimal correction indicator for observation errors
600 - SimulatedObservationAtBackground : observed state H(Xb) at the background
601 - SimulatedObservationAtCurrentOptimum : observed state H(X) at the current optimal state
602 - SimulatedObservationAtCurrentState : observed state H(X) at the current state
603 - SimulatedObservationAtOptimum : observed state H(Xa) at the optimum
604 - SimulationQuantiles : observed states H(X) for the requested quantiles
605 One can add variables to be stored in the initialization of the
606 elementary algorithm that will inherit from this class
608 logging.debug("%s Initialisation", str(name))
609 self._m = PlatformInfo.SystemUsage()
611 self._name = str( name )
612 self._parameters = {"StoreSupplementaryCalculations":[]}
613 self.__required_parameters = {}
614 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
615 self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
# One Persistence container per storable variable, keyed by its name.
617 self.StoredVariables = {}
618 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
619 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
620 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
621 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
622 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
623 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
624 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
625 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
626 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
627 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
628 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
629 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
630 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
631 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
632 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
633 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
634 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
635 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
636 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
637 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
638 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
639 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
640 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
641 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
642 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
643 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
644 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
645 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
646 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
647 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
648 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
649 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
650 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
651 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
652 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
653 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
654 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
655 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
657 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
# Common pre-processing for all algorithms: logging, parameter intake,
# required-input validation, bounds handling and optimizer verbosity.
659 logging.debug("%s Lancement", self._name)
660 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
662 # Update self._parameters with Parameters
663 self.__setParameters(Parameters)
# Default the non-public variables when the caller did not set them.
665 for k, v in self.__variable_names_not_public.items():
666 if k not in self._parameters: self.__setParameters( {k:v} )
668 # Corrections and complements
# Local validator for input vectors: missing mandatory input -> error,
# missing optional input -> debug note. The "if argument is None:" guard
# and the "else:" lines are not visible in this listing.
669 def __test_vvalue(argument, variable, argname):
671 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
672 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
673 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
674 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
676 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
678 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
680 __test_vvalue( Xb, "Xb", "Background or initial state" )
681 __test_vvalue( Y, "Y", "Observation" )
# Same validation for error covariance matrices.
683 def __test_cvalue(argument, variable, argname):
685 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
686 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
687 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
688 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
690 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
692 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
694 __test_cvalue( R, "R", "Observation" )
695 __test_cvalue( B, "B", "Background" )
696 __test_cvalue( Q, "Q", "Evolution" )
# Normalize "Bounds": keep only a non-empty list/tuple, otherwise None.
698 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
699 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
701 self._parameters["Bounds"] = None
# Map the logging level onto the optimizer verbosity switches.
703 if logging.getLogger().level < logging.WARNING:
704 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
705 if PlatformInfo.has_scipy:
706 import scipy.optimize
707 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
709 self._parameters["optmessages"] = 15
711 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
712 if PlatformInfo.has_scipy:
713 import scipy.optimize
714 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
716 self._parameters["optmessages"] = 15
720 def _post_run(self,_oH=None):
# Common post-processing: derive a posteriori variances / standard
# deviations / correlations from each stored covariance matrix when
# requested, then log operator call counts and memory usage.
722 if ("StoreSupplementaryCalculations" in self._parameters) and \
723 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
724 for _A in self.StoredVariables["APosterioriCovariance"]:
725 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
726 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
727 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
728 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
729 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation: C = E^-1 A E^-1 with E = diag(sqrt(diag(A))).
730 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
731 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
732 self.StoredVariables["APosterioriCorrelations"].store( _C )
# NOTE(review): a guard such as "if _oH is not None:" before these logs is
# presumably on the missing line 733 — TODO confirm against the full file.
734 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
735 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
736 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
737 logging.debug("%s Terminé", self._name)
740 def _toStore(self, key):
741 "True if in StoreSupplementaryCalculations, else False"
# "StoreSupplementaryCalculations" is initialized to [] in __init__, so
# this only raises KeyError if self._parameters was replaced wholesale.
742 return key in self._parameters["StoreSupplementaryCalculations"]
744 def get(self, key=None):
746 Returns one of the stored variables identified by the key, or the
747 dictionary of all available variables when no key is given. The
748 variables are returned directly in object form, so the individual
749 access methods are those of the persistence classes.
# The "if key is not None:" / "else:" lines are missing from this listing.
753 return self.StoredVariables[key]
755 return self.StoredVariables
757 def __contains__(self, key=None):
758 "D.__contains__(k) -> True if D has a key k, else False"
759 return key in self.StoredVariables
        # NOTE(review): the enclosing "def keys(self):" line is elided in
        # this excerpt of the source.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        # NOTE(review): the enclosing "def pop(...)" line and an intermediate
        # fallback branch are elided in this excerpt of the source.
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.pop(k, d)
        raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Must implement the elementary assimilation computation in its most
        natural mathematical form. To be overridden by concrete algorithms.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
        """
        Declare a required algorithm parameter together with its default
        characteristics (default value, typecast, bounds, allowed values).
        """
        # NOTE(review): the guard on "name", most entries of the parameter
        # descriptor dict and its closing brace are elided in this excerpt
        # of the source.
        raise ValueError("A name is mandatory to define a required parameter.")
        self.__required_parameters[name] = {
            "typecast" : typecast,
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of required parameter names, or the full
        dictionary of required parameters when details are requested.
        """
        # NOTE(review): the "noDetails" conditionals around the two returns
        # are elided in this excerpt of the source.
        return sorted(self.__required_parameters.keys())
        return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Return, in a controlled way, the value of a required parameter:
        default substitution, typecast, then bound/allowed-value checks.
        """
        default = self.__required_parameters[name]["default"]
        typecast = self.__required_parameters[name]["typecast"]
        minval = self.__required_parameters[name]["minval"]
        maxval = self.__required_parameters[name]["maxval"]
        listval = self.__required_parameters[name]["listval"]
        # NOTE(review): the error branch for "no value and no default", the
        # final "else" assignment branch and the return statement are elided
        # in this excerpt of the source.
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
            else: __val = typecast( value )
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
        if listval is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                    raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
            elif __val not in listval:
                raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
    def requireInputArguments(self, mandatory=(), optional=()):
        """
        Declare which input arguments are mandatory or optional for the run.
        """
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    def __setParameters(self, fromDico={}):
        """
        Store the received parameters in the internal dictionary, applying
        the controlled setParameterValue for every declared parameter.
        """
        # NOTE(review): "fromDico" is a mutable default argument; it is only
        # read here, never mutated, so the usual aliasing pitfall is benign.
        self._parameters.update( fromDico )
        for k in self.__required_parameters.keys():
            if k in fromDico.keys():
                self._parameters[k] = self.setParameterValue(k,fromDico[k])
                # NOTE(review): an "else:" branch line is elided in this
                # excerpt before the following default assignment.
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
869 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action interface class binding an algorithm with its parameters.
    """
    # NOTE(review): the "def __init__(...)" line, several signature
    # arguments, the initialisation of self.__P and some branch lines are
    # elided in this excerpt of the source.
        name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        self.updateParameters( asDict, asScript )
        if asAlgorithm is None and asScript is not None:
            __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
    def updateParameters(self,
        # NOTE(review): the remaining signature lines ("asDict", "asScript",
        # closing parenthesis) and an else-branch are elided in this excerpt
        # of the source.
        "Update the internal parameters"
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Run the assimilation computation"
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each input: use the stored object (getO) when available,
        # otherwise take the entry as given.
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # Check size compatibility before launching the algorithm.
        self.__shape_validate()
        # NOTE(review): most keyword arguments of the run() call are elided
        # in this excerpt of the source.
        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Run the assimilation computation"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        __file = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        # NOTE(review): the SALOME/YACS import statements and several
        # try/except/else lines are elided in this excerpt of the source.
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
        p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Check the existence of a variable or parameter key"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
            # NOTE(review): the final "else" branch returning the filtered
            # dictionary is partially elided in this excerpt of the source.
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
1023 def pop(self, k, d):
1024 "Necessaire pour le pickling"
1025 return self.__algorithm.pop(k, d)
1027 def getAlgorithmRequiredParameters(self, noDetails=True):
1028 "Renvoie la liste des paramètres requis selon l'algorithme"
1029 return self.__algorithm.getRequiredParameters(noDetails)
    def setObserver(self, __V, __O, __I, __S):
        # Attach the observer function __O (with info __I and scheduler __S)
        # to the stored variable named __V.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        # NOTE(review): the remaining keyword arguments of setDataObserver
        # are elided in this excerpt of the source.
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach the observer function __O from the stored variable __V
        # (__A presumably selects removal of all observers — TODO confirm).
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        # NOTE(review): the remaining arguments of removeDataObserver are
        # elided in this excerpt of the source.
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
    def hasObserver(self, __V):
        # Tell whether the stored variable __V currently has a data observer.
        # NOTE(review): the early "return False" lines of this method are
        # elided in this excerpt of the source.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # NOTE(review): the enclosing "def keys(self):" line is elided in
        # this excerpt of the source.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1073 def __contains__(self, key=None):
1074 "D.__contains__(k) -> True if D has a key k, else False"
1075 return key in self.__algorithm or key in self.__P
        # NOTE(review): the "def __repr__(self):" and "def __str__(self):"
        # lines are elided in this excerpt of the source.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm used to carry out the assimilation study. The
        argument is a string referring to the name of an algorithm that
        performs the operation on the fixed arguments.
        """
        # NOTE(review): the guard on "choice", a loop "break", and some
        # try/else lines are elided in this excerpt of the source.
        raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete file
        # ------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------------
        sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the file's elementary type
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1123 def __shape_validate(self):
1125 Validation de la correspondance correcte des tailles des variables et
1126 des matrices s'il y en a.
1128 if self.__Xb is None: __Xb_shape = (0,)
1129 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1130 elif hasattr(self.__Xb,"shape"):
1131 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1132 else: __Xb_shape = self.__Xb.shape()
1133 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1135 if self.__Y is None: __Y_shape = (0,)
1136 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1137 elif hasattr(self.__Y,"shape"):
1138 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1139 else: __Y_shape = self.__Y.shape()
1140 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1142 if self.__U is None: __U_shape = (0,)
1143 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1144 elif hasattr(self.__U,"shape"):
1145 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1146 else: __U_shape = self.__U.shape()
1147 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1149 if self.__B is None: __B_shape = (0,0)
1150 elif hasattr(self.__B,"shape"):
1151 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1152 else: __B_shape = self.__B.shape()
1153 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1155 if self.__R is None: __R_shape = (0,0)
1156 elif hasattr(self.__R,"shape"):
1157 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1158 else: __R_shape = self.__R.shape()
1159 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1161 if self.__Q is None: __Q_shape = (0,0)
1162 elif hasattr(self.__Q,"shape"):
1163 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1164 else: __Q_shape = self.__Q.shape()
1165 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1167 if len(self.__HO) == 0: __HO_shape = (0,0)
1168 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1169 elif hasattr(self.__HO["Direct"],"shape"):
1170 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1171 else: __HO_shape = self.__HO["Direct"].shape()
1172 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1174 if len(self.__EM) == 0: __EM_shape = (0,0)
1175 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1176 elif hasattr(self.__EM["Direct"],"shape"):
1177 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1178 else: __EM_shape = self.__EM["Direct"].shape()
1179 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1181 if len(self.__CM) == 0: __CM_shape = (0,0)
1182 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1183 elif hasattr(self.__CM["Direct"],"shape"):
1184 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1185 else: __CM_shape = self.__CM["Direct"].shape()
1186 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1188 # Vérification des conditions
1189 # ---------------------------
1190 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1191 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1192 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1193 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1195 if not( min(__B_shape) == max(__B_shape) ):
1196 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1197 if not( min(__R_shape) == max(__R_shape) ):
1198 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1199 if not( min(__Q_shape) == max(__Q_shape) ):
1200 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1201 if not( min(__EM_shape) == max(__EM_shape) ):
1202 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1204 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1205 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1206 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1207 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1208 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1209 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1210 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1211 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1213 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1214 if self.__algorithmName in ["EnsembleBlue",]:
1215 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1216 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1217 for member in asPersistentVector:
1218 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1219 __Xb_shape = min(__B_shape)
1221 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1223 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1224 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1226 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1227 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1229 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1230 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1232 if ("Bounds" in self.__P) \
1233 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1234 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1235 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1236 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1240 # ==============================================================================
class RegulationAndParameters(object):
    """
    General action interface class for regulation and its parameters.
    """
    # NOTE(review): the "def __init__(...)" line, several signature
    # arguments, assignment and else-branch lines are elided in this
    # excerpt of the source.
        name = "GenericRegulation",
        self.__name = str(name)
        if asAlgorithm is None and asScript is not None:
            __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            self.__P.update( {"Algorithm":__Algo} )

    def get(self, key = None):
        "Check the existence of a variable or parameter key"
        # NOTE(review): the key test around this return is elided in this
        # excerpt of the source.
        return self.__P[key]
1279 # ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class.
    """
    # NOTE(review): the "def __init__(...)" line and numerous signature
    # argument, else-branch and loop lines are elided in this excerpt of
    # the source; the visible statements are kept in order.
        name = "GenericObserver",
        self.__name = str(name)
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
                self.__I = (str(withInfo),)
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        # Build the observer callable from a string, a template name, a
        # script, or a user-provided object.
        if asString is not None:
            __FunctionText = asString
        elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            __FunctionText = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            __FunctionText = ImportFromScript(asScript).getstring()
        __Function = ObserverF(__FunctionText)
        if asObsObject is not None:
            self.__O = asObsObject
            self.__O = __Function.getfunc()
        for k in range(len(self.__V)):
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    # NOTE(review): the "def __repr__(self):" and "def __str__(self):"
    # lines are elided in this excerpt of the source.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1350 # ==============================================================================
1351 class State(object):
1353 Classe générale d'interface de type état
1356 name = "GenericVector",
1358 asPersistentVector = None,
1364 toBeChecked = False,
1367 Permet de définir un vecteur :
1368 - asVector : entrée des données, comme un vecteur compatible avec le
1369 constructeur de numpy.matrix, ou "True" si entrée par script.
1370 - asPersistentVector : entrée des données, comme une série de vecteurs
1371 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1372 type Persistence, ou "True" si entrée par script.
1373 - asScript : si un script valide est donné contenant une variable
1374 nommée "name", la variable est de type "asVector" (par défaut) ou
1375 "asPersistentVector" selon que l'une de ces variables est placée à
1377 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1378 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1379 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1380 nommée "name"), on récupère les colonnes et on les range ligne après
1381 ligne (colMajor=False) ou colonne après colonne (colMajor=True). La
1382 variable résultante est de type "asVector" (par défaut) ou
1383 "asPersistentVector" selon que l'une de ces variables est placée à
1386 self.__name = str(name)
1387 self.__check = bool(toBeChecked)
1391 self.__is_vector = False
1392 self.__is_series = False
1394 if asScript is not None:
1395 __Vector, __Series = None, None
1396 if asPersistentVector:
1397 __Series = ImportFromScript(asScript).getvalue( self.__name )
1399 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1400 elif asDataFile is not None:
1401 __Vector, __Series = None, None
1402 if asPersistentVector:
1403 if colNames is not None:
1404 __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
1406 __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1407 if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1408 __Series = numpy.transpose(__Series)
1409 elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1410 __Series = numpy.transpose(__Series)
1412 if colNames is not None:
1413 __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
1415 __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1417 __Vector = numpy.ravel(__Vector, order = "F")
1419 __Vector = numpy.ravel(__Vector, order = "C")
1421 __Vector, __Series = asVector, asPersistentVector
1423 if __Vector is not None:
1424 self.__is_vector = True
1425 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1426 self.shape = self.__V.shape
1427 self.size = self.__V.size
1428 elif __Series is not None:
1429 self.__is_series = True
1430 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1431 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1432 if isinstance(__Series, str): __Series = eval(__Series)
1433 for member in __Series:
1434 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1437 if isinstance(self.__V.shape, (tuple, list)):
1438 self.shape = self.__V.shape
1440 self.shape = self.__V.shape()
1441 if len(self.shape) == 1:
1442 self.shape = (self.shape[0],1)
1443 self.size = self.shape[0] * self.shape[1]
1445 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1447 if scheduledBy is not None:
1448 self.__T = scheduledBy
1450 def getO(self, withScheduler=False):
1452 return self.__V, self.__T
1453 elif self.__T is None:
1459 "Vérification du type interne"
1460 return self.__is_vector
1463 "Vérification du type interne"
1464 return self.__is_series
1467 "x.__repr__() <==> repr(x)"
1468 return repr(self.__V)
1471 "x.__str__() <==> str(x)"
1472 return str(self.__V)
1474 # ==============================================================================
class Covariance(object):
    """
    General covariance-type interface class.
    """
    # NOTE(review): the "def __init__(...)" line and several signature
    # argument, guard and else-branch lines are elided in this excerpt of
    # the source; the visible statements are kept in order.
        name = "GenericCovariance",
        asCovariance = None,
        asEyeByScalar = None,
        asEyeByVector = None,
        toBeChecked = False,
        """
        Define a covariance:
        - asCovariance: data input, as a matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar: data input as a single variance scalar multiplying
          an identity correlation matrix, so no explicit matrix is given
        - asEyeByVector: data input as a single variance vector to put on
          the diagonal of a correlation matrix, so no explicit matrix is
          given
        - asCovObject: data input as a Python object providing the
          mandatory methods "getT", "getI", "diag", "trace", "__add__",
          "__sub__", "__neg__", "__mul__", "__rmul__" and optionally
          "shape", "size", "cholesky", "choleskyI", "asfullmatrix",
          "__repr__", "__str__"
        - toBeChecked: boolean telling whether the SPD character of the
          full matrix has to be checked
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        #
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        #
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
            __Scalar = ImportFromScript(asScript).getvalue( self.__name )
            __Vector = ImportFromScript(asScript).getvalue( self.__name )
            __Object = ImportFromScript(asScript).getvalue( self.__name )
            __Matrix = ImportFromScript(asScript).getvalue( self.__name )
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        if __Scalar is not None:
            if numpy.matrix(__Scalar).size != 1:
                raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
        elif __Vector is not None:
            self.__is_vector = True
            self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
        # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
    def __validate(self):
        "Validate the covariance content (squareness, positivity, SPD check)"
        # NOTE(review): the try/except lines around the two Cholesky checks
        # are elided in this excerpt of the source.
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
            L = numpy.linalg.cholesky( self.__C )
            raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
            L = self.__C.cholesky()
            raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
    # NOTE(review): the "def isscalar/isvector/ismatrix/isobject(self):"
    # lines are elided in this excerpt of the source.
        "Internal type check"
        return self.__is_scalar
        "Internal type check"
        return self.__is_vector
        "Internal type check"
        return self.__is_matrix
        "Internal type check"
        return self.__is_object
    # NOTE(review): the "def getI(self):" line, its leading
    # "if self.ismatrix():" and a final "else:" are elided in this excerpt.
            return Covariance(self.__name+"I", asCovariance = self.__C.I )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject():
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
            return None # Indispensable
    # NOTE(review): the "def getT(self):" line and its leading
    # "if self.ismatrix():" are elided in this excerpt of the source.
            return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject():
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
    # NOTE(review): the "def cholesky(self):" line and its leading
    # "if self.ismatrix():" are elided in this excerpt of the source.
        "Cholesky decomposition"
            return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1638 def choleskyI(self):
1639 "Inversion de la décomposition de Cholesky"
1641 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1642 elif self.isvector():
1643 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1644 elif self.isscalar():
1645 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1646 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1647 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1649 def diag(self, msize=None):
1650 "Diagonale de la matrice"
1652 return numpy.diag(self.__C)
1653 elif self.isvector():
1655 elif self.isscalar():
1657 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1659 return self.__C * numpy.ones(int(msize))
1660 elif self.isobject():
1661 return self.__C.diag()
1663 def asfullmatrix(self, msize=None):
1667 elif self.isvector():
1668 return numpy.matrix( numpy.diag(self.__C), float )
1669 elif self.isscalar():
1671 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1673 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1674 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1675 return self.__C.asfullmatrix()
1677 def trace(self, msize=None):
1678 "Trace de la matrice"
1680 return numpy.trace(self.__C)
1681 elif self.isvector():
1682 return float(numpy.sum(self.__C))
1683 elif self.isscalar():
1685 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1687 return self.__C * int(msize)
1688 elif self.isobject():
1689 return self.__C.trace()
1695 "x.__repr__() <==> repr(x)"
1696 return repr(self.__C)
1699 "x.__str__() <==> str(x)"
1700 return str(self.__C)
1702 def __add__(self, other):
1703 "x.__add__(y) <==> x+y"
1704 if self.ismatrix() or self.isobject():
1705 return self.__C + numpy.asmatrix(other)
1706 elif self.isvector() or self.isscalar():
1707 _A = numpy.asarray(other)
1708 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1709 return numpy.asmatrix(_A)
1711 def __radd__(self, other):
1712 "x.__radd__(y) <==> y+x"
1713 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1715 def __sub__(self, other):
1716 "x.__sub__(y) <==> x-y"
1717 if self.ismatrix() or self.isobject():
1718 return self.__C - numpy.asmatrix(other)
1719 elif self.isvector() or self.isscalar():
1720 _A = numpy.asarray(other)
1721 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1722 return numpy.asmatrix(_A)
1724 def __rsub__(self, other):
1725 "x.__rsub__(y) <==> y-x"
1726 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1729 "x.__neg__() <==> -x"
1732 def __mul__(self, other):
1733 "x.__mul__(y) <==> x*y"
1734 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1735 return self.__C * other
1736 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1737 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1738 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1739 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1740 return self.__C * numpy.asmatrix(other)
1742 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1743 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1744 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1745 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1746 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1747 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1749 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1750 elif self.isscalar() and isinstance(other,numpy.matrix):
1751 return self.__C * other
1752 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1753 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1754 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1756 return self.__C * numpy.asmatrix(other)
1757 elif self.isobject():
1758 return self.__C.__mul__(other)
1760 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1762 def __rmul__(self, other):
1763 "x.__rmul__(y) <==> y*x"
1764 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1765 return other * self.__C
1766 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1767 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1768 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1769 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1770 return numpy.asmatrix(other) * self.__C
1772 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1773 elif self.isvector() and isinstance(other,numpy.matrix):
1774 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1775 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1776 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1777 return numpy.asmatrix(numpy.array(other) * self.__C)
1779 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1780 elif self.isscalar() and isinstance(other,numpy.matrix):
1781 return other * self.__C
1782 elif self.isobject():
1783 return self.__C.__rmul__(other)
1785 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1788 "x.__len__() <==> len(x)"
1789 return self.shape[0]
1791 # ==============================================================================
class ObserverF(object):
    """
    Creation of an observer function from its source text
    """
    def __init__(self, corps=""):
        # corps: Python source text to be run when the observer fires
        self.__corps = corps
    def func(self,var,info):
        "Observation function"
        # NOTE(review): this body line was dropped by the paste; the class
        # purpose implies executing the stored text with `var`/`info` in
        # scope — confirm against the upstream file.
        # SECURITY: exec of arbitrary user-supplied text; only use with
        # trusted observer definitions.
        exec(self.__corps)
    def getfunc(self):
        "Return the function pointer held by the object"
        # NOTE(review): header/return reconstructed from the visible
        # docstring — confirm against the upstream file.
        return self.func
1805 # ==============================================================================
class CaseLogger(object):
    """
    Keeps a record of the commands used to create a case
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        # NOTE(review): the dict delimiters around __viewers/__loaders were
        # dropped by this paste; reconstructed from the visible entries —
        # confirm against the upstream file.
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []          # ordered (command, keys, local, pre, switchoff) tuples
        self.__switchoff = False       # when True, register() is suspended
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            }
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        # Optional user-supplied extensions of the viewer/loader registries
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        # NOTE(review): the two "if" lines around the __switchoff toggling were
        # dropped by this paste; reconstructed so that a switchoff command
        # suspends further recording until a non-switchoff call — confirm
        # against the upstream file.
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized restitution of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1855 # ==============================================================================
# NOTE(review): in this paste the original file numbering (first token of each
# line) jumps, so many lines of this function are missing: the "def
# MultiFonction(...)" header itself, some keyword parameters (presumably the
# multiprocessing enable/worker-count switches read below — TODO confirm),
# the docstring delimiters, the worker-count "else:" branch, the _jobs
# initialisation, and several "else:" lines. Reconstruct against the upstream
# file before editing.
# Purpose (from visible code): apply _sFunction to every element of an
# iterable __xserie, optionally with _extraArguments, either sequentially or
# through a multiprocessing.Pool, and return the list of results.
1858 _extraArguments = None,
1859 _sFunction = lambda x: x,
1864 For an ordered list of input vectors, returns the corresponding
1865 list of values of the function given in argument
1867 # Initial checks and definitions
1868 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
1869 if not PlatformInfo.isIterable( __xserie ):
1870 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count: None (let the Pool choose) when unset or < 1.
1872 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
1875 __mpWorkers = int(_mpWorkers)
1877 import multiprocessing
# Job list construction: each job is the input value plus the positional
# extra arguments (lines initialising _jobs are missing from this paste).
1888 if _extraArguments is None:
1890 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
1891 for __xvalue in __xserie:
1892 _jobs.append( [__xvalue, ] + list(_extraArguments) )
1894 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
1895 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
# Parallel evaluation through a context-managed Pool.
1896 import multiprocessing
1897 with multiprocessing.Pool(__mpWorkers) as pool:
1898 __multiHX = pool.map( _sFunction, _jobs )
1901 # logging.debug("MULTF Internal multiprocessing calculation end")
1903 # logging.debug("MULTF Internal monoprocessing calculation begin")
# Sequential fallback: dispatch on the kind of _extraArguments
# (none / positional sequence / keyword dict).
1905 if _extraArguments is None:
1906 for __xvalue in __xserie:
1907 __multiHX.append( _sFunction( __xvalue ) )
1908 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
1909 for __xvalue in __xserie:
1910 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
1911 elif _extraArguments is not None and isinstance(_extraArguments, dict):
1912 for __xvalue in __xserie:
1913 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
1915 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
1916 # logging.debug("MULTF Internal monoprocessing calculation end")
1918 # logging.debug("MULTF Internal multifonction calculations end")
1921 # ==============================================================================
# NOTE(review): in this paste the original file numbering (first token of each
# line) jumps, so many lines of this function are missing: the _BI/_RI/_Y/_Xb
# parameters (used below — TODO confirm their defaults), the docstring
# delimiters, several storage-key list items, the _Hm dispatch branches, the
# guard "if ... is None:" lines of some quality measures, and the return
# tail. Reconstruct against the upstream file before editing.
# Purpose (from visible code): evaluate J = Jb + Jo at point _x for the
# quality measure selected by _QM, optionally storing current state,
# simulation and optimum-tracking series into _SSV under the names in _SSC.
1922 def CostFunction3D(_x,
1923 _Hm = None, # To simulate Hm(x) : HO["Direct"].appliedTo
1924 _HmX = None, # Simulation of Hm(x) already performed
1925 _arg = None, # Extra arguments for Hm, as a tuple
1930 _SIV = False, # To be phased out for 8.0
1931 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
1932 _nPS = 0, # nbPreviousSteps
1933 _QM = "DA", # QualityMeasure
1934 _SSV = {}, # Input and/or output : self.StoredVariables
1935 _fRt = False, # Whether to return the extended output
1936 _sSc = True, # Whether to store the SSC
1939 General cost function useful for the static/3D algorithms : 3DVAR, BLUE
1940 and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO, SA, Tabu,
1941 DFO, QuantileRegression
# Make Persistence-like containers usable through "append".
1947 for k in ["CostFunctionJ",
1953 "SimulatedObservationAtCurrentOptimum",
1954 "SimulatedObservationAtCurrentState",
1958 if hasattr(_SSV[k],"store"):
1959 _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
1961 _X = numpy.asmatrix(numpy.ravel( _x )).T
1962 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
1963 _SSV["CurrentState"].append( _X )
# Simulation: reuse _HmX when given, otherwise apply the operator _Hm.
1965 if _HmX is not None:
1969 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
1973 _HX = _Hm( _X, *_arg )
1974 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1976 if "SimulatedObservationAtCurrentState" in _SSC or \
1977 "SimulatedObservationAtCurrentOptimum" in _SSC:
1978 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# NaN in the simulation invalidates the whole evaluation.
1980 if numpy.any(numpy.isnan(_HX)):
1981 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
1983 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
# Quality-measure dispatch: augmented/weighted/plain least squares, L1,
# max-error, quantile-regression placeholder.
1984 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
1985 if _BI is None or _RI is None:
1986 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
1987 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
1988 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
1989 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
1990 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
1992 raise ValueError("Observation error covariance matrix has to be properly defined!")
1994 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
1995 elif _QM in ["LeastSquares", "LS", "L2"]:
1997 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
1998 elif _QM in ["AbsoluteValue", "L1"]:
2000 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2001 elif _QM in ["MaximumError", "ME"]:
2003 Jo = numpy.max( numpy.abs(_Y - _HX) )
2004 elif _QM in ["QR", "Null"]:
2008 raise ValueError("Unknown asked quality measure!")
2010 J = float( Jb ) + float( Jo )
2013 _SSV["CostFunctionJb"].append( Jb )
2014 _SSV["CostFunctionJo"].append( Jo )
2015 _SSV["CostFunctionJ" ].append( J )
# Optimum tracking over the J series, offset by the previous-steps count.
2017 if "IndexOfOptimum" in _SSC or \
2018 "CurrentOptimum" in _SSC or \
2019 "SimulatedObservationAtCurrentOptimum" in _SSC:
2020 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2021 if "IndexOfOptimum" in _SSC:
2022 _SSV["IndexOfOptimum"].append( IndexMin )
2023 if "CurrentOptimum" in _SSC:
2024 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2025 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2026 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2031 if _QM in ["QR"]: # For the QuantileRegression
2036 # ==============================================================================
# Module self-test entry point: only announces itself when run directly.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')