# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2024 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines the generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, copy, types, sys, logging, math, numpy, scipy, itertools
from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
from daCore.PlatformInfo import PlatformInfo, vt, vfloat
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.ravel( X ).reshape((-1, 1))
    __sys_path_tmp = sys.path
    sys.path.insert(0, funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module, funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp
    del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
# ==============================================================================
class FDApproximation(object):
    """
    This class is an interface used to define approximated operators. When an
    object is created by supplying a function "Function", one obtains an object
    with the three methods "DirectOperator", "TangentOperator" and
    "AdjointOperator". The finite-difference (FD) approximation is controlled
    either by the multiplicative increment "increment", equal to 1% by default,
    or by the fixed increment "dX" which is then multiplied by "increment"
    (hence expressed in %), and centered FD are used when the boolean
    "centeredDF" is true.
    """

    __slots__ = (
        "__name", "__extraArgs", "__mpEnabled", "__mpWorkers", "__mfEnabled",
        "__rmEnabled", "__avoidRC", "__tolerBP", "__centeredDF", "__lengthRJ",
        "__listJPCP", "__listJPCI", "__listJPCR", "__listJPPN", "__listJPIN",
        "__userOperator", "__userFunction", "__increment", "__pool", "__dX",
        "__userFunction__name", "__userFunction__modl", "__userFunction__path",
    )

    def __init__(self,
                 name = "FDApproximation",
                 Function = None,
                 centeredDF = False,
                 increment = 0.01,
                 dX = None,
                 extraArguments = None,
                 reducingMemoryUse = False,
                 avoidingRedundancy = True,
                 toleranceInRedundancy = 1.e-18,
                 lengthOfRedundancy = -1,
                 mpEnabled = False,
                 mpWorkers = None,
                 mfEnabled = False ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        #
        if mpEnabled:
            try:
                import multiprocessing  # noqa: F401
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled, self.__mpWorkers))
        #
        self.__mfEnabled = bool(mfEnabled)
        logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
        #
        self.__rmEnabled = bool(reducingMemoryUse)
        logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lengthRJ = int(lengthOfRedundancy)
            self.__listJPCP = []  # Jacobian Previous Calculated Points
            self.__listJPCI = []  # Jacobian Previous Calculated Increment
            self.__listJPCR = []  # Jacobian Previous Calculated Results
            self.__listJPPN = []  # Jacobian Previous Calculated Point Norms
            self.__listJPIN = []  # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP)
        #
        if self.__mpEnabled:
            if isinstance(Function, types.FunctionType):
                logging.debug("FDA Calculs en multiprocessing : FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'], Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc', '').replace('.pyo', '').replace('.py', '')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator(
                    name = self.__name,
                    fromMethod = Function,
                    avoidingRedundancy = self.__avoidRC,
                    inputAsMultiFunction = self.__mfEnabled,
                    extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo  # For the direct computation
            elif isinstance(Function, types.MethodType):
                logging.debug("FDA Calculs en multiprocessing : MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'], Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc', '').replace('.pyo', '').replace('.py', '')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator(
                    name = self.__name,
                    fromMethod = Function,
                    avoidingRedundancy = self.__avoidRC,
                    inputAsMultiFunction = self.__mfEnabled,
                    extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo  # For the direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator(
                name = self.__name,
                fromMethod = Function,
                avoidingRedundancy = self.__avoidRC,
                inputAsMultiFunction = self.__mfEnabled,
                extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.ravel( dX )
    # ---------------------------------------------------------
    def __doublon__(self, __e, __l, __n, __v=None):
        __ac, __iac = False, -1
        for i in range(len(__l) - 1, -1, -1):
            if numpy.linalg.norm(__e - __l[i]) < self.__tolerBP * __n[i]:
                __ac, __iac = True, i
                if __v is not None:
                    logging.debug("FDA Cas%s déjà calculé, récupération du doublon %i"%(__v, __iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
        "Incremental product of a columnwise list matrix with a vector"
        if not isinstance(__LMatrix, (list, tuple)):
            raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
        if __dotWith is not None:
            __Idwx = numpy.ravel( __dotWith )
            assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
            __Produit = numpy.zeros(__LMatrix[0].size)
            for i, col in enumerate(__LMatrix):
                __Produit += float(__Idwx[i]) * col
            return __Produit
        elif __dotTWith is not None:
            _Idwy = numpy.ravel( __dotTWith ).T
            assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
            __Produit = numpy.zeros(len(__LMatrix))
            for i, col in enumerate(__LMatrix):
                __Produit[i] = vfloat( _Idwy @ col )
            return __Produit
        else:
            return None
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Direct computation using the user supplied function.

        NB: extraArgs are present to ensure calling compatibility, but they
        must not be passed here to the user function.
        """
        logging.debug("FDA Calcul DirectOperator (explicite)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
        """
        Computation of the tangent operator as the Jacobian matrix by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X, which is a numpy.ndarray.

        Centered finite differences (2nd order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], building X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
           by (2*dX[i])
        3/ Each result, component by component, becomes a column of the Jacobian

        Non-centered finite differences (1st order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i], building X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are formed and divided by (dX[i])
        4/ Each result, component by component, becomes a column of the Jacobian
        """
        logging.debug("FDA Début du calcul de la Jacobienne")
        logging.debug("FDA Incrément de............: %s*X"%float(self.__increment))
        logging.debug("FDA Approximation centrée...: %s"%(self.__centeredDF))
263 if X is None or len(X) == 0:
264 raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
266 _X = numpy.ravel( X )
268 if self.__dX is None:
269 _dX = self.__increment * _X
271 _dX = numpy.ravel( self.__dX )
272 assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
273 assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
275 if (_dX == 0.).any():
278 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
280 _dX = numpy.where( _dX == 0., moyenne, _dX )
282 __alreadyCalculated = False
284 __bidon, __alreadyCalculatedP = self.__doublon__( _X, self.__listJPCP, self.__listJPPN, None)
285 __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
286 if __alreadyCalculatedP == __alreadyCalculatedI > -1:
287 __alreadyCalculated, __i = True, __alreadyCalculatedP
288 logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
290 if __alreadyCalculated:
291 logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
292 _Jacobienne = self.__listJPCR[__i]
293 logging.debug("FDA Fin du calcul de la Jacobienne")
294 if dotWith is not None:
295 return numpy.dot( _Jacobienne, numpy.ravel( dotWith ))
296 elif dotTWith is not None:
297 return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
299 logging.debug("FDA Calcul Jacobienne (explicite)")
300 if self.__centeredDF:
302 if self.__mpEnabled and not self.__mfEnabled:
304 "__userFunction__path": self.__userFunction__path,
305 "__userFunction__modl": self.__userFunction__modl,
306 "__userFunction__name": self.__userFunction__name,
309 for i in range( len(_dX) ):
311 _X_plus_dXi = numpy.array( _X, dtype=float )
312 _X_plus_dXi[i] = _X[i] + _dXi
313 _X_moins_dXi = numpy.array( _X, dtype=float )
314 _X_moins_dXi[i] = _X[i] - _dXi
316 _jobs.append( ( _X_plus_dXi, self.__extraArgs, funcrepr) )
317 _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
319 import multiprocessing
320 self.__pool = multiprocessing.Pool(self.__mpWorkers)
321 _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
326 for i in range( len(_dX) ):
327 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2 * i] - _HX_plusmoins_dX[2 * i + 1] ) / (2. * _dX[i]) )
329 elif self.__mfEnabled:
331 for i in range( len(_dX) ):
333 _X_plus_dXi = numpy.array( _X, dtype=float )
334 _X_plus_dXi[i] = _X[i] + _dXi
335 _X_moins_dXi = numpy.array( _X, dtype=float )
336 _X_moins_dXi[i] = _X[i] - _dXi
338 _xserie.append( _X_plus_dXi )
339 _xserie.append( _X_moins_dXi )
341 _HX_plusmoins_dX = self.DirectOperator( _xserie )
344 for i in range( len(_dX) ):
345 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2 * i] - _HX_plusmoins_dX[2 * i + 1] ) / (2. * _dX[i]) )
349 for i in range( _dX.size ):
351 _X_plus_dXi = numpy.array( _X, dtype=float )
352 _X_plus_dXi[i] = _X[i] + _dXi
353 _X_moins_dXi = numpy.array( _X, dtype=float )
354 _X_moins_dXi[i] = _X[i] - _dXi
356 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
357 _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
359 _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2. * _dXi) )
363 if self.__mpEnabled and not self.__mfEnabled:
365 "__userFunction__path": self.__userFunction__path,
366 "__userFunction__modl": self.__userFunction__modl,
367 "__userFunction__name": self.__userFunction__name,
370 _jobs.append( (_X, self.__extraArgs, funcrepr) )
371 for i in range( len(_dX) ):
372 _X_plus_dXi = numpy.array( _X, dtype=float )
373 _X_plus_dXi[i] = _X[i] + _dX[i]
375 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
377 import multiprocessing
378 self.__pool = multiprocessing.Pool(self.__mpWorkers)
379 _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
383 _HX = _HX_plus_dX.pop(0)
386 for i in range( len(_dX) ):
387 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
389 elif self.__mfEnabled:
392 for i in range( len(_dX) ):
393 _X_plus_dXi = numpy.array( _X, dtype=float )
394 _X_plus_dXi[i] = _X[i] + _dX[i]
396 _xserie.append( _X_plus_dXi )
398 _HX_plus_dX = self.DirectOperator( _xserie )
400 _HX = _HX_plus_dX.pop(0)
403 for i in range( len(_dX) ):
404 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
408 _HX = self.DirectOperator( _X )
409 for i in range( _dX.size ):
411 _X_plus_dXi = numpy.array( _X, dtype=float )
412 _X_plus_dXi[i] = _X[i] + _dXi
414 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
416 _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
418 if (dotWith is not None) or (dotTWith is not None):
419 __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
422 if __Produit is None or self.__avoidRC:
423 _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
425 if self.__lengthRJ < 0:
426 self.__lengthRJ = 2 * _X.size
427 while len(self.__listJPCP) > self.__lengthRJ:
428 self.__listJPCP.pop(0)
429 self.__listJPCI.pop(0)
430 self.__listJPCR.pop(0)
431 self.__listJPPN.pop(0)
432 self.__listJPIN.pop(0)
433 self.__listJPCP.append( copy.copy(_X) )
434 self.__listJPCI.append( copy.copy(_dX) )
435 self.__listJPCR.append( copy.copy(_Jacobienne) )
436 self.__listJPPN.append( numpy.linalg.norm(_X) )
437 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
438 logging.debug("FDA Fin du calcul de la Jacobienne")
439 if __Produit is not None:
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Tangent computation using the Jacobian matrix.

        NB: extraArgs are present to ensure calling compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        if dX is None or len(dX) == 0:
            # Computation of the matrix form when the second argument is None
            _Jacobienne = self.TangentMatrix( X )
            if self.__mfEnabled:
                return [_Jacobienne,]
            else:
                return _Jacobienne
        else:
            # Computation of the linearized value of H at X applied to dX
            _HtX = self.TangentMatrix( X, dotWith = dX )
            if self.__mfEnabled:
                return [_HtX,]
            else:
                return _HtX
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Adjoint computation using the Jacobian matrix.

        NB: extraArgs are present to ensure calling compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        if Y is None or len(Y) == 0:
            # Computation of the transposed matrix form when the second argument is None
            _JacobienneT = self.TangentMatrix( X ).T
            if self.__mfEnabled:
                return [_JacobienneT,]
            else:
                return _JacobienneT
        else:
            # Computation of the adjoint value at X applied to Y
            _HaY = self.TangentMatrix( X, dotTWith = Y )
            if self.__mfEnabled:
                return [_HaY,]
            else:
                return _HaY
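# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original library): typical use of the
# FDApproximation interface above on a toy operator. The helper name
# "_exampleFDApproximation" and the toy operator H are hypothetical, introduced
# only for illustration; the sketch assumes the daCore Operator wrapper behaves
# as documented in the class docstring.
def _exampleFDApproximation():
    "Minimal sketch: direct, tangent and adjoint of a toy operator by finite differences"
    def H( x ):
        x = numpy.ravel( x )
        return numpy.array([x[0]**2, x[0] * x[1]])
    FDA = FDApproximation( Function = H, centeredDF = True, increment = 0.01 )
    X  = numpy.array([1., 2.])
    dX = numpy.array([1., 0.])
    HX  = FDA.DirectOperator( X )         # H(X)
    JdX = FDA.TangentOperator( (X, dX) )  # approximately J(X) @ dX
    JtY = FDA.AdjointOperator( (X, HX) )  # approximately J(X).T @ H(X)
    return HX, JdX, JtY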
514 # ==============================================================================
515 def SetInitialDirection( __Direction = [], __Amplitude = 1., __Position = None ):
516 "Établit ou élabore une direction avec une amplitude"
518 if len(__Direction) == 0 and __Position is None:
519 raise ValueError("If initial direction is void, current position has to be given")
520 if abs(float(__Amplitude)) < mpr:
521 raise ValueError("Amplitude of perturbation can not be zero")
523 if len(__Direction) > 0:
524 __dX0 = numpy.asarray(__Direction)
527 __X0 = numpy.ravel(numpy.asarray(__Position))
528 __mX0 = numpy.mean( __X0, dtype=mfp )
529 if abs(__mX0) < 2 * mpr:
530 __mX0 = 1. # Évite le problème de position nulle
533 __dX0.append( numpy.random.normal(0., abs(v)) )
535 __dX0.append( numpy.random.normal(0., __mX0) )
537 __dX0 = numpy.asarray(__dX0, float) # Évite le problème d'array de taille 1
538 __dX0 = numpy.ravel( __dX0 ) # Redresse les vecteurs
539 __dX0 = float(__Amplitude) * __dX0
# ==============================================================================
def EnsembleOfCenteredPerturbations( __bgCenter, __bgCovariance, __nbMembers ):
    "Generation of an ensemble of __nbMembers centered random states"
    #
    __bgCenter = numpy.ravel(__bgCenter)[:, None]
    if abs(__nbMembers) < 2:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(__nbMembers),))
    #
    if __bgCovariance is None:
        _Perturbations = numpy.tile( __bgCenter, __nbMembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(__bgCenter.size), __bgCovariance, size=__nbMembers).T
        _Perturbations = numpy.tile( __bgCenter, __nbMembers) + _Z
    #
    return _Perturbations
559 # ==============================================================================
560 def EnsembleOfBackgroundPerturbations(
565 "Génération d'un ensemble de taille __nbMembers-1 d'états aléatoires centrés"
566 def __CenteredRandomAnomalies(Zr, N):
568 Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
569 notes manuscrites de MB et conforme au code de PS avec eps = -1
572 Q = numpy.identity(N - 1) - numpy.ones((N - 1, N - 1)) / numpy.sqrt(N) / (numpy.sqrt(N) - eps)
573 Q = numpy.concatenate((Q, [eps * numpy.ones(N - 1) / numpy.sqrt(N)]), axis=0)
574 R, _ = numpy.linalg.qr(numpy.random.normal(size = (N - 1, N - 1)))
576 Zr = numpy.dot(Q, Zr)
579 __bgCenter = numpy.ravel(__bgCenter).reshape((-1, 1))
581 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(__nbMembers),))
582 if __bgCovariance is None:
583 _Perturbations = numpy.tile( __bgCenter, __nbMembers)
586 _U, _s, _V = numpy.linalg.svd(__bgCovariance, full_matrices=False)
587 _nbctl = __bgCenter.size
588 if __nbMembers > _nbctl:
589 _Z = numpy.concatenate((numpy.dot(
590 numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
591 numpy.random.multivariate_normal(numpy.zeros(_nbctl), __bgCovariance, __nbMembers - 1 - _nbctl)), axis = 0)
593 _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:__nbMembers - 1])), _V[:__nbMembers - 1])
594 _Zca = __CenteredRandomAnomalies(_Z, __nbMembers)
595 _Perturbations = __bgCenter + _Zca
597 if max(abs(__bgCovariance.flatten())) > 0:
598 _nbctl = __bgCenter.size
599 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl), __bgCovariance, __nbMembers - 1)
600 _Zca = __CenteredRandomAnomalies(_Z, __nbMembers)
601 _Perturbations = __bgCenter + _Zca
603 _Perturbations = numpy.tile( __bgCenter, __nbMembers)
605 return _Perturbations
# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Returns the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1, 1))
# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1. ):
    "Returns the centered anomalies from an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1, 1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __Quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
    if __Quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimate
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m - 1)
        # Enforce symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Enforce positivity
        __epsilon = mpr * numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
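# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original library): how the ensemble
# helpers above fit together. The helper name "_exampleEnsembleStatistics", the
# background state and the covariance are hypothetical values for illustration.
def _exampleEnsembleStatistics():
    "Minimal sketch: mean, anomalies and empirical covariance of a small ensemble"
    Xb = numpy.array([0., 1., 2.])
    B  = numpy.diag([1., 0.5, 0.25])
    E  = EnsembleOfCenteredPerturbations( Xb, B, 100 )  # shape (3, 100)
    Xm = EnsembleMean( E )                              # shape (3, 1), close to Xb
    An = EnsembleOfAnomalies( E )                       # E - Xm, column by column
    Pe = EnsembleErrorCovariance( E )                   # ~ An @ An.T / (m - 1), close to B
    return Xm, An, Pe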
642 # ==============================================================================
643 def SingularValuesEstimation( __Ensemble, __Using = "SVDVALS"):
644 "Renvoie les valeurs singulières de l'ensemble et leur carré"
645 if __Using == "SVDVALS": # Recommandé
647 __sv = scipy.linalg.svdvals( __Ensemble )
649 elif __Using == "SVD":
650 _, __sv, _ = numpy.linalg.svd( __Ensemble )
652 elif __Using == "EIG": # Lent
653 __eva, __eve = numpy.linalg.eig( __Ensemble @ __Ensemble.T )
654 __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
655 __sv = numpy.sqrt( __svsq )
656 elif __Using == "EIGH":
657 __eva, __eve = numpy.linalg.eigh( __Ensemble @ __Ensemble.T )
658 __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
659 __sv = numpy.sqrt( __svsq )
660 elif __Using == "EIGVALS":
661 __eva = numpy.linalg.eigvals( __Ensemble @ __Ensemble.T )
662 __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
663 __sv = numpy.sqrt( __svsq )
664 elif __Using == "EIGVALSH":
665 __eva = numpy.linalg.eigvalsh( __Ensemble @ __Ensemble.T )
666 __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
667 __sv = numpy.sqrt( __svsq )
669 raise ValueError("Error in requested variant name: %s"%__Using)
671 __tisv = __svsq / __svsq.sum()
672 __qisv = 1. - __svsq.cumsum() / __svsq.sum()
673 # Différence à 1.e-16 : __qisv = 1. - __tisv.cumsum()
675 return __sv, __svsq, __tisv, __qisv
677 # ==============================================================================
678 def MaxL2NormByColumn(__Ensemble, __LcCsts = False, __IncludedPoints = []):
679 "Maximum des normes L2 calculées par colonne"
680 if __LcCsts and len(__IncludedPoints) > 0:
681 normes = numpy.linalg.norm(
682 numpy.take(__Ensemble, __IncludedPoints, axis=0, mode='clip'),
686 normes = numpy.linalg.norm( __Ensemble, axis = 0)
687 nmax = numpy.max(normes)
688 imax = numpy.argmax(normes)
689 return nmax, imax, normes
691 def MaxLinfNormByColumn(__Ensemble, __LcCsts = False, __IncludedPoints = []):
692 "Maximum des normes Linf calculées par colonne"
693 if __LcCsts and len(__IncludedPoints) > 0:
694 normes = numpy.linalg.norm(
695 numpy.take(__Ensemble, __IncludedPoints, axis=0, mode='clip'),
696 axis = 0, ord=numpy.inf,
699 normes = numpy.linalg.norm( __Ensemble, axis = 0, ord=numpy.inf)
700 nmax = numpy.max(normes)
701 imax = numpy.argmax(normes)
702 return nmax, imax, normes
704 def InterpolationErrorByColumn(
705 __Ensemble = None, __Basis = None, __Points = None, __M = 2, # Usage 1
706 __Differences = None, # Usage 2
707 __ErrorNorm = None, # Commun
708 __LcCsts = False, __IncludedPoints = [], # Commun
709 __CDM = False, # ComputeMaxDifference # Commun
710 __RMU = False, # ReduceMemoryUse # Commun
711 __FTL = False, # ForceTril # Commun
713 "Analyse des normes d'erreurs d'interpolation calculées par colonne"
714 if __ErrorNorm == "L2":
715 NormByColumn = MaxL2NormByColumn
717 NormByColumn = MaxLinfNormByColumn
719 if __Differences is None and not __RMU: # Usage 1
721 rBasis = numpy.tril( __Basis[__Points, :] )
723 rBasis = __Basis[__Points, :]
724 rEnsemble = __Ensemble[__Points, :]
727 rBasis_inv = numpy.linalg.inv(rBasis)
728 Interpolator = numpy.dot(__Basis, numpy.dot(rBasis_inv, rEnsemble))
730 rBasis_inv = 1. / rBasis
731 Interpolator = numpy.outer(__Basis, numpy.outer(rBasis_inv, rEnsemble))
733 differences = __Ensemble - Interpolator
735 error, nbr, _ = NormByColumn(differences, __LcCsts, __IncludedPoints)
738 maxDifference = differences[:, nbr]
740 elif __Differences is None and __RMU: # Usage 1
742 rBasis = numpy.tril( __Basis[__Points, :] )
744 rBasis = __Basis[__Points, :]
745 rEnsemble = __Ensemble[__Points, :]
748 rBasis_inv = numpy.linalg.inv(rBasis)
749 rCoordinates = numpy.dot(rBasis_inv, rEnsemble)
751 rBasis_inv = 1. / rBasis
752 rCoordinates = numpy.outer(rBasis_inv, rEnsemble)
756 for iCol in range(__Ensemble.shape[1]):
758 iDifference = __Ensemble[:, iCol] - numpy.dot(__Basis, rCoordinates[:, iCol])
760 iDifference = __Ensemble[:, iCol] - numpy.ravel(numpy.outer(__Basis, rCoordinates[:, iCol]))
762 normDifference, _, _ = NormByColumn(iDifference, __LcCsts, __IncludedPoints)
764 if normDifference > error:
765 error = normDifference
769 maxDifference = __Ensemble[:, nbr] - numpy.dot(__Basis, rCoordinates[:, nbr])
772 differences = __Differences
774 error, nbr, _ = NormByColumn(differences, __LcCsts, __IncludedPoints)
777 # faire cette variable intermédiaire coûte cher
778 maxDifference = differences[:, nbr]
781 return error, nbr, maxDifference
785 # ==============================================================================
786 def EnsemblePerturbationWithGivenCovariance(
790 "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
791 if hasattr(__Covariance, "assparsematrix"):
792 if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix()) / abs(__Ensemble).mean() < mpr).all():
793 # Traitement d'une covariance nulle ou presque
795 if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
796 # Traitement d'une covariance nulle ou presque
799 if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance) / abs(__Ensemble).mean() < mpr).all():
800 # Traitement d'une covariance nulle ou presque
802 if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
803 # Traitement d'une covariance nulle ou presque
806 __n, __m = __Ensemble.shape
807 if __Seed is not None:
808 numpy.random.seed(__Seed)
810 if hasattr(__Covariance, "isscalar") and __Covariance.isscalar():
811 # Traitement d'une covariance multiple de l'identité
813 __std = numpy.sqrt(__Covariance.assparsematrix())
814 __Ensemble += numpy.random.normal(__zero, __std, size=(__m, __n)).T
816 elif hasattr(__Covariance, "isvector") and __Covariance.isvector():
817 # Traitement d'une covariance diagonale avec variances non identiques
818 __zero = numpy.zeros(__n)
819 __std = numpy.sqrt(__Covariance.assparsematrix())
820 __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
822 elif hasattr(__Covariance, "ismatrix") and __Covariance.ismatrix():
823 # Traitement d'une covariance pleine
824 __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
826 elif isinstance(__Covariance, numpy.ndarray):
827 # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
828 __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
831 raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
835 # ==============================================================================
836 def CovarianceInflation(
838 __InflationType = None,
839 __InflationFactor = None,
840 __BackgroundCov = None ):
842 Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
844 Synthèse : Hunt 2007, section 2.3.5
846 if __InflationFactor is None:
847 return __InputCovOrEns
849 __InflationFactor = float(__InflationFactor)
851 __InputCovOrEns = numpy.asarray(__InputCovOrEns)
852 if __InputCovOrEns.size == 0:
853 return __InputCovOrEns
855 if __InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
856 if __InflationFactor < 1.:
857 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
858 if __InflationFactor < 1. + mpr: # No inflation = 1
859 return __InputCovOrEns
860 __OutputCovOrEns = __InflationFactor**2 * __InputCovOrEns
862 elif __InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
863 if __InflationFactor < 1.:
864 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
865 if __InflationFactor < 1. + mpr: # No inflation = 1
866 return __InputCovOrEns
867 __InputCovOrEnsMean = __InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
868 __OutputCovOrEns = __InputCovOrEnsMean[:, numpy.newaxis] \
869 + __InflationFactor * (__InputCovOrEns - __InputCovOrEnsMean[:, numpy.newaxis])
871 elif __InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
872 if __InflationFactor < 0.:
873 raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
874 if __InflationFactor < mpr: # No inflation = 0
875 return __InputCovOrEns
876 __n, __m = __InputCovOrEns.shape
878 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
879 __tr = __InputCovOrEns.trace() / __n
880 if __InflationFactor > __tr:
881 raise ValueError("Inflation factor for additive inflation has to be small over %.0e."%__tr)
882 __OutputCovOrEns = (1. - __InflationFactor) * __InputCovOrEns + __InflationFactor * numpy.identity(__n)
884 elif __InflationType == "HybridOnBackgroundCovariance":
885 if __InflationFactor < 0.:
886 raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
887 if __InflationFactor < mpr: # No inflation = 0
888 return __InputCovOrEns
889 __n, __m = __InputCovOrEns.shape
891 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
892 if __BackgroundCov is None:
893 raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
894 if __InputCovOrEns.shape != __BackgroundCov.shape:
895 raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
896 __OutputCovOrEns = (1. - __InflationFactor) * __InputCovOrEns + __InflationFactor * __BackgroundCov
898 elif __InflationType == "Relaxation":
899 raise NotImplementedError("Relaxation inflation type not implemented")
902 raise ValueError("Error in inflation type, '%s' is not a valid keyword."%__InflationType)
904 return __OutputCovOrEns
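# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original library): multiplicative
# inflation of ensemble anomalies, as referenced above (Hunt 2007, section
# 2.3.5). The helper name "_exampleCovarianceInflation" and the toy ensemble
# are hypothetical.
def _exampleCovarianceInflation():
    "Minimal sketch: anomalies are amplified while the ensemble mean is preserved"
    E  = numpy.array([[1., 2., 3.],
                      [0., 1., 2.]])  # 2 state components x 3 members
    Ei = CovarianceInflation( E, "MultiplicativeOnAnalysisAnomalies", 1.1 )
    # The member mean is unchanged and each anomaly is multiplied by 1.1:
    #   mean(Ei, axis=1) == mean(E, axis=1)
    #   Ei - mean == 1.1 * (E - mean)
    return Ei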
# ==============================================================================
def HessienneEstimation( __selfA, __nb, __HaM, __HtM, __BI, __RI ):
    "Estimation of the Hessian"
    #
    __HessienneI = []
    for i in range(int(__nb)):
        __ee = numpy.zeros((__nb, 1))
        __ee[i] = 1.
        __HtEE = numpy.dot(__HtM, __ee).reshape((-1, 1))
        __HessienneI.append( numpy.ravel( __BI * __ee + __HaM * (__RI * __HtEE) ) )
    #
    __A = numpy.linalg.inv(numpy.array( __HessienneI ))
    __A = (__A + __A.T) * 0.5  # Symmetry
    __A = __A + mpr * numpy.trace( __A ) * numpy.identity(__nb)  # Positivity
    #
    if min(__A.shape) != max(__A.shape):
        raise ValueError(
            "The %s a posteriori covariance matrix A"%(__selfA._name,) + \
            " is of shape %s, whereas it has to be a"%(str(__A.shape),) + \
            " squared matrix. There is an error in the observation operator," + \
            " please check it.")
    if (numpy.diag(__A) < 0).any():
        raise ValueError(
            "The %s a posteriori covariance matrix A"%(__selfA._name,) + \
            " has at least one negative value on its diagonal. There is an" + \
            " error in the observation operator, please check it.")
    if logging.getLogger().level < logging.WARNING:  # The check is only done in debug mode
        try:
            numpy.linalg.cholesky( __A )
            logging.debug("%s La matrice de covariance a posteriori A est bien symétrique définie positive."%(__selfA._name,))
        except numpy.linalg.LinAlgError:
            raise ValueError(
                "The %s a posteriori covariance matrix A"%(__selfA._name,) + \
                " is not symmetric positive-definite. Please check your a" + \
                " priori covariances and your observation operator.")
    #
    return __A
944 # ==============================================================================
945 def QuantilesEstimations( selfA, A, Xa, HXa = None, Hm = None, HtM = None ):
946 "Estimation des quantiles a posteriori à partir de A>0 (selfA est modifié)"
947 nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
949 # Traitement des bornes
950 if "StateBoundsForQuantiles" in selfA._parameters:
951 LBounds = selfA._parameters["StateBoundsForQuantiles"] # Prioritaire
952 elif "Bounds" in selfA._parameters:
953 LBounds = selfA._parameters["Bounds"] # Défaut raisonnable
956 if LBounds is not None:
957 LBounds = ForceNumericBounds( LBounds )
958 __Xa = numpy.ravel(Xa)
960 # Échantillonnage des états
963 for i in range(nbsamples):
964 if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
965 dXr = (numpy.random.multivariate_normal(__Xa, A) - __Xa).reshape((-1, 1))
966 if LBounds is not None: # "EstimateProjection" par défaut
967 dXr = numpy.max(numpy.hstack((dXr, LBounds[:, 0].reshape((-1, 1))) - __Xa.reshape((-1, 1))), axis=1)
968 dXr = numpy.min(numpy.hstack((dXr, LBounds[:, 1].reshape((-1, 1))) - __Xa.reshape((-1, 1))), axis=1)
970 Yr = HXa.reshape((-1, 1)) + dYr
971 if selfA._toStore("SampledStateForQuantiles"):
972 Xr = __Xa + numpy.ravel(dXr)
973 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
974 Xr = numpy.random.multivariate_normal(__Xa, A)
975 if LBounds is not None: # "EstimateProjection" par défaut
976 Xr = numpy.max(numpy.hstack((Xr.reshape((-1, 1)), LBounds[:, 0].reshape((-1, 1)))), axis=1)
977 Xr = numpy.min(numpy.hstack((Xr.reshape((-1, 1)), LBounds[:, 1].reshape((-1, 1)))), axis=1)
978 Yr = numpy.asarray(Hm( Xr ))
980 raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
983 YfQ = Yr.reshape((-1, 1))
984 if selfA._toStore("SampledStateForQuantiles"):
985 EXr = Xr.reshape((-1, 1))
987 YfQ = numpy.hstack((YfQ, Yr.reshape((-1, 1))))
988 if selfA._toStore("SampledStateForQuantiles"):
989 EXr = numpy.hstack((EXr, Xr.reshape((-1, 1))))
991 # Extraction des quantiles
994 for quantile in selfA._parameters["Quantiles"]:
995 if not (0. <= float(quantile) <= 1.):
997 indice = int(nbsamples * float(quantile) - 1. / nbsamples)
999 YQ = YfQ[:, indice].reshape((-1, 1))
1001 YQ = numpy.hstack((YQ, YfQ[:, indice].reshape((-1, 1))))
1002 if YQ is not None: # Liste non vide de quantiles
1003 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1004 if selfA._toStore("SampledStateForQuantiles"):
1005 selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
# ==============================================================================
def ForceNumericBounds( __Bounds, __infNumbers = True ):
    "Force the bounds to be numerical values, except if they are globally None"
    # Keep the default None value if there are no bounds
    if __Bounds is None:
        return None
    #
    # Convert every individual None bound to +/- numerical infinity
    __Bounds = numpy.asarray( __Bounds, dtype=float ).reshape((-1, 2))
    if len(__Bounds.shape) != 2 or __Bounds.shape[0] == 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data (effective shape is %s)"%(__Bounds.shape,))
    if __infNumbers:
        __Bounds[numpy.isnan(__Bounds[:, 0]), 0] = -float('inf')
        __Bounds[numpy.isnan(__Bounds[:, 1]), 1] = float('inf')
    else:
        __Bounds[numpy.isnan(__Bounds[:, 0]), 0] = -sys.float_info.max
        __Bounds[numpy.isnan(__Bounds[:, 1]), 1] = sys.float_info.max
    return __Bounds
# ==============================================================================
def RecentredBounds( __Bounds, __Center, __Scale = None ):
    "Recenter the bounds around 0, except if they are globally None"
    # Keep the default None value if there are no bounds
    if __Bounds is None:
        return None
    if __Scale is None:
        # Recenter the numerical bound values
        return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1, 1))
    else:
        # Recenter the numerical bound values and rescale with a matrix
        return __Scale @ (ForceNumericBounds( __Bounds, False ) - numpy.ravel( __Center ).reshape((-1, 1)))
# ==============================================================================
def ApplyBounds( __Vector, __Bounds, __newClip = True ):
    "Apply numerical bounds to a point"
    # Keep a default value if there are no bounds
    if __Bounds is None:
        return __Vector
    #
    if not isinstance(__Vector, numpy.ndarray):  # Is an array
        raise ValueError("Incorrect array definition of vector data")
    if not isinstance(__Bounds, numpy.ndarray):  # Is an array
        raise ValueError("Incorrect array definition of bounds data")
    if 2 * __Vector.size != __Bounds.size:  # Is a 2 column array of vector length
        raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size, __Vector.size))
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    #
    if __newClip:
        __Vector = __Vector.clip(
            __Bounds[:, 0].reshape(__Vector.shape),
            __Bounds[:, 1].reshape(__Vector.shape),
        )
    else:
        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1, 1)), numpy.asmatrix(__Bounds)[:, 0])), axis=1)
        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1, 1)), numpy.asmatrix(__Bounds)[:, 1])), axis=1)
        __Vector = numpy.asarray(__Vector)
    #
    return __Vector
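# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original library): forcing half-open
# bounds to numerical values and clipping a state with them. The helper name
# "_exampleApplyBounds" and the sample values are hypothetical.
def _exampleApplyBounds():
    "Minimal sketch: None bounds become +/-inf, then the vector is clipped"
    Bounds = ForceNumericBounds( [[0., 10.], [None, 1.], [-1., None]] )
    X  = numpy.array([12., 0.5, -3.])
    Xc = ApplyBounds( X, Bounds )  # -> array([10. , 0.5, -1. ])
    return Xc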
1070 # ==============================================================================
1071 def VariablesAndIncrementsBounds( __Bounds, __BoxBounds, __Xini, __Name, __Multiplier = 1. ):
1072 __Bounds = ForceNumericBounds( __Bounds )
1073 __BoxBounds = ForceNumericBounds( __BoxBounds )
1074 if __Bounds is None and __BoxBounds is None:
1075 raise ValueError("Algorithm %s requires bounds on all variables (by Bounds), or on all variable increments (by BoxBounds), or both, to be explicitly given."%(__Name,))
1076 elif __Bounds is None and __BoxBounds is not None:
1077 __Bounds = __BoxBounds
1078 logging.debug("%s Definition of parameter bounds from current parameter increment bounds"%(__Name,))
1079 elif __Bounds is not None and __BoxBounds is None:
1080 __BoxBounds = __Multiplier * (__Bounds - __Xini.reshape((-1, 1))) # "M * [Xmin,Xmax]-Xini"
1081 logging.debug("%s Definition of parameter increment bounds from current parameter bounds"%(__Name,))
1082 return __Bounds, __BoxBounds
1084 # ==============================================================================
1085 def Apply3DVarRecentringOnEnsemble( __EnXn, __EnXf, __Ynpu, __HO, __R, __B, __SuppPars ):
1086 "Recentre l'ensemble Xn autour de l'analyse 3DVAR"
1087 __Betaf = __SuppPars["HybridCovarianceEquilibrium"]
1089 Xf = EnsembleMean( __EnXf )
1090 Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
1091 Pf = (1 - __Betaf) * __B.asfullmatrix(Xf.size) + __Betaf * Pf
1093 selfB = PartialAlgorithm("3DVAR")
1094 selfB._parameters["Minimizer"] = "LBFGSB"
1095 selfB._parameters["MaximumNumberOfIterations"] = __SuppPars["HybridMaximumNumberOfIterations"]
1096 selfB._parameters["CostDecrementTolerance"] = __SuppPars["HybridCostDecrementTolerance"]
1097 selfB._parameters["ProjectedGradientTolerance"] = -1
1098 selfB._parameters["GradientNormTolerance"] = 1.e-05
1099 selfB._parameters["StoreInternalVariables"] = False
1100 selfB._parameters["optiprint"] = -1
1101 selfB._parameters["optdisp"] = 0
1102 selfB._parameters["Bounds"] = None
1103 selfB._parameters["InitializationPoint"] = Xf
1104 from daAlgorithms.Atoms import std3dvar
1105 std3dvar.std3dvar(selfB, Xf, __Ynpu, None, __HO, None, __R, Pf)
1106 Xa = selfB.get("Analysis")[-1].reshape((-1, 1))
1109 return Xa + EnsembleOfAnomalies( __EnXn )
# ==============================================================================
def GenerateRandomPointInHyperSphere( __Center, __Radius ):
    "Generate a random point uniformly distributed inside an hyper-sphere"
    __Dimension = numpy.asarray( __Center ).size
    __GaussDelta = numpy.random.normal( 0, 1, size=__Center.shape )
    __VectorNorm = numpy.linalg.norm( __GaussDelta )
    __PointOnHS = __Radius * (__GaussDelta / __VectorNorm)
    __MoveInHS = math.exp( math.log(numpy.random.uniform()) / __Dimension)  # rand()**1/n
    __PointInHS = __MoveInHS * __PointOnHS
    return __Center + __PointInHS
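# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original library): drawing points inside
# an hyper-sphere and checking that they stay within the requested radius. The
# helper name "_exampleRandomPointInHyperSphere" is hypothetical.
def _exampleRandomPointInHyperSphere():
    "Minimal sketch: every drawn point lies inside the sphere of radius 2"
    center = numpy.zeros(3)
    for _ in range(10):
        point = GenerateRandomPointInHyperSphere( center, 2. )
        assert numpy.linalg.norm( point - center ) <= 2.
    return point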
1122 # ==============================================================================
1123 class GenerateWeightsAndSigmaPoints(object):
1124 "Génère les points sigma et les poids associés"
1127 Nn=0, EO="State", VariantM="UKF",
1128 Alpha=None, Beta=2., Kappa=0.):
1130 self.Alpha = numpy.longdouble( Alpha )
1131 self.Beta = numpy.longdouble( Beta )
1132 if abs(Kappa) < 2 * mpr:
1133 if EO == "Parameters" and VariantM == "UKF":
1134 self.Kappa = 3 - self.Nn
1135 else: # EO == "State":
1139 self.Kappa = numpy.longdouble( self.Kappa )
1140 self.Lambda = self.Alpha**2 * ( self.Nn + self.Kappa ) - self.Nn
1141 self.Gamma = self.Alpha * numpy.sqrt( self.Nn + self.Kappa )
1142 # Rq.: Gamma = sqrt(n+Lambda) = Alpha*sqrt(n+Kappa)
1143 assert 0. < self.Alpha <= 1., "Alpha has to be between 0 strictly and 1 included"
1145 if VariantM == "UKF":
1146 self.Wm, self.Wc, self.SC = self.__UKF2000()
1147 elif VariantM == "S3F":
1148 self.Wm, self.Wc, self.SC = self.__S3F2022()
1149 elif VariantM == "MSS":
1150 self.Wm, self.Wc, self.SC = self.__MSS2011()
1151 elif VariantM == "5OS":
1152 self.Wm, self.Wc, self.SC = self.__5OS2002()
1154 raise ValueError("Variant \"%s\" is not a valid one."%VariantM)
1156 def __UKF2000(self):
1157 "Standard Set, Julier et al. 2000 (aka Canonical UKF)"
1158 # Rq.: W^{(m)}_{i=/=0} = 1. / (2.*(n + Lambda))
1159 Winn = 1. / (2. * ( self.Nn + self.Kappa ) * self.Alpha**2)
1162 for point in range(2 * self.Nn):
1164 # Rq.: LsLpL = Lambda / (n + Lambda)
1165 LsLpL = 1. - self.Nn / (self.Alpha**2 * ( self.Nn + self.Kappa ))
1166 Wm = numpy.array( Ww )
1168 Wc = numpy.array( Ww )
1169 Wc[0] = LsLpL + (1. - self.Alpha**2 + self.Beta)
1170 # OK: assert abs(Wm.sum()-1.) < self.Nn*mpr, "UKF ill-conditioned %s >= %s"%(abs(Wm.sum()-1.), self.Nn*mpr)
1172 SC = numpy.zeros((self.Nn, len(Ww)))
1173 for ligne in range(self.Nn):
1175 SC[ligne, it ] = self.Gamma
1176 SC[ligne, self.Nn + it] = -self.Gamma
1180 def __S3F2022(self):
1181 "Scaled Spherical Simplex Set, Papakonstantinou et al. 2022"
1182 # Rq.: W^{(m)}_{i=/=0} = (n + Kappa) / ((n + Lambda) * (n + 1 + Kappa))
1183 Winn = 1. / ((self.Nn + 1. + self.Kappa) * self.Alpha**2)
1186 for point in range(self.Nn + 1):
1188 # Rq.: LsLpL = Lambda / (n + Lambda)
1189 LsLpL = 1. - self.Nn / (self.Alpha**2 * ( self.Nn + self.Kappa ))
1190 Wm = numpy.array( Ww )
1192 Wc = numpy.array( Ww )
1193 Wc[0] = LsLpL + (1. - self.Alpha**2 + self.Beta)
1194 # OK: assert abs(Wm.sum()-1.) < self.Nn*mpr, "S3F ill-conditioned %s >= %s"%(abs(Wm.sum()-1.), self.Nn*mpr)
1196 SC = numpy.zeros((self.Nn, len(Ww)))
1197 for ligne in range(self.Nn):
1199 q_t = it / math.sqrt( it * (it + 1) * Winn )
1200 SC[ligne, 1:it + 1] = -q_t / it
1201 SC[ligne, it + 1 ] = q_t
1205 def __MSS2011(self):
1206 "Minimum Set, Menegaz et al. 2011"
1207 rho2 = (1 - self.Alpha) / self.Nn
1208 Cc = numpy.real(scipy.linalg.sqrtm( numpy.identity(self.Nn) - rho2 ))
1209 Ww = self.Alpha * rho2 * scipy.linalg.inv(Cc) @ numpy.ones(self.Nn) @ scipy.linalg.inv(Cc.T)
1210 Wm = Wc = numpy.concatenate((Ww, [self.Alpha]))
1211 # OK: assert abs(Wm.sum()-1.) < self.Nn*mpr, "MSS ill-conditioned %s >= %s"%(abs(Wm.sum()-1.), self.Nn*mpr)
1213 # inv(sqrt(W)) = diag(inv(sqrt(W)))
1214 SC1an = Cc @ numpy.diag(1. / numpy.sqrt( Ww ))
1215 SCnpu = (- numpy.sqrt(rho2) / numpy.sqrt(self.Alpha)) * numpy.ones(self.Nn).reshape((-1, 1))
1216 SC = numpy.concatenate((SC1an, SCnpu), axis=1)
1220 def __5OS2002(self):
1221 "Fifth Order Set, Lerner 2002"
1223 for point in range(2 * self.Nn):
1224 Ww.append( (4. - self.Nn) / 18. )
1225 for point in range(2 * self.Nn, 2 * self.Nn**2):
1226 Ww.append( 1. / 36. )
1227 Ww.append( (self.Nn**2 - 7 * self.Nn) / 18. + 1.)
1228 Wm = Wc = numpy.array( Ww )
1229 # OK: assert abs(Wm.sum()-1.) < self.Nn*mpr, "5OS ill-conditioned %s >= %s"%(abs(Wm.sum()-1.), self.Nn*mpr)
1231 xi1n = numpy.diag( math.sqrt(3) * numpy.ones( self.Nn ) )
1232 xi2n = numpy.diag( -math.sqrt(3) * numpy.ones( self.Nn ) )
1234 xi3n1 = numpy.zeros((int((self.Nn - 1) * self.Nn / 2), self.Nn), dtype=float)
1235 xi3n2 = numpy.zeros((int((self.Nn - 1) * self.Nn / 2), self.Nn), dtype=float)
1236 xi4n1 = numpy.zeros((int((self.Nn - 1) * self.Nn / 2), self.Nn), dtype=float)
1237 xi4n2 = numpy.zeros((int((self.Nn - 1) * self.Nn / 2), self.Nn), dtype=float)
1239 for i1 in range(self.Nn - 1):
1240 for i2 in range(i1 + 1, self.Nn):
1241 xi3n1[ia, i1] = xi3n2[ia, i2] = math.sqrt(3)
1242 xi3n2[ia, i1] = xi3n1[ia, i2] = -math.sqrt(3)
1243 # --------------------------------
1244 xi4n1[ia, i1] = xi4n1[ia, i2] = math.sqrt(3)
1245 xi4n2[ia, i1] = xi4n2[ia, i2] = -math.sqrt(3)
1247 SC = numpy.concatenate((xi1n, xi2n, xi3n1, xi3n2, xi4n1, xi4n2, numpy.zeros((1, self.Nn)))).T
1251 def nbOfPoints(self):
1252 assert self.Nn == self.SC.shape[0], "Size mismatch %i =/= %i"%(self.Nn, self.SC.shape[0])
1253 assert self.Wm.size == self.SC.shape[1], "Size mismatch %i =/= %i"%(self.Wm.size, self.SC.shape[1])
1254 assert self.Wm.size == self.Wc.size, "Size mismatch %i =/= %i"%(self.Wm.size, self.Wc.size)
1258 return self.Wm, self.Wc, self.SC
1261 "x.__repr__() <==> repr(x)"
1263 msg += " Alpha = %s\n"%self.Alpha
1264 msg += " Beta = %s\n"%self.Beta
1265 msg += " Kappa = %s\n"%self.Kappa
1266 msg += " Lambda = %s\n"%self.Lambda
1267 msg += " Gamma = %s\n"%self.Gamma
1268 msg += " Wm = %s\n"%self.Wm
1269 msg += " Wc = %s\n"%self.Wc
1270 msg += " sum(Wm) = %s\n"%numpy.sum(self.Wm)
1271 msg += " SC =\n%s\n"%self.SC
1274 # ==============================================================================
1275 def GetNeighborhoodTopology( __ntype, __ipop ):
1276 "Renvoi une topologie de connexion pour une population de points"
1277 if __ntype in ["FullyConnectedNeighborhood", "FullyConnectedNeighbourhood", "gbest"]:
1278 __topology = [__ipop for __i in __ipop]
1279 elif __ntype in ["RingNeighborhoodWithRadius1", "RingNeighbourhoodWithRadius1", "lbest"]:
1280 __cpop = list(__ipop[-1:]) + list(__ipop) + list(__ipop[:1])
1281 __topology = [__cpop[__n:__n + 3] for __n in range(len(__ipop))]
1282 elif __ntype in ["RingNeighborhoodWithRadius2", "RingNeighbourhoodWithRadius2"]:
1283 __cpop = list(__ipop[-2:]) + list(__ipop) + list(__ipop[:2])
1284 __topology = [__cpop[__n:__n + 5] for __n in range(len(__ipop))]
1285 elif __ntype in ["AdaptativeRandomWith3Neighbors", "AdaptativeRandomWith3Neighbours", "abest"]:
1286 __cpop = 3 * list(__ipop)
1287 __topology = [[__i] + list(numpy.random.choice(__cpop, 3)) for __i in __ipop]
1288 elif __ntype in ["AdaptativeRandomWith5Neighbors", "AdaptativeRandomWith5Neighbours"]:
1289 __cpop = 5 * list(__ipop)
1290 __topology = [[__i] + list(numpy.random.choice(__cpop, 5)) for __i in __ipop]
1292 raise ValueError("Swarm topology type unavailable because name \"%s\" is unknown."%__ntype)
1295 # ==============================================================================
1296 def FindIndexesFromNames( __NameOfLocations = None, __ExcludeLocations = None, ForceArray = False ):
1297 "Exprime les indices des noms exclus, en ignorant les absents"
1298 if __ExcludeLocations is None:
1299 __ExcludeIndexes = ()
1300 elif isinstance(__ExcludeLocations, (list, numpy.ndarray, tuple)) and len(__ExcludeLocations) == 0:
1301 __ExcludeIndexes = ()
1303 elif __NameOfLocations is None:
1305 __ExcludeIndexes = numpy.asarray(__ExcludeLocations, dtype=int)
1306 except ValueError as e:
1307 if "invalid literal for int() with base 10:" in str(e):
1308 raise ValueError("to exclude named locations, initial location name list can not be void and has to have the same length as one state")
1310 raise ValueError(str(e))
1311 elif isinstance(__NameOfLocations, (list, numpy.ndarray, tuple)) and len(__NameOfLocations) == 0:
1313 __ExcludeIndexes = numpy.asarray(__ExcludeLocations, dtype=int)
1314 except ValueError as e:
1315 if "invalid literal for int() with base 10:" in str(e):
1316 raise ValueError("to exclude named locations, initial location name list can not be void and has to have the same length as one state")
1318 raise ValueError(str(e))
1322 __ExcludeIndexes = numpy.asarray(__ExcludeLocations, dtype=int)
1323 except ValueError as e:
1324 if "invalid literal for int() with base 10:" in str(e):
1325 if len(__NameOfLocations) < 1.e6 + 1 and len(__ExcludeLocations) > 1500:
1329 if ForceArray or __Heuristic:
1330 # Recherche par array permettant des noms invalides, peu efficace
1331 __NameToIndex = dict(numpy.array((
1333 range(len(__NameOfLocations))
1335 __ExcludeIndexes = numpy.asarray([__NameToIndex.get(k, -1) for k in __ExcludeLocations], dtype=int)
1338 # Recherche par liste permettant des noms invalides, très efficace
1339 def __NameToIndex_get( cle, default = -1 ):
1340 if cle in __NameOfLocations:
1341 return __NameOfLocations.index(cle)
1344 __ExcludeIndexes = numpy.asarray([__NameToIndex_get(k, -1) for k in __ExcludeLocations], dtype=int)
1346 # Recherche par liste interdisant des noms invalides, mais encore un peu plus efficace
1347 # __ExcludeIndexes = numpy.asarray([__NameOfLocations.index(k) for k in __ExcludeLocations], dtype=int)
1349 # Ignore les noms absents
1350 __ExcludeIndexes = numpy.compress(__ExcludeIndexes > -1, __ExcludeIndexes)
1351 if len(__ExcludeIndexes) == 0:
1352 __ExcludeIndexes = ()
1354 raise ValueError(str(e))
1356 return __ExcludeIndexes
1358 # ==============================================================================
1359 def BuildComplexSampleList(
1361 __SampleAsExplicitHyperCube,
1362 __SampleAsMinMaxStepHyperCube,
1363 __SampleAsMinMaxLatinHyperCube,
1364 __SampleAsMinMaxSobolSequence,
1365 __SampleAsIndependantRandomVariables,
1368 # ---------------------------
1369 if len(__SampleAsnUplet) > 0:
1370 sampleList = __SampleAsnUplet
1371 for i, Xx in enumerate(sampleList):
1372 if numpy.ravel(Xx).size != __X0.size:
1373 raise ValueError("The size %i of the %ith state X in the sample and %i of the checking point Xb are different, they have to be identical."%(numpy.ravel(Xx).size, i + 1, __X0.size))
1374 # ---------------------------
1375 elif len(__SampleAsExplicitHyperCube) > 0:
1376 sampleList = itertools.product(*list(__SampleAsExplicitHyperCube))
1377 # ---------------------------
1378 elif len(__SampleAsMinMaxStepHyperCube) > 0:
1379 coordinatesList = []
1380 for i, dim in enumerate(__SampleAsMinMaxStepHyperCube):
1382 raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be [min,max,step]."%(i, dim))
1384 coordinatesList.append(numpy.linspace(dim[0], dim[1], 1 + int((float(dim[1]) - float(dim[0])) / float(dim[2]))))
1385 sampleList = itertools.product(*coordinatesList)
1386 # ---------------------------
1387 elif len(__SampleAsMinMaxLatinHyperCube) > 0:
1388 import scipy, warnings
1389 if vt(scipy.version.version) <= vt("1.7.0"):
1390 __msg = "In order to use Latin Hypercube sampling, you must at least use Scipy version 1.7.0 (and you are presently using Scipy %s). A void sample is then generated."%scipy.version.version
1391 warnings.warn(__msg, FutureWarning, stacklevel=50)
1394 __spDesc = list(__SampleAsMinMaxLatinHyperCube)
1395 __nbDime, __nbSamp = map(int, __spDesc.pop()) # Réduction du dernier
1396 __sample = scipy.stats.qmc.LatinHypercube(
1398 seed = numpy.random.default_rng(__Seed),
1400 __sample = __sample.random(n = __nbSamp)
1401 __bounds = numpy.array(__spDesc)[:, 0:2]
1402 __l_bounds = __bounds[:, 0]
1403 __u_bounds = __bounds[:, 1]
1404 sampleList = scipy.stats.qmc.scale(__sample, __l_bounds, __u_bounds)
1405 # ---------------------------
1406 elif len(__SampleAsMinMaxSobolSequence) > 0:
1407 import scipy, warnings
1408 if vt(scipy.version.version) <= vt("1.7.0"):
1409 __msg = "In order to use Latin Hypercube sampling, you must at least use Scipy version 1.7.0 (and you are presently using Scipy %s). A void sample is then generated."%scipy.version.version
1410 warnings.warn(__msg, FutureWarning, stacklevel=50)
1413 __spDesc = list(__SampleAsMinMaxSobolSequence)
1414 __nbDime, __nbSamp = map(int, __spDesc.pop()) # Réduction du dernier
1415 if __nbDime != len(__spDesc):
1416 warnings.warn("Declared space dimension (%i) is not equal to number of bounds (%i), the last one will be used."%(__nbDime, len(__spDesc)), FutureWarning, stacklevel=50)
1417 __sample = scipy.stats.qmc.Sobol(
1419 seed = numpy.random.default_rng(__Seed),
1421 __sample = __sample.random_base2(m = int(math.log2(__nbSamp)) + 1)
1422 __bounds = numpy.array(__spDesc)[:, 0:2]
1423 __l_bounds = __bounds[:, 0]
1424 __u_bounds = __bounds[:, 1]
1425 sampleList = scipy.stats.qmc.scale(__sample, __l_bounds, __u_bounds)
1426 # ---------------------------
1427 elif len(__SampleAsIndependantRandomVariables) > 0:
1428 coordinatesList = []
1429 for i, dim in enumerate(__SampleAsIndependantRandomVariables):
1431 raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be ('distribution',(parameters),length) with distribution in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]."%(i, dim))
1432 elif not ( str(dim[0]) in ['normal', 'lognormal', 'uniform', 'weibull'] \
1433 and hasattr(numpy.random, str(dim[0])) ):
1434 raise ValueError("For dimension %i, the distribution name \"%s\" is not allowed, please choose in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]"%(i, str(dim[0])))
1436 distribution = getattr(numpy.random, str(dim[0]), 'normal')
1437 coordinatesList.append(distribution(*dim[1], size=max(1, int(dim[2]))))
1438 sampleList = itertools.product(*coordinatesList)
1440 sampleList = iter([__X0,])
1444 # ==============================================================================
1446 selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle,
1447 __CovForecast = False ):
1449 Prévision multi-pas avec une correction par pas (multi-méthodes)
1454 if selfA._parameters["EstimationOf"] == "State":
1455 if len(selfA.StoredVariables["Analysis"]) == 0 or not selfA._parameters["nextStep"]:
1456 Xn = numpy.asarray(Xb)
1459 selfA.StoredVariables["Analysis"].store( Xn )
1460 if selfA._toStore("APosterioriCovariance"):
1461 if hasattr(B, "asfullmatrix"):
1462 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
1464 selfA.StoredVariables["APosterioriCovariance"].store( B )
1465 selfA._setInternalState("seed", numpy.random.get_state())
1466 elif selfA._parameters["nextStep"]:
1467 Xn = selfA._getInternalState("Xn")
1469 Pn = selfA._getInternalState("Pn")
1471 Xn = numpy.asarray(Xb)
1475 if hasattr(Y, "stepnumber"):
1476 duration = Y.stepnumber()
1482 for step in range(duration - 1):
1483 selfA.StoredVariables["CurrentStepNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1485 if hasattr(Y, "store"):
1486 Ynpu = numpy.asarray( Y[step + 1] ).reshape((-1, 1))
1488 Ynpu = numpy.asarray( Y ).reshape((-1, 1))
1491 if hasattr(U, "store") and len(U) > 1:
1492 Un = numpy.asarray( U[step] ).reshape((-1, 1))
1493 elif hasattr(U, "store") and len(U) == 1:
1494 Un = numpy.asarray( U[0] ).reshape((-1, 1))
1496 Un = numpy.asarray( U ).reshape((-1, 1))
1500 # Predict (Time Update)
1501 # ---------------------
1502 if selfA._parameters["EstimationOf"] == "State":
1504 Mt = EM["Tangent"].asMatrix(Xn)
1505 Mt = Mt.reshape(Xn.size, Xn.size) # ADAO & check shape
1507 Ma = EM["Adjoint"].asMatrix(Xn)
1508 Ma = Ma.reshape(Xn.size, Xn.size) # ADAO & check shape
1509 Pn_predicted = Q + Mt @ (Pn @ Ma)
1510 M = EM["Direct"].appliedControledFormTo
1511 Xn_predicted = M( (Xn, Un) ).reshape((-1, 1))
1512 if CM is not None and "Tangent" in CM and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1513 Cm = CM["Tangent"].asMatrix(Xn_predicted)
1514 Cm = Cm.reshape(Xn.size, Un.size) # ADAO & check shape
1515 Xn_predicted = Xn_predicted + (Cm @ Un).reshape((-1, 1))
1516 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
1517 # --- > Par principe, M = Id, Q = 0
1521 Xn_predicted = numpy.asarray(Xn_predicted).reshape((-1, 1))
1522 if selfA._toStore("ForecastState"):
1523 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1525 if hasattr(Pn_predicted, "asfullmatrix"):
1526 Pn_predicted = Pn_predicted.asfullmatrix(Xn.size)
1528 Pn_predicted = numpy.asarray(Pn_predicted).reshape((Xn.size, Xn.size))
1529 if selfA._toStore("ForecastCovariance"):
1530 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1532 # Correct (Measurement Update)
1533 # ----------------------------
1535 oneCycle(selfA, Xn_predicted, Ynpu, Un, HO, CM, R, Pn_predicted, True)
1537 oneCycle(selfA, Xn_predicted, Ynpu, Un, HO, CM, R, B, True)
1539 # --------------------------
1540 Xn = selfA._getInternalState("Xn")
1542 Pn = selfA._getInternalState("Pn")
1546 # ==============================================================================
1547 if __name__ == "__main__":
1548 print("\n AUTODIAGNOSTIC\n")