# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2024 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"

import os, copy, types, sys, logging, math, numpy, itertools
from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
from daCore.PlatformInfo import PlatformInfo, vt, vfloat
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.ravel( X ).reshape((-1, 1))
    __sys_path_tmp = sys.path ; sys.path.insert(0, funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module, funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
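# Illustrative sketch (assumption, not part of the library API): ExecuteFunction
# is meant to be mapped over a multiprocessing pool, each job being a triplet
# (state, extra arguments, function description). The path, module and function
# names below are hypothetical.
#
#   funcrepr = {
#       "__userFunction__path" : "/some/user/directory",
#       "__userFunction__modl" : "usermodule",
#       "__userFunction__name" : "H",
#   }
#   _jobs   = [ (Xi, None, funcrepr) for Xi in listOfStates ]
#   results = multiprocessing.Pool().map( ExecuteFunction, _jobs )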
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface to define approximated operators. When an
    object is created by supplying a function "Function", one obtains an object
    that provides the three methods "DirectOperator", "TangentOperator" and
    "AdjointOperator". The finite difference (FD) approximation is controlled
    either with the multiplicative increment "increment", equal to 1% by
    default, or with the fixed increment "dX" which will be multiplied by
    "increment" (hence in %), and centred FD are used if the boolean
    "centeredDF" is true.
    """

    __slots__ = (
        "__name", "__extraArgs", "__mpEnabled", "__mpWorkers", "__mfEnabled",
        "__rmEnabled", "__avoidRC", "__tolerBP", "__centeredDF", "__lengthRJ",
        "__listJPCP", "__listJPCI", "__listJPCR", "__listJPPN", "__listJPIN",
        "__userOperator", "__userFunction", "__increment", "__pool", "__dX",
        "__userFunction__name", "__userFunction__modl", "__userFunction__path",
    )

    def __init__(self,
                 name = "FDApproximation",
                 Function = None,
                 centeredDF = False,
                 increment = 0.01,
                 dX = None,
                 extraArguments = None,
                 reducingMemoryUse = False,
                 avoidingRedundancy = True,
                 toleranceInRedundancy = 1.e-18,
                 lengthOfRedundancy = -1,
                 mpEnabled = False,
                 mpWorkers = None,
                 mfEnabled = False ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        #
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled, self.__mpWorkers))
        #
        self.__mfEnabled = bool(mfEnabled)
        logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
        #
        self.__rmEnabled = bool(reducingMemoryUse)
        logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lengthRJ = int(lengthOfRedundancy)
            self.__listJPCP = []  # Jacobian Previous Calculated Points
            self.__listJPCI = []  # Jacobian Previous Calculated Increment
            self.__listJPCR = []  # Jacobian Previous Calculated Results
            self.__listJPPN = []  # Jacobian Previous Calculated Point Norms
            self.__listJPIN = []  # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP)
        #
        if self.__mpEnabled:
            if isinstance(Function, types.FunctionType):
                logging.debug("FDA Calculs en multiprocessing : FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'], Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc', '').replace('.pyo', '').replace('.py', '')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator(
                    name                 = self.__name,
                    fromMethod           = Function,
                    avoidingRedundancy   = self.__avoidRC,
                    inputAsMultiFunction = self.__mfEnabled,
                    extraArguments       = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo  # For the Direct computation
            elif isinstance(Function, types.MethodType):
                logging.debug("FDA Calculs en multiprocessing : MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'], Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc', '').replace('.pyo', '').replace('.py', '')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator(
                    name                 = self.__name,
                    fromMethod           = Function,
                    avoidingRedundancy   = self.__avoidRC,
                    inputAsMultiFunction = self.__mfEnabled,
                    extraArguments       = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo  # For the Direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator(
                name                 = self.__name,
                fromMethod           = Function,
                avoidingRedundancy   = self.__avoidRC,
                inputAsMultiFunction = self.__mfEnabled,
                extraArguments       = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.ravel( dX )
    # ---------------------------------------------------------
    def __doublon__(self, __e, __l, __n, __v=None):
        "Looks for a duplicate of __e in the list __l, using the stored norms __n"
        __ac, __iac = False, -1
        for i in range(len(__l)-1, -1, -1):
            if numpy.linalg.norm(__e - __l[i]) < self.__tolerBP * __n[i]:
                __ac, __iac = True, i
                if __v is not None: logging.debug("FDA Cas%s déjà calculé, récupération du doublon %i"%(__v, __iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
        "Incremental product of a columnwise list matrix with a vector"
        if not isinstance(__LMatrix, (list, tuple)):
            raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
        if __dotWith is not None:
            __Idwx = numpy.ravel( __dotWith )
            assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
            __Produit = numpy.zeros(__LMatrix[0].size)
            for i, col in enumerate(__LMatrix):
                __Produit += float(__Idwx[i]) * col
            return __Produit
        elif __dotTWith is not None:
            _Idwy = numpy.ravel( __dotTWith ).T
            assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
            __Produit = numpy.zeros(len(__LMatrix))
            for i, col in enumerate(__LMatrix):
                __Produit[i] = vfloat( _Idwy @ col )
            return __Produit
        else:
            __Produit = None
        return __Produit
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Direct computation using the supplied function.

        NB: the extraArgs are there to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        logging.debug("FDA Calcul DirectOperator (explicite)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
        """
        Computation of the tangent operator as the Jacobian matrix by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X. X is a numpy.ndarray.

        Centred finite differences (2nd order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i] to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by the step 2*dXi
        3/ Each result, component by component, becomes a column of the Jacobian

        Non-centred finite differences (1st order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are computed and divided by the
           step dXi
        4/ Each result, component by component, becomes a column of the Jacobian
        """
        logging.debug("FDA Début du calcul de la Jacobienne")
        logging.debug("FDA Incrément de............: %s*X"%float(self.__increment))
        logging.debug("FDA Approximation centrée...: %s"%(self.__centeredDF))
        #
        if X is None or len(X) == 0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.ravel( X )
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.ravel( self.__dX )
        assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
        assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
            logging.debug("FDA Fin du calcul de la Jacobienne")
            if dotWith is not None:
                return numpy.dot(_Jacobienne, numpy.ravel( dotWith ))
            elif dotTWith is not None:
                return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
        else:
            logging.debug("FDA Calcul Jacobienne (explicite)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
                #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    _jobs.append( (_X, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            if (dotWith is not None) or (dotTWith is not None):
                __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
            else:
                __Produit = None
            if __Produit is None or self.__avoidRC:
                _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
                if self.__avoidRC:
                    if self.__lengthRJ < 0: self.__lengthRJ = 2 * _X.size
                    while len(self.__listJPCP) > self.__lengthRJ:
                        self.__listJPCP.pop(0)
                        self.__listJPCI.pop(0)
                        self.__listJPCR.pop(0)
                        self.__listJPPN.pop(0)
                        self.__listJPIN.pop(0)
                    self.__listJPCP.append( copy.copy(_X) )
                    self.__listJPCI.append( copy.copy(_dX) )
                    self.__listJPCR.append( copy.copy(_Jacobienne) )
                    self.__listJPPN.append( numpy.linalg.norm(_X) )
                    self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
            logging.debug("FDA Fin du calcul de la Jacobienne")
            if __Produit is not None:
                return __Produit
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Tangent computation using the Jacobian matrix.

        NB: the extraArgs are there to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        #
        if dX is None or len(dX) == 0:
            #
            # Matrix form computation if the second argument is None
            # -------------------------------------------------------
            _Jacobienne = self.TangentMatrix( X )
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Value of H linearized at X, applied to dX
            # ------------------------------------------
            _HtX = self.TangentMatrix( X, dotWith = dX )
            if self.__mfEnabled: return [_HtX,]
            else:                return _HtX
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Adjoint computation using the Jacobian matrix.

        NB: the extraArgs are there to ensure call compatibility, but they must
        not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        #
        if Y is None or len(Y) == 0:
            #
            # Matrix form computation if the second argument is None
            # -------------------------------------------------------
            _JacobienneT = self.TangentMatrix( X ).T
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Value of the adjoint at X, applied to Y
            # ----------------------------------------
            _HaY = self.TangentMatrix( X, dotTWith = Y )
            if self.__mfEnabled: return [_HaY,]
            else:                return _HaY
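# Illustrative usage sketch (assumption, not part of the class itself): given a
# hypothetical user function H mapping a 1D numpy array to a 1D numpy array, the
# three approximated operators can be obtained as follows.
#
#   FDA = FDApproximation( Function = H, increment = 0.01, centeredDF = True )
#   HX  = FDA.DirectOperator( X )              # H(X)
#   J   = FDA.TangentOperator( (X, None) )     # full Jacobian at X
#   JdX = FDA.TangentOperator( (X, dX) )       # J @ dX
#   JtY = FDA.AdjointOperator( (X, Y) )        # J.T @ Y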
# ==============================================================================
def SetInitialDirection( __Direction = [], __Amplitude = 1., __Position = None ):
    "Sets or builds a direction with an amplitude"
    #
    if len(__Direction) == 0 and __Position is None:
        raise ValueError("If initial direction is void, current position has to be given")
    if abs(float(__Amplitude)) < mpr:
        raise ValueError("Amplitude of perturbation can not be zero")
    #
    if len(__Direction) > 0:
        __dX0 = numpy.asarray(__Direction)
    else:
        __dX0 = []
        __X0 = numpy.ravel(numpy.asarray(__Position))
        __mX0 = numpy.mean( __X0, dtype=mfp )
        if abs(__mX0) < 2*mpr: __mX0 = 1.  # Avoids the problem of a null position
        for v in __X0:
            if abs(v) > 1.e-8:
                __dX0.append( numpy.random.normal(0., abs(v)) )
            else:
                __dX0.append( numpy.random.normal(0., __mX0) )
    #
    __dX0 = numpy.asarray(__dX0, float)  # Avoids the problem of a size-1 array
    __dX0 = numpy.ravel( __dX0 )         # Straightens the vectors into 1D
    __dX0 = float(__Amplitude) * __dX0
    #
    return __dX0
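# Illustrative sketch (assumption): build a random perturbation direction of
# amplitude 1% around a hypothetical current state X0 when no explicit direction
# is supplied.
#
#   dX0 = SetInitialDirection( __Direction = [], __Amplitude = 0.01, __Position = X0 )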
# ==============================================================================
def EnsembleOfCenteredPerturbations( __bgCenter, __bgCovariance, __nbMembers ):
    "Generates an ensemble of __nbMembers random states centred on __bgCenter"
    #
    __bgCenter = numpy.ravel(__bgCenter)[:, None]
    if __nbMembers < 2:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(__nbMembers),))
    #
    if __bgCovariance is None:
        _Perturbations = numpy.tile( __bgCenter, __nbMembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(__bgCenter.size), __bgCovariance, size=__nbMembers).T
        _Perturbations = numpy.tile( __bgCenter, __nbMembers) + _Z
    #
    return _Perturbations
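# Illustrative sketch (assumption): drawing 100 members around a hypothetical
# state Xb with a full covariance matrix B gives one member per column.
#
#   E = EnsembleOfCenteredPerturbations( Xb, B, 100 )   # E.shape == (Xb.size, 100)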
# ==============================================================================
def EnsembleOfBackgroundPerturbations(
        __bgCenter,
        __bgCovariance,
        __nbMembers,
        __withSVD = True ):
    "Generates an ensemble of __nbMembers random background states centred on __bgCenter"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centred on Zr, following MB's
        handwritten notes and consistent with PS's code, with eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1) - numpy.ones((N-1, N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1, N-1)))
        Q = numpy.dot(Q, R)
        Zr = numpy.dot(Q, Zr)
        return Zr.T
    #
    __bgCenter = numpy.ravel(__bgCenter).reshape((-1, 1))
    if __nbMembers < 2:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(__nbMembers),))
    if __bgCovariance is None:
        _Perturbations = numpy.tile( __bgCenter, __nbMembers)
    else:
        if __withSVD:
            _U, _s, _V = numpy.linalg.svd(__bgCovariance, full_matrices=False)
            _nbctl = __bgCenter.size
            if __nbMembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl), __bgCovariance, __nbMembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:__nbMembers-1])), _V[:__nbMembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, __nbMembers)
            _Perturbations = __bgCenter + _Zca
        else:
            if max(abs(__bgCovariance.flatten())) > 0:
                _nbctl = __bgCenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl), __bgCovariance, __nbMembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, __nbMembers)
                _Perturbations = __bgCenter + _Zca
            else:
                _Perturbations = numpy.tile( __bgCenter, __nbMembers)
    #
    return _Perturbations
# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Returns the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1, 1))
# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1. ):
    "Returns the centred anomalies of an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1, 1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
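# Illustrative sketch (assumption): anomalies are the ensemble minus its empirical
# mean, one column per member, optionally scaled by __Normalisation.
#
#   Xm = EnsembleMean( E )           # shape (n, 1)
#   A  = EnsembleOfAnomalies( E )    # A == E - Xm, column-wise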
# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __Quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
    if __Quick:
        # Quick covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimation
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
        # Ensure symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Ensure positivity
        __epsilon = mpr*numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
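# Illustrative sketch (assumption): the default (robust) estimate is built from
# the anomalies A as P = A @ A.T / (m-1), then symmetrized and regularized.
#
#   P  = EnsembleErrorCovariance( E )                    # close to numpy.cov( E )
#   Pq = EnsembleErrorCovariance( E, __Quick = True )    # plain numpy.cov( E )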
# ==============================================================================
def SingularValuesEstimation( __Ensemble, __Using = "SVDVALS"):
    "Returns the singular values of the ensemble and their squares"
    if __Using == "SVDVALS":  # Recommended
        import scipy.linalg
        __sv   = scipy.linalg.svdvals( __Ensemble )
        __svsq = __sv**2
    elif __Using == "SVD":
        _, __sv, _ = numpy.linalg.svd( __Ensemble )
        __svsq = __sv**2
    elif __Using == "EIG":  # Slow
        __eva, __eve = numpy.linalg.eig( __Ensemble @ __Ensemble.T )
        __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
        __sv   = numpy.sqrt( __svsq )
    elif __Using == "EIGH":
        __eva, __eve = numpy.linalg.eigh( __Ensemble @ __Ensemble.T )
        __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
        __sv   = numpy.sqrt( __svsq )
    elif __Using == "EIGVALS":
        __eva = numpy.linalg.eigvals( __Ensemble @ __Ensemble.T )
        __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
        __sv   = numpy.sqrt( __svsq )
    elif __Using == "EIGVALSH":
        __eva = numpy.linalg.eigvalsh( __Ensemble @ __Ensemble.T )
        __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
        __sv   = numpy.sqrt( __svsq )
    else:
        raise ValueError("Error in requested variant name: %s"%__Using)
    #
    __tisv = __svsq / __svsq.sum()
    __qisv = 1. - __svsq.cumsum() / __svsq.sum()
    # Difference at the 1.e-16 level: __qisv = 1. - __tisv.cumsum()
    #
    return __sv, __svsq, __tisv, __qisv
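# Illustrative sketch (assumption): the four returned quantities are the singular
# values, their squares, the normalized information carried by each mode, and the
# remaining information after each mode.
#
#   sv, svsq, tisv, qisv = SingularValuesEstimation( E )
#   # tisv[i] = svsq[i] / svsq.sum()   and   qisv[i] = 1 - svsq[:i+1].sum() / svsq.sum()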
# ==============================================================================
def MaxL2NormByColumn(__Ensemble, __LcCsts = False, __IncludedPoints = []):
    "Maximum of the column-wise L2 norms"
    if __LcCsts and len(__IncludedPoints) > 0:
        normes = numpy.linalg.norm(
            numpy.take(__Ensemble, __IncludedPoints, axis=0, mode='clip'),
            axis = 0,
        )
    else:
        normes = numpy.linalg.norm( __Ensemble, axis = 0)
    nmax = numpy.max(normes)
    imax = numpy.argmax(normes)
    return nmax, imax, normes

def MaxLinfNormByColumn(__Ensemble, __LcCsts = False, __IncludedPoints = []):
    "Maximum of the column-wise Linf norms"
    if __LcCsts and len(__IncludedPoints) > 0:
        normes = numpy.linalg.norm(
            numpy.take(__Ensemble, __IncludedPoints, axis=0, mode='clip'),
            axis = 0, ord=numpy.inf,
        )
    else:
        normes = numpy.linalg.norm( __Ensemble, axis = 0, ord=numpy.inf)
    nmax = numpy.max(normes)
    imax = numpy.argmax(normes)
    return nmax, imax, normes
def InterpolationErrorByColumn(
        __Ensemble = None, __Basis = None, __Points = None, __M = 2,  # Usage 1
        __Differences = None,                                         # Usage 2
        __ErrorNorm = None,                                           # Common
        __LcCsts = False, __IncludedPoints = [],                      # Common
        __CDM = False,  # ComputeMaxDifference                        # Common
        __RMU = False,  # ReduceMemoryUse                             # Common
        __FTL = False,  # ForceTril                                   # Common
        ):
    "Analysis of the column-wise interpolation error norms"
    if __ErrorNorm == "L2":
        NormByColumn = MaxL2NormByColumn
    else:
        NormByColumn = MaxLinfNormByColumn
    #
    if __Differences is None and not __RMU:  # Usage 1
        if __FTL:
            rBasis = numpy.tril( __Basis[__Points, :] )
        else:
            rBasis = __Basis[__Points, :]
        rEnsemble = __Ensemble[__Points, :]
        #
        if __M > 1:
            rBasis_inv = numpy.linalg.inv(rBasis)
            Interpolator = numpy.dot(__Basis, numpy.dot(rBasis_inv, rEnsemble))
        else:
            rBasis_inv = 1. / rBasis
            Interpolator = numpy.outer(__Basis, numpy.outer(rBasis_inv, rEnsemble))
        #
        differences = __Ensemble - Interpolator
        #
        error, nbr, _ = NormByColumn(differences, __LcCsts, __IncludedPoints)
        #
        if __CDM:
            maxDifference = differences[:, nbr]
        #
    elif __Differences is None and __RMU:  # Usage 1
        if __FTL:
            rBasis = numpy.tril( __Basis[__Points, :] )
        else:
            rBasis = __Basis[__Points, :]
        rEnsemble = __Ensemble[__Points, :]
        #
        if __M > 1:
            rBasis_inv = numpy.linalg.inv(rBasis)
            rCoordinates = numpy.dot(rBasis_inv, rEnsemble)
        else:
            rBasis_inv = 1. / rBasis
            rCoordinates = numpy.outer(rBasis_inv, rEnsemble)
        #
        error = 0.
        nbr = -1
        for iCol in range(__Ensemble.shape[1]):
            if __M > 1:
                iDifference = __Ensemble[:, iCol] - numpy.dot(__Basis, rCoordinates[:, iCol])
            else:
                iDifference = __Ensemble[:, iCol] - numpy.ravel(numpy.outer(__Basis, rCoordinates[:, iCol]))
            #
            normDifference, _, _ = NormByColumn(iDifference, __LcCsts, __IncludedPoints)
            #
            if normDifference > error:
                error = normDifference
                nbr = iCol
        #
        if __CDM:
            maxDifference = __Ensemble[:, nbr] - numpy.dot(__Basis, rCoordinates[:, nbr])
        #
    else:  # Usage 2
        differences = __Differences
        #
        error, nbr, _ = NormByColumn(differences, __LcCsts, __IncludedPoints)
        #
        if __CDM:
            # Building this intermediate variable is expensive
            maxDifference = differences[:, nbr]
    #
    if __CDM:
        return error, nbr, maxDifference
    else:
        return error, nbr
# ==============================================================================
def EnsemblePerturbationWithGivenCovariance(
        __Ensemble,
        __Covariance,
        __Seed = None ):
    "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
    if hasattr(__Covariance, "assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Handle a covariance that is zero or nearly so
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Handle a covariance that is zero or nearly so
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Handle a covariance that is zero or nearly so
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Handle a covariance that is zero or nearly so
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance, "isscalar") and __Covariance.isscalar():
        # Handle a covariance that is a multiple of the identity
        __zero = 0.
        __std = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m, __n)).T
        #
    elif hasattr(__Covariance, "isvector") and __Covariance.isvector():
        # Handle a diagonal covariance with non-identical variances
        __zero = numpy.zeros(__n)
        __std = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
        #
    elif hasattr(__Covariance, "ismatrix") and __Covariance.ismatrix():
        # Handle a full covariance object
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
        #
    elif isinstance(__Covariance, numpy.ndarray):
        # Handle a full numpy covariance, knowing that this case is reached last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
        #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble
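# Illustrative sketch (assumption): perturb each of the m members of a
# hypothetical ensemble E with noise drawn from a covariance Q, reproducibly.
# Note that the input ensemble is modified in place and returned.
#
#   Ep = EnsemblePerturbationWithGivenCovariance( E.copy(), Q, __Seed = 123 )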
# ==============================================================================
def CovarianceInflation(
        __InputCovOrEns,
        __InflationType = None,
        __InflationFactor = None,
        __BackgroundCov = None ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if __InflationFactor is None:
        return __InputCovOrEns
    else:
        __InflationFactor = float(__InflationFactor)
    #
    __InputCovOrEns = numpy.asarray(__InputCovOrEns)
    if __InputCovOrEns.size == 0: return __InputCovOrEns
    #
    if __InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if __InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if __InflationFactor < 1.+mpr:  # No inflation = 1
            return __InputCovOrEns
        __OutputCovOrEns = __InflationFactor**2 * __InputCovOrEns
    #
    elif __InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if __InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if __InflationFactor < 1.+mpr:  # No inflation = 1
            return __InputCovOrEns
        __InputCovOrEnsMean = __InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        __OutputCovOrEns = __InputCovOrEnsMean[:, numpy.newaxis] \
            + __InflationFactor * (__InputCovOrEns - __InputCovOrEnsMean[:, numpy.newaxis])
    #
    elif __InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if __InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if __InflationFactor < mpr:  # No inflation = 0
            return __InputCovOrEns
        __n, __m = __InputCovOrEns.shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        __tr = __InputCovOrEns.trace()/__n
        if __InflationFactor > __tr:
            raise ValueError("Inflation factor for additive inflation has to be smaller than %.0e."%__tr)
        __OutputCovOrEns = (1. - __InflationFactor)*__InputCovOrEns + __InflationFactor * numpy.identity(__n)
    #
    elif __InflationType == "HybridOnBackgroundCovariance":
        if __InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if __InflationFactor < mpr:  # No inflation = 0
            return __InputCovOrEns
        __n, __m = __InputCovOrEns.shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if __BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if __InputCovOrEns.shape != __BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        __OutputCovOrEns = (1. - __InflationFactor) * __InputCovOrEns + __InflationFactor * __BackgroundCov
    #
    elif __InflationType == "Relaxation":
        raise NotImplementedError("Relaxation inflation type not implemented")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%__InflationType)
    #
    return __OutputCovOrEns
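# Illustrative sketch (assumption): multiplicative inflation of analysis
# anomalies EXa by 2% around their mean, as in Hunt 2007, section 2.3.5.
#
#   EXa_inflated = CovarianceInflation( EXa, "MultiplicativeOnAnalysisAnomalies", 1.02 )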
# ==============================================================================
def HessienneEstimation( __selfA, __nb, __HaM, __HtM, __BI, __RI ):
    "Hessian matrix estimation"
    #
    __HessienneI = []
    for i in range(int(__nb)):
        __ee = numpy.zeros((__nb, 1))
        __ee[i] = 1.
        __HtEE = numpy.dot(__HtM, __ee).reshape((-1, 1))
        __HessienneI.append( numpy.ravel( __BI * __ee + __HaM * (__RI * __HtEE) ) )
    #
    __A = numpy.linalg.inv(numpy.array( __HessienneI ))
    __A = (__A + __A.T) * 0.5  # Symmetry
    __A = __A + mpr*numpy.trace( __A ) * numpy.identity(__nb)  # Positivity
    #
    if min(__A.shape) != max(__A.shape):
        raise ValueError(
            "The %s a posteriori covariance matrix A"%(__selfA._name,)+\
            " is of shape %s, whereas it has to be a"%(str(__A.shape),)+\
            " square matrix. There is an error in the observation operator,"+\
            " please check it.")
    if (numpy.diag(__A) < 0).any():
        raise ValueError(
            "The %s a posteriori covariance matrix A"%(__selfA._name,)+\
            " has at least one negative value on its diagonal. There is an"+\
            " error in the observation operator, please check it.")
    if logging.getLogger().level < logging.WARNING:  # The check is only done in debug mode
        try:
            numpy.linalg.cholesky( __A )
            logging.debug("%s La matrice de covariance a posteriori A est bien symétrique définie positive."%(__selfA._name,))
        except Exception:
            raise ValueError(
                "The %s a posteriori covariance matrix A"%(__selfA._name,)+\
                " is not symmetric positive-definite. Please check your a"+\
                " priori covariances and your observation operator.")
    #
    return __A
# ==============================================================================
def QuantilesEstimations( selfA, A, Xa, HXa = None, Hm = None, HtM = None ):
    "A posteriori quantile estimation from A>0 (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Bounds handling
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"]  # Takes precedence
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"]  # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        LBounds = ForceNumericBounds( LBounds )
    __Xa = numpy.ravel(Xa)
    #
    # State sampling
    YfQ = None
    EXr = None
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
            dXr = (numpy.random.multivariate_normal(__Xa, A) - __Xa).reshape((-1, 1))
            if LBounds is not None:  # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr, LBounds[:, 0].reshape((-1, 1))) - __Xa.reshape((-1, 1))), axis=1)
                dXr = numpy.min(numpy.hstack((dXr, LBounds[:, 1].reshape((-1, 1))) - __Xa.reshape((-1, 1))), axis=1)
            dYr = HtM @ dXr
            Yr = HXa.reshape((-1, 1)) + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = __Xa + numpy.ravel(dXr)
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.random.multivariate_normal(__Xa, A)
            if LBounds is not None:  # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr.reshape((-1, 1)), LBounds[:, 0].reshape((-1, 1)))), axis=1)
                Xr = numpy.min(numpy.hstack((Xr.reshape((-1, 1)), LBounds[:, 1].reshape((-1, 1)))), axis=1)
            Yr = numpy.asarray(Hm( Xr ))
        else:
            raise ValueError("Quantile simulations can only be Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr.reshape((-1, 1))
            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1, 1))
        else:
            YfQ = numpy.hstack((YfQ, Yr.reshape((-1, 1))))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr, Xr.reshape((-1, 1))))
    #
    # Quantile extraction
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:, indice].reshape((-1, 1))
        else:          YQ = numpy.hstack((YQ, YfQ[:, indice].reshape((-1, 1))))
    if YQ is not None:  # Non-empty list of quantiles
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
    #
    return 0
# ==============================================================================
def ForceNumericBounds( __Bounds, __infNumbers = True ):
    "Forces the bounds to be numeric values, unless they are globally None"
    # Keep a default value of None if there are no bounds
    if __Bounds is None: return None
    #
    # Convert each individual None bound to +/- numeric infinity
    __Bounds = numpy.asarray( __Bounds, dtype=float )
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    if __infNumbers:
        __Bounds[numpy.isnan(__Bounds[:, 0]), 0] = -float('inf')
        __Bounds[numpy.isnan(__Bounds[:, 1]), 1] = float('inf')
    else:
        __Bounds[numpy.isnan(__Bounds[:, 0]), 0] = -sys.float_info.max
        __Bounds[numpy.isnan(__Bounds[:, 1]), 1] = sys.float_info.max
    #
    return __Bounds
# ==============================================================================
def RecentredBounds( __Bounds, __Center, __Scale = None ):
    "Recentres the bounds around 0, unless they are globally None"
    # Keep a default value of None if there are no bounds
    if __Bounds is None: return None
    #
    if __Scale is None:
        # Recentre the numeric bound values
        return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1, 1))
    else:
        # Recentre the numeric bound values and rescale them with a matrix
        return __Scale @ (ForceNumericBounds( __Bounds, False ) - numpy.ravel( __Center ).reshape((-1, 1)))
# ==============================================================================
def ApplyBounds( __Vector, __Bounds, __newClip = True ):
    "Applies numeric bounds to a point"
    # Keep the default value if there are no bounds
    if __Bounds is None: return __Vector
    #
    if not isinstance(__Vector, numpy.ndarray):  # Is an array
        raise ValueError("Incorrect array definition of vector data")
    if not isinstance(__Bounds, numpy.ndarray):  # Is an array
        raise ValueError("Incorrect array definition of bounds data")
    if 2*__Vector.size != __Bounds.size:  # Is a 2 column array of vector length
        raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size, __Vector.size))
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    #
    if __newClip:
        __Vector = __Vector.clip(
            __Bounds[:, 0].reshape(__Vector.shape),
            __Bounds[:, 1].reshape(__Vector.shape),
        )
    else:
        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1, 1)), numpy.asmatrix(__Bounds)[:, 0])), axis=1)
        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1, 1)), numpy.asmatrix(__Bounds)[:, 1])), axis=1)
        __Vector = numpy.asarray(__Vector)
    #
    return __Vector
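# Illustrative sketch (assumption): bounds are an (n, 2) array of [min, max] rows;
# None entries are first converted to +/- infinity, then the state X is clipped.
#
#   Bounds = ForceNumericBounds( [[0., None], [None, 10.]] )
#   Xc     = ApplyBounds( numpy.asarray(X), Bounds )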
# ==============================================================================
def VariablesAndIncrementsBounds( __Bounds, __BoxBounds, __Xini, __Name, __Multiplier = 1. ):
    "Derives variable bounds and increment bounds from one another if only one set is given"
    __Bounds = ForceNumericBounds( __Bounds )
    __BoxBounds = ForceNumericBounds( __BoxBounds )
    if __Bounds is None and __BoxBounds is None:
        raise ValueError("Algorithm %s requires bounds on all variables (by Bounds), or on all variable increments (by BoxBounds), or both, to be explicitly given."%(__Name,))
    elif __Bounds is None and __BoxBounds is not None:
        __Bounds = __BoxBounds
        logging.debug("%s Definition of parameter bounds from current parameter increment bounds"%(__Name,))
    elif __Bounds is not None and __BoxBounds is None:
        __BoxBounds = __Multiplier * (__Bounds - __Xini.reshape((-1, 1)))  # "M * [Xmin,Xmax]-Xini"
        logging.debug("%s Definition of parameter increment bounds from current parameter bounds"%(__Name,))
    return __Bounds, __BoxBounds
# ==============================================================================
def Apply3DVarRecentringOnEnsemble( __EnXn, __EnXf, __Ynpu, __HO, __R, __B, __SuppPars ):
    "Recentres the ensemble Xn around the 3DVAR analysis"
    __Betaf = __SuppPars["HybridCovarianceEquilibrium"]
    #
    Xf = EnsembleMean( __EnXf )
    Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
    Pf = (1 - __Betaf) * __B.asfullmatrix(Xf.size) + __Betaf * Pf
    #
    selfB = PartialAlgorithm("3DVAR")
    selfB._parameters["Minimizer"] = "LBFGSB"
    selfB._parameters["MaximumNumberOfIterations"] = __SuppPars["HybridMaximumNumberOfIterations"]
    selfB._parameters["CostDecrementTolerance"] = __SuppPars["HybridCostDecrementTolerance"]
    selfB._parameters["ProjectedGradientTolerance"] = -1
    selfB._parameters["GradientNormTolerance"] = 1.e-05
    selfB._parameters["StoreInternalVariables"] = False
    selfB._parameters["optiprint"] = -1
    selfB._parameters["optdisp"] = 0
    selfB._parameters["Bounds"] = None
    selfB._parameters["InitializationPoint"] = Xf
    from daAlgorithms.Atoms import std3dvar
    std3dvar.std3dvar(selfB, Xf, __Ynpu, None, __HO, None, __R, Pf)
    Xa = selfB.get("Analysis")[-1].reshape((-1, 1))
    del selfB
    #
    return Xa + EnsembleOfAnomalies( __EnXn )
# ==============================================================================
def GenerateRandomPointInHyperSphere( __Center, __Radius ):
    "Generates a random point uniformly distributed inside a hyper-sphere"
    __Dimension = numpy.asarray( __Center ).size
    __GaussDelta = numpy.random.normal( 0, 1, size=__Center.shape )
    __VectorNorm = numpy.linalg.norm( __GaussDelta )
    __PointOnHS = __Radius * (__GaussDelta / __VectorNorm)
    __MoveInHS = math.exp( math.log(numpy.random.uniform()) / __Dimension)  # rand()**1/n
    __PointInHS = __MoveInHS * __PointOnHS
    return __Center + __PointInHS
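# Illustrative sketch (assumption): a Gaussian draw is projected onto the sphere
# of radius __Radius, then pulled inward by u**(1/n) with u uniform, which yields
# a uniform draw in the ball centred on __Center.
#
#   P = GenerateRandomPointInHyperSphere( numpy.zeros(3), 1. )   # point in the unit ball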
# ==============================================================================
def GetNeighborhoodTopology( __ntype, __ipop ):
    "Returns a connection topology for a population of points"
    if __ntype in ["FullyConnectedNeighborhood", "FullyConnectedNeighbourhood", "gbest"]:
        __topology = [__ipop for __i in __ipop]
    elif __ntype in ["RingNeighborhoodWithRadius1", "RingNeighbourhoodWithRadius1", "lbest"]:
        __cpop = list(__ipop[-1:]) + list(__ipop) + list(__ipop[:1])
        __topology = [__cpop[__n:__n+3] for __n in range(len(__ipop))]
    elif __ntype in ["RingNeighborhoodWithRadius2", "RingNeighbourhoodWithRadius2"]:
        __cpop = list(__ipop[-2:]) + list(__ipop) + list(__ipop[:2])
        __topology = [__cpop[__n:__n+5] for __n in range(len(__ipop))]
    elif __ntype in ["AdaptativeRandomWith3Neighbors", "AdaptativeRandomWith3Neighbours", "abest"]:
        __cpop = 3*list(__ipop)
        __topology = [[__i]+list(numpy.random.choice(__cpop, 3)) for __i in __ipop]
    elif __ntype in ["AdaptativeRandomWith5Neighbors", "AdaptativeRandomWith5Neighbours"]:
        __cpop = 5*list(__ipop)
        __topology = [[__i]+list(numpy.random.choice(__cpop, 5)) for __i in __ipop]
    else:
        raise ValueError("Swarm topology type unavailable because name \"%s\" is unknown."%__ntype)
    #
    return __topology
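# Illustrative sketch (assumption): for a swarm of 4 particles, the "lbest" ring of
# radius 1 links each particle to its two neighbours, with wrap-around.
#
#   GetNeighborhoodTopology( "lbest", [0, 1, 2, 3] )
#   # -> [[3, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 0]]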
# ==============================================================================
def FindIndexesFromNames( __NameOfLocations = None, __ExcludeLocations = None, ForceArray = False ):
    "Expresses the indexes of the excluded names, ignoring the missing ones"
    if __ExcludeLocations is None:
        __ExcludeIndexes = ()
    elif isinstance(__ExcludeLocations, (list, numpy.ndarray, tuple)) and len(__ExcludeLocations) == 0:
        __ExcludeIndexes = ()
    # ----------
    elif __NameOfLocations is None:
        try:
            __ExcludeIndexes = numpy.asarray(__ExcludeLocations, dtype=int)
        except ValueError as e:
            if "invalid literal for int() with base 10:" in str(e):
                raise ValueError("to exclude named locations, initial location name list can not be void and has to have the same length as one state")
            else:
                raise ValueError(str(e))
    elif isinstance(__NameOfLocations, (list, numpy.ndarray, tuple)) and len(__NameOfLocations) == 0:
        try:
            __ExcludeIndexes = numpy.asarray(__ExcludeLocations, dtype=int)
        except ValueError as e:
            if "invalid literal for int() with base 10:" in str(e):
                raise ValueError("to exclude named locations, initial location name list can not be void and has to have the same length as one state")
            else:
                raise ValueError(str(e))
    # ----------
    else:
        try:
            __ExcludeIndexes = numpy.asarray(__ExcludeLocations, dtype=int)
        except ValueError as e:
            if "invalid literal for int() with base 10:" in str(e):
                if len(__NameOfLocations) < 1.e6+1 and len(__ExcludeLocations) > 1500:
                    __Heuristic = True
                else:
                    __Heuristic = False
                if ForceArray or __Heuristic:
                    # Array-based lookup, allowing invalid names but not very efficient
                    __NameToIndex = dict(numpy.array((
                        __NameOfLocations,
                        range(len(__NameOfLocations))
                    )).T)
                    __ExcludeIndexes = numpy.asarray([__NameToIndex.get(k, -1) for k in __ExcludeLocations], dtype=int)
                    #
                else:
                    # List-based lookup, allowing invalid names and very efficient
                    def __NameToIndex_get( cle, default = -1 ):
                        if cle in __NameOfLocations:
                            return __NameOfLocations.index(cle)
                        else:
                            return default
                    __ExcludeIndexes = numpy.asarray([__NameToIndex_get(k, -1) for k in __ExcludeLocations], dtype=int)
                    #
                    # List-based lookup forbidding invalid names, slightly more efficient still
                    # __ExcludeIndexes = numpy.asarray([__NameOfLocations.index(k) for k in __ExcludeLocations], dtype=int)
                    #
                # Ignore the missing names
                __ExcludeIndexes = numpy.compress(__ExcludeIndexes > -1, __ExcludeIndexes)
                if len(__ExcludeIndexes) == 0: __ExcludeIndexes = ()
            else:
                raise ValueError(str(e))
    # ----------
    return __ExcludeIndexes
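# Illustrative sketch (assumption): excluded locations can be given by name or by
# index; names missing from the reference list are silently ignored.
#
#   FindIndexesFromNames( ["P1", "P2", "P3"], ["P3", "P1", "P9"] )   # -> array([2, 0])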
# ==============================================================================
def BuildComplexSampleList(
        __SampleAsnUplet,
        __SampleAsExplicitHyperCube,
        __SampleAsMinMaxStepHyperCube,
        __SampleAsMinMaxLatinHyperCube,
        __SampleAsMinMaxSobolSequence,
        __SampleAsIndependantRandomVariables,
        __X0,
        __Seed = None ):
    "Builds an iterable list of sample states from one of the sample descriptions"
    # ---------------------------
    if len(__SampleAsnUplet) > 0:
        sampleList = __SampleAsnUplet
        for i, Xx in enumerate(sampleList):
            if numpy.ravel(Xx).size != __X0.size:
                raise ValueError("The size %i of the %ith state X in the sample and %i of the checking point Xb are different, they have to be identical."%(numpy.ravel(Xx).size, i+1, __X0.size))
    # ---------------------------
    elif len(__SampleAsExplicitHyperCube) > 0:
        sampleList = itertools.product(*list(__SampleAsExplicitHyperCube))
    # ---------------------------
    elif len(__SampleAsMinMaxStepHyperCube) > 0:
        coordinatesList = []
        for i, dim in enumerate(__SampleAsMinMaxStepHyperCube):
            if len(dim) != 3:
                raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be [min,max,step]."%(i, dim))
            else:
                coordinatesList.append(numpy.linspace(dim[0], dim[1], 1+int((float(dim[1])-float(dim[0]))/float(dim[2]))))
        sampleList = itertools.product(*coordinatesList)
    # ---------------------------
    elif len(__SampleAsMinMaxLatinHyperCube) > 0:
        import scipy.stats, warnings
        if vt(scipy.version.version) <= vt("1.7.0"):
            __msg = "In order to use Latin Hypercube sampling, you must at least use Scipy version 1.7.0 (and you are presently using Scipy %s). A void sample is then generated."%scipy.version.version
            warnings.warn(__msg, FutureWarning, stacklevel=50)
            sampleList = []
        else:
            __spDesc = list(__SampleAsMinMaxLatinHyperCube)
            __nbDime, __nbSamp = map(int, __spDesc.pop())  # The last item holds (dimension, number of samples)
            __sample = scipy.stats.qmc.LatinHypercube(
                d = len(__spDesc),
                seed = numpy.random.default_rng(__Seed),
            )
            __sample = __sample.random(n = __nbSamp)
            __bounds = numpy.array(__spDesc)[:, 0:2]
            __l_bounds = __bounds[:, 0]
            __u_bounds = __bounds[:, 1]
            sampleList = scipy.stats.qmc.scale(__sample, __l_bounds, __u_bounds)
    # ---------------------------
    elif len(__SampleAsMinMaxSobolSequence) > 0:
        import scipy.stats, warnings
        if vt(scipy.version.version) <= vt("1.7.0"):
            __msg = "In order to use Sobol sequence sampling, you must at least use Scipy version 1.7.0 (and you are presently using Scipy %s). A void sample is then generated."%scipy.version.version
            warnings.warn(__msg, FutureWarning, stacklevel=50)
            sampleList = []
        else:
            __spDesc = list(__SampleAsMinMaxSobolSequence)
            __nbDime, __nbSamp = map(int, __spDesc.pop())  # The last item holds (dimension, number of samples)
            if __nbDime != len(__spDesc):
                warnings.warn("Declared space dimension (%i) is not equal to number of bounds (%i), the last one will be used."%(__nbDime, len(__spDesc)), FutureWarning, stacklevel=50)
            __sample = scipy.stats.qmc.Sobol(
                d = len(__spDesc),
                seed = numpy.random.default_rng(__Seed),
            )
            __sample = __sample.random_base2(m = int(math.log2(__nbSamp))+1)
            __bounds = numpy.array(__spDesc)[:, 0:2]
            __l_bounds = __bounds[:, 0]
            __u_bounds = __bounds[:, 1]
            sampleList = scipy.stats.qmc.scale(__sample, __l_bounds, __u_bounds)
    # ---------------------------
    elif len(__SampleAsIndependantRandomVariables) > 0:
        coordinatesList = []
        for i, dim in enumerate(__SampleAsIndependantRandomVariables):
            if len(dim) != 3:
                raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be ('distribution',(parameters),length) with distribution in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]."%(i, dim))
            elif not( str(dim[0]) in ['normal', 'lognormal', 'uniform', 'weibull'] and hasattr(numpy.random, str(dim[0])) ):
                raise ValueError("For dimension %i, the distribution name \"%s\" is not allowed, please choose in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]"%(i, dim[0]))
            else:
                distribution = getattr(numpy.random, str(dim[0]), 'normal')
                coordinatesList.append(distribution(*dim[1], size=max(1, int(dim[2]))))
        sampleList = itertools.product(*coordinatesList)
    else:
        sampleList = iter([__X0,])
    # ---------------------------
    return sampleList
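# Illustrative sketch (assumption, using the positional argument order shown in
# the signature above): a regular 2D grid over [0,1]x[0,2] with steps 0.5 and 1.,
# checked against a reference state of matching size.
#
#   sampleList = BuildComplexSampleList(
#       [], [], [[0., 1., 0.5], [0., 2., 1.]], [], [], [],
#       numpy.zeros(2),
#       )
#   # -> iterator over the 3 x 3 = 9 grid points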
# ==============================================================================
def multiXOsteps(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle,
                 __CovForecast = False):
    """
    Multi-step forecast with one correction per step (multi-method)
    """
    #
    # Initialisation
    # --------------
    if selfA._parameters["EstimationOf"] == "State":
        if len(selfA.StoredVariables["Analysis"]) == 0 or not selfA._parameters["nextStep"]:
            Xn = numpy.asarray(Xb)
            if __CovForecast: Pn = B
            selfA.StoredVariables["Analysis"].store( Xn )
            if selfA._toStore("APosterioriCovariance"):
                if hasattr(B, "asfullmatrix"):
                    selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
                else:
                    selfA.StoredVariables["APosterioriCovariance"].store( B )
            selfA._setInternalState("seed", numpy.random.get_state())
        elif selfA._parameters["nextStep"]:
            Xn = selfA._getInternalState("Xn")
            if __CovForecast: Pn = selfA._getInternalState("Pn")
    else:
        Xn = numpy.asarray(Xb)
        if __CovForecast: Pn = B
    #
    if hasattr(Y, "stepnumber"):
        duration = Y.stepnumber()
    else:
        duration = 2
    #
    # Multi-steps
    # -----------
    for step in range(duration-1):
        selfA.StoredVariables["CurrentStepNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        #
        if hasattr(Y, "store"):
            Ynpu = numpy.asarray( Y[step+1] ).reshape((-1, 1))
        else:
            Ynpu = numpy.asarray( Y ).reshape((-1, 1))
        #
        if U is not None:
            if hasattr(U, "store") and len(U) > 1:
                Un = numpy.asarray( U[step] ).reshape((-1, 1))
            elif hasattr(U, "store") and len(U) == 1:
                Un = numpy.asarray( U[0] ).reshape((-1, 1))
            else:
                Un = numpy.asarray( U ).reshape((-1, 1))
        else:
            Un = None
        #
        # Predict (Time Update)
        # ---------------------
        if selfA._parameters["EstimationOf"] == "State":
            if __CovForecast:
                Mt = EM["Tangent"].asMatrix(Xn)
                Mt = Mt.reshape(Xn.size, Xn.size)  # ADAO & check shape
                Ma = EM["Adjoint"].asMatrix(Xn)
                Ma = Ma.reshape(Xn.size, Xn.size)  # ADAO & check shape
                Pn_predicted = Q + Mt @ (Pn @ Ma)
            M = EM["Direct"].appliedControledFormTo
            Xn_predicted = M( (Xn, Un) ).reshape((-1, 1))
            if CM is not None and "Tangent" in CM and Un is not None:  # Warning: if Cm is also included in M, it is applied twice!
                Cm = CM["Tangent"].asMatrix(Xn_predicted)
                Cm = Cm.reshape(Xn.size, Un.size)  # ADAO & check shape
                Xn_predicted = Xn_predicted + (Cm @ Un).reshape((-1, 1))
        elif selfA._parameters["EstimationOf"] == "Parameters":  # No forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            if __CovForecast: Pn_predicted = Pn
        Xn_predicted = numpy.asarray(Xn_predicted).reshape((-1, 1))
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if __CovForecast:
            if hasattr(Pn_predicted, "asfullmatrix"):
                Pn_predicted = Pn_predicted.asfullmatrix(Xn.size)
            else:
                Pn_predicted = numpy.asarray(Pn_predicted).reshape((Xn.size, Xn.size))
            if selfA._toStore("ForecastCovariance"):
                selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        #
        # Correct (Measurement Update)
        # ----------------------------
        if __CovForecast:
            oneCycle(selfA, Xn_predicted, Ynpu, Un, HO, CM, R, Pn_predicted, True)
        else:
            oneCycle(selfA, Xn_predicted, Ynpu, Un, HO, CM, R, B, True)
        #
        # --------------------------
        Xn = selfA._getInternalState("Xn")
        if __CovForecast: Pn = selfA._getInternalState("Pn")
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print("\n AUTODIAGNOSTIC\n")