# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2020 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
__doc__ = """
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
import math, numpy, scipy
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( paire ):
    assert len(paire) == 2, "Incorrect number of arguments"
    X, funcrepr = paire
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    __HX = __fonction( __X )
    return numpy.ravel( __HX )
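
# A minimal usage sketch for ExecuteFunction (illustrative only): the "paire"
# argument is a 2-tuple made of the evaluation point and a dictionary locating
# the user function on disk. The path, module and function names below are
# hypothetical placeholders, not values coming from this module.
#
#     funcrepr = {
#         "__userFunction__path" : "/path/to/user/module",  # hypothetical
#         "__userFunction__modl" : "usermodule",            # hypothetical
#         "__userFunction__name" : "SimulationH",           # hypothetical
#         }
#     HX = ExecuteFunction( (numpy.ones(3), funcrepr) )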
# ==============================================================================
class FDApproximation(object):
    """
    This class provides an interface to define approximated operators. When an
    object is created with a function "Function", one obtains an object with
    three methods: "DirectOperator", "TangentOperator" and "AdjointOperator".
    The finite difference (FD) approximation is controlled either with the
    multiplicative increment "increment" (1% by default), or with the fixed
    increment "dX" which is then multiplied by "increment" (hence in %), and
    centered FD are performed if the boolean "centeredDF" is true.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations: %s"%(self.__mfEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increments
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct calculation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct calculation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicate detection: %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def DirectOperator(self, X ):
        """
        Direct calculation using the given function.
        """
        logging.debug("FDA DirectOperator calculation (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Computes the tangent operator as the Jacobian by finite differences,
        i.e. the gradient of H at X. Directional finite differences are used
        around the point X. X is a numpy.matrix.

        Centered finite differences (second-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by (2*dXi)
        3/ Each result, component-wise, becomes a column of the Jacobian

        Non-centered finite differences (first-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are computed and divided by dXi
        4/ Each result, component-wise, becomes a column of the Jacobian
        """
        logging.debug("FDA Starting Jacobian calculation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA Jacobian calculation (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA Jacobian calculation (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi, funcrepr) )
                        _jobs.append( (_X_moins_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X.A1, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
        #
        logging.debug("FDA End of Jacobian calculation")
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire ):
        """
        Tangent calculation using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
            X, dX = _paire
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            X, dX = paire
        #
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # --------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Computation of the linearized value of H at X applied to dX
            # ------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire ):
        """
        Adjoint calculation using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
            X, Y = _paire
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            X, Y = paire
        #
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # --------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Computation of the adjoint value at X applied to Y
            # ---------------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
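
# A minimal usage sketch of FDApproximation (illustrative only, assuming a
# user-provided simulator "H" taking and returning a numpy vector; runnable
# only in an environment where daCore is importable):
#
#     H = lambda x: numpy.array([numpy.sum(numpy.ravel(x)**2), numpy.ravel(x)[0]])
#     FDA = FDApproximation( Function = H, increment = 0.01, centeredDF = True )
#     X0 = numpy.array([1., 2., 3.])
#     HX0  = FDA.DirectOperator( X0 )              # direct value H(X0)
#     J    = FDA.TangentMatrix( X0 )               # FD Jacobian of H at X0
#     HtdX = FDA.TangentOperator( (X0, X0/10.) )   # linearized H applied to dX
#     HaY  = FDA.AdjointOperator( (X0, HX0) )      # adjoint of H applied to Y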
# ==============================================================================
def mmqr(
        func     = None,
        x0       = None,
        fprime   = None,
        bounds   = None,
        quantile = 0.5,
        maxfun   = 15000,
        toler    = 1.e-06,
        y        = None,
        ):
    """
    Software implementation of the MMQR algorithm, based on the publication:
    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
    """
    # Recovery of initial data and information
    # -----------------------------------------
    variables = numpy.ravel( x0 )
    mesures   = numpy.ravel( y )
    increment = sys.float_info[0]
    p         = variables.size
    n         = mesures.size
    quantile  = float(quantile)
    #
    # Computation of the MM parameters
    # --------------------------------
    tn      = float(toler) / n
    e0      = -tn / math.log(tn)
    epsilon = (e0-tn)/(1+math.log(e0))
    #
    # Initialization computations
    # ---------------------------
    residus = mesures - numpy.ravel( func( variables ) )
    poids   = 1./(epsilon+numpy.abs(residus))
    veps    = 1. - 2. * quantile - residus * poids
    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
    iteration = 0
    #
    # Iterative search
    # ----------------
    while (increment > toler) and (iteration < maxfun) :
        iteration += 1
        #
        Derivees  = numpy.array(fprime(variables))
        Derivees  = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it went through YACS pipes
        DeriveesT = Derivees.transpose()
        M         = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
        SM        = numpy.transpose(numpy.dot( DeriveesT , veps ))
        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
        #
        variables = variables + step
        if bounds is not None:
            # Warning: avoid an infinite loop if an interval is too small
            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
                step      = step/2.
                variables = variables - step
        residus   = mesures - numpy.ravel( func(variables) )
        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
            step      = step/2.
            variables = variables - step
            residus   = mesures - numpy.ravel( func(variables) )
            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        increment     = lastsurrogate-surrogate
        poids         = 1./(epsilon+numpy.abs(residus))
        veps          = 1. - 2. * quantile - residus * poids
        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
    #
    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
    #
    return variables, Ecart, [n,p,iteration,increment,0]
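
# A minimal usage sketch of mmqr (illustrative only): median regression
# (quantile 0.5) of a linear model on hypothetical data. The model "func",
# its Jacobian "fprime" and the data below are placeholders, not values
# coming from this module.
#
#     A      = numpy.array([[1., 0.], [1., 1.], [1., 2.], [1., 3.]])
#     yobs   = numpy.array([0.9, 2.1, 2.9, 4.2])
#     func   = lambda beta: A @ numpy.ravel(beta)
#     fprime = lambda beta: A
#     beta, Ecart, infos = mmqr(func=func, x0=numpy.zeros(2), fprime=fprime,
#                               quantile=0.5, y=yobs)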
# ==============================================================================
def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Stochastic EnKF (Evensen 1994, Burgers 1998)

    selfA is identical to the "self" of the calling algorithm and contains
    the required information.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps identical to the number of observation steps
    # -------------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of B and R inverses
    # ----------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    Xn = numpy.asmatrix(numpy.dot( Xb.reshape(__n,1), numpy.ones((1,__m)) ))
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"])
    #
    Xn_predicted = numpy.asmatrix(numpy.zeros((__n,__m)))
    HX_predicted = numpy.asmatrix(numpy.zeros((__p,__m)))
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
        else:
            Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            for i in range(__m):
                qi = numpy.asmatrix(numpy.random.multivariate_normal(numpy.zeros(__n), Qn, (1,1,1))).T
                Xn_predicted[:,i] = numpy.asmatrix(numpy.ravel( M((Xn[:,i], Un)) )).T + qi
                HX_predicted[:,i] = numpy.asmatrix(numpy.ravel( H((Xn_predicted[:,i], Un)) )).T
            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            for i in range(__m):
                HX_predicted[:,i] = numpy.asmatrix(numpy.ravel( H((Xn_predicted[:,i], Un)) )).T
        #
        # Mean of forecast and observation of forecast
        Xfm = numpy.asmatrix(numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))).T
        Hfm = numpy.asmatrix(numpy.ravel(HX_predicted.mean(axis=1, dtype=mfp).astype('float'))).T
        #
        PfHT, HPfHT = 0., 0.
        for i in range(__m):
            Exfi   = Xn_predicted[:,i] - Xfm
            Eyfi   = HX_predicted[:,i] - Hfm
            PfHT  += Exfi * Eyfi.T
            HPfHT += Eyfi * Eyfi.T
        PfHT  = (1./(__m-1)) * PfHT
        HPfHT = (1./(__m-1)) * HPfHT
        K = PfHT * ( R + HPfHT ).I
        #
        for i in range(__m):
            ri = numpy.asmatrix(numpy.random.multivariate_normal(numpy.zeros(__p), Rn, (1,1,1))).T
            Xn[:,i] = Xn_predicted[:,i] + K * (Ynpu + ri - HX_predicted[:,i])
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float')
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> with current J
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            Eai = (1/numpy.sqrt(__m-1)) * (Xn - Xa.reshape((__n,-1))) # Anomalies
            Pn  = Eai @ Eai.T
            Pn  = 0.5 * (Pn + Pn.T)
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
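
# The analysis step coded above in senkf follows the classical stochastic EnKF
# update, for each ensemble member i (a notation sketch, not executable code):
#
#     Kalman gain:    K    = Pf.H' * ( H.Pf.H' + R )^-1
#     Member update:  Xa_i = Xf_i + K * ( Y + ri - H(Xf_i) ),   ri ~ N(0,R)
#
# where Pf.H' and H.Pf.H' are estimated from the ensemble anomalies, as done
# in the PfHT/HPfHT accumulation loop above.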
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)

    selfA is identical to the "self" of the calling algorithm and contains
    the required information.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps identical to the number of observation steps
    # -------------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of B and R inverses
    # ----------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    RIdemi = R.choleskyI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    Xn = numpy.asmatrix(numpy.dot( Xb.reshape(__n,1), numpy.ones((1,__m)) ))
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"])
    #
    Xn_predicted = numpy.asmatrix(numpy.zeros((__n,__m)))
    HX_predicted = numpy.asmatrix(numpy.zeros((__p,__m)))
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
        else:
            Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            for i in range(__m):
                qi = numpy.asmatrix(numpy.random.multivariate_normal(numpy.zeros(__n), Qn, (1,1,1))).T
                Xn_predicted[:,i] = numpy.asmatrix(numpy.ravel( M((Xn[:,i], Un)) )).T + qi
                HX_predicted[:,i] = numpy.asmatrix(numpy.ravel( H((Xn_predicted[:,i], Un)) )).T
            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            for i in range(__m):
                HX_predicted[:,i] = numpy.asmatrix(numpy.ravel( H((Xn_predicted[:,i], Un)) )).T
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float')
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float')
        #
        EaX  = (Xn_predicted - Xfm.reshape((__n,-1))) / numpy.sqrt(__m-1)
        EaHX = (HX_predicted - Hfm.reshape((__p,-1))) / numpy.sqrt(__m-1)
        #
        mS    = RIdemi * EaHX
        delta = RIdemi * ( Ynpu.reshape((__p,-1)) - Hfm.reshape((__p,-1)) )
        mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
        vw    = mT @ mS.transpose() @ delta
        #
        Tdemi = numpy.linalg.cholesky(mT)
        mU    = numpy.eye(__m)
        #
        Xn = Xfm.reshape((__n,-1)) + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float')
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> with current J
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            Eai = (1/numpy.sqrt(__m-1)) * (Xn - Xa.reshape((__n,-1))) # Anomalies
            Pn  = Eai @ Eai.T
            Pn  = 0.5 * (Pn + Pn.T)
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
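
# The deterministic analysis coded above in etkf follows the ensemble
# transform formulation (a notation sketch, not executable code):
#
#     S  = R^-1/2 . EaHX                    (normalized observation anomalies)
#     T  = ( I + S'.S )^-1                  (transform matrix, ensemble space)
#     w  = T . S' . R^-1/2 . ( Y - H(Xf) )  (analysis weights)
#     Xa = Xf + EaX . ( w + sqrt(m-1) . T^1/2 . U ),   with U = I
#
# so that, contrary to senkf, no random observation perturbation is needed.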
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')