Minor internal clarification and performance improvements
[modules/adao.git] src/daComposant/daCore/NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.ravel( X ).reshape((-1,1))
41     __sys_path_tmp = list(sys.path) ; sys.path.insert(0,funcrepr["__userFunction__path"]) # copied, so that the restoration below is effective
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
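# Minimal illustration (comments only, added for clarity; not executed): the function
# above is meant to be called through multiprocessing.Pool.map with triplets of the
# form ( X, xArgs, funcrepr ), where xArgs is None or a dict of extra keyword
# arguments, and funcrepr locates the user function by path, module and name, e.g.
#
#     funcrepr = {
#         "__userFunction__path" : "/some/user/directory",   # hypothetical path
#         "__userFunction__modl" : "usermodule",              # hypothetical module
#         "__userFunction__name" : "simulation",              # hypothetical function
#         }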
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     This class is an interface to define approximated operators. When creating
55     an object with a user function "Function", one obtains an object that has
56     the 3 methods "DirectOperator", "TangentOperator" and "AdjointOperator".
57     The finite difference (FD) approximation is controlled either by the
58     multiplicative increment "increment" (1% by default), or by the fixed
59     increment "dX", which is multiplied by "increment" (hence in %). Centred
60     finite differences are used if the boolean "centeredDF" is true.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.ravel( dX )
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Direct computation using the provided function.
171
172         NB: the extraArgs are present to ensure call compatibility, but they
173         must not be passed here to the user function.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
180         #
181         return _HX
182
183     # ---------------------------------------------------------
184     def TangentMatrix(self, X ):
185         """
186         Computation of the tangent operator as the Jacobian matrix by finite
187         differences, i.e. the gradient of H at X. Directional finite differences
188         are used around the point X. X is a numpy.ndarray.
189
190         Centred finite differences (second-order approximation):
191         1/ For each component i of X, the perturbation dX[i] is added to and
192            subtracted from the component X[i], to build X_plus_dXi and X_moins_dXi,
193            and the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi =
194            H( X_moins_dXi ) are computed
195         2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
196            by the step 2*dXi
197         3/ Each result, component by component, becomes a column of the Jacobian
198
199         Non-centred finite differences (first-order approximation):
200         1/ For each component i of X, the perturbation dX[i] is added to the
201            component X[i] to build X_plus_dXi, and the response
202            HX_plus_dXi = H( X_plus_dXi ) is computed
203         2/ The central value HX = H(X) is computed
204         3/ The differences (HX_plus_dXi-HX) are computed and divided by
205            the step dXi
206         4/ Each result, component by component, becomes a column of the Jacobian
207
208         """
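        # In compact form (summary of the docstring above, added for clarity), column i
        # of the Jacobian is approximately:
        #   centred     : J[:,i] ~ ( H(X + dX[i]*e_i) - H(X - dX[i]*e_i) ) / (2*dX[i])
        #   non-centred : J[:,i] ~ ( H(X + dX[i]*e_i) - H(X) ) / dX[i]
        # where e_i is the i-th canonical basis vector.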
209         logging.debug("FDA Début du calcul de la Jacobienne")
210         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
211         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
212         #
213         if X is None or len(X)==0:
214             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
215         #
216         _X = numpy.ravel( X )
217         #
218         if self.__dX is None:
219             _dX  = self.__increment * _X
220         else:
221             _dX = numpy.ravel( self.__dX )
222         assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
223         assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
224         #
225         if (_dX == 0.).any():
226             moyenne = _dX.mean()
227             if moyenne == 0.:
228                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
229             else:
230                 _dX = numpy.where( _dX == 0., moyenne, _dX )
231         #
232         __alreadyCalculated  = False
233         if self.__avoidRC:
234             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
235             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
236             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
237                 __alreadyCalculated, __i = True, __alreadyCalculatedP
238                 logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
239         #
240         if __alreadyCalculated:
241             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
242             _Jacobienne = self.__listJPCR[__i]
243         else:
244             logging.debug("FDA   Calcul Jacobienne (explicite)")
245             if self.__centeredDF:
246                 #
247                 if self.__mpEnabled and not self.__mfEnabled:
248                     funcrepr = {
249                         "__userFunction__path" : self.__userFunction__path,
250                         "__userFunction__modl" : self.__userFunction__modl,
251                         "__userFunction__name" : self.__userFunction__name,
252                     }
253                     _jobs = []
254                     for i in range( len(_dX) ):
255                         _dXi            = _dX[i]
256                         _X_plus_dXi     = numpy.array( _X, dtype=float )
257                         _X_plus_dXi[i]  = _X[i] + _dXi
258                         _X_moins_dXi    = numpy.array( _X, dtype=float )
259                         _X_moins_dXi[i] = _X[i] - _dXi
260                         #
261                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
262                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
263                     #
264                     import multiprocessing
265                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
266                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
267                     self.__pool.close()
268                     self.__pool.join()
269                     #
270                     _Jacobienne  = []
271                     for i in range( len(_dX) ):
272                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
273                     #
274                 elif self.__mfEnabled:
275                     _xserie = []
276                     for i in range( len(_dX) ):
277                         _dXi            = _dX[i]
278                         _X_plus_dXi     = numpy.array( _X, dtype=float )
279                         _X_plus_dXi[i]  = _X[i] + _dXi
280                         _X_moins_dXi    = numpy.array( _X, dtype=float )
281                         _X_moins_dXi[i] = _X[i] - _dXi
282                         #
283                         _xserie.append( _X_plus_dXi )
284                         _xserie.append( _X_moins_dXi )
285                     #
286                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
287                     #
288                     _Jacobienne  = []
289                     for i in range( len(_dX) ):
290                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
291                     #
292                 else:
293                     _Jacobienne  = []
294                     for i in range( _dX.size ):
295                         _dXi            = _dX[i]
296                         _X_plus_dXi     = numpy.array( _X, dtype=float )
297                         _X_plus_dXi[i]  = _X[i] + _dXi
298                         _X_moins_dXi    = numpy.array( _X, dtype=float )
299                         _X_moins_dXi[i] = _X[i] - _dXi
300                         #
301                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
302                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
303                         #
304                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
305                 #
306             else:
307                 #
308                 if self.__mpEnabled and not self.__mfEnabled:
309                     funcrepr = {
310                         "__userFunction__path" : self.__userFunction__path,
311                         "__userFunction__modl" : self.__userFunction__modl,
312                         "__userFunction__name" : self.__userFunction__name,
313                     }
314                     _jobs = []
315                     _jobs.append( (_X, self.__extraArgs, funcrepr) )
316                     for i in range( len(_dX) ):
317                         _X_plus_dXi    = numpy.array( _X, dtype=float )
318                         _X_plus_dXi[i] = _X[i] + _dX[i]
319                         #
320                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
321                     #
322                     import multiprocessing
323                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
324                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
325                     self.__pool.close()
326                     self.__pool.join()
327                     #
328                     _HX = _HX_plus_dX.pop(0)
329                     #
330                     _Jacobienne = []
331                     for i in range( len(_dX) ):
332                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
333                     #
334                 elif self.__mfEnabled:
335                     _xserie = []
336                     _xserie.append( _X )
337                     for i in range( len(_dX) ):
338                         _X_plus_dXi    = numpy.array( _X, dtype=float )
339                         _X_plus_dXi[i] = _X[i] + _dX[i]
340                         #
341                         _xserie.append( _X_plus_dXi )
342                     #
343                     _HX_plus_dX = self.DirectOperator( _xserie )
344                     #
345                     _HX = _HX_plus_dX.pop(0)
346                     #
347                     _Jacobienne = []
348                     for i in range( len(_dX) ):
349                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
350                     #
351                 else:
352                     _Jacobienne  = []
353                     _HX = self.DirectOperator( _X )
354                     for i in range( _dX.size ):
355                         _dXi            = _dX[i]
356                         _X_plus_dXi     = numpy.array( _X, dtype=float )
357                         _X_plus_dXi[i]  = _X[i] + _dXi
358                         #
359                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
360                         #
361                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
362                 #
363             #
364             _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
365             if self.__avoidRC:
366                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
367                 while len(self.__listJPCP) > self.__lenghtRJ:
368                     self.__listJPCP.pop(0)
369                     self.__listJPCI.pop(0)
370                     self.__listJPCR.pop(0)
371                     self.__listJPPN.pop(0)
372                     self.__listJPIN.pop(0)
373                 self.__listJPCP.append( copy.copy(_X) )
374                 self.__listJPCI.append( copy.copy(_dX) )
375                 self.__listJPCR.append( copy.copy(_Jacobienne) )
376                 self.__listJPPN.append( numpy.linalg.norm(_X) )
377                 self.__listJPIN.append( numpy.linalg.norm(_dX) )
378         #
379         logging.debug("FDA Fin du calcul de la Jacobienne")
380         #
381         return _Jacobienne
382
383     # ---------------------------------------------------------
384     def TangentOperator(self, paire, **extraArgs ):
385         """
386         Tangent computation using the Jacobian matrix.
387
388         NB: the extraArgs are present to ensure call compatibility, but they
389         must not be passed here to the user function.
390         """
391         if self.__mfEnabled:
392             assert len(paire) == 1, "Incorrect length of arguments"
393             _paire = paire[0]
394             assert len(_paire) == 2, "Incorrect number of arguments"
395         else:
396             assert len(paire) == 2, "Incorrect number of arguments"
397             _paire = paire
398         X, dX = _paire
399         _Jacobienne = self.TangentMatrix( X )
400         if dX is None or len(dX) == 0:
401             #
402             # Matrix form computation if the second argument is None
403             # -------------------------------------------------------------
404             if self.__mfEnabled: return [_Jacobienne,]
405             else:                return _Jacobienne
406         else:
407             #
408             # Computation of the linearised value of H at X applied to dX
409             # ------------------------------------------------------
410             _dX = numpy.ravel( dX )
411             _HtX = numpy.dot(_Jacobienne, _dX)
412             if self.__mfEnabled: return [_HtX,]
413             else:                return _HtX
414
415     # ---------------------------------------------------------
416     def AdjointOperator(self, paire, **extraArgs ):
417         """
418         Adjoint computation using the Jacobian matrix.
419
420         NB: the extraArgs are present to ensure call compatibility, but they
421         must not be passed here to the user function.
422         """
423         if self.__mfEnabled:
424             assert len(paire) == 1, "Incorrect length of arguments"
425             _paire = paire[0]
426             assert len(_paire) == 2, "Incorrect number of arguments"
427         else:
428             assert len(paire) == 2, "Incorrect number of arguments"
429             _paire = paire
430         X, Y = _paire
431         _JacobienneT = self.TangentMatrix( X ).T
432         if Y is None or len(Y) == 0:
433             #
434             # Matrix form computation if the second argument is None
435             # -------------------------------------------------------------
436             if self.__mfEnabled: return [_JacobienneT,]
437             else:                return _JacobienneT
438         else:
439             #
440             # Computation of the adjoint value at X applied to Y
441             # --------------------------------------------------
442             _Y = numpy.ravel( Y )
443             _HaY = numpy.dot(_JacobienneT, _Y)
444             if self.__mfEnabled: return [_HaY,]
445             else:                return _HaY
446
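# Minimal usage sketch (comments only, not executed by the module), assuming a
# hypothetical user function "simulation" mapping a vector of size 2 to a vector
# of size 2; the other names are the ones defined above:
#
#     def simulation( x ):
#         return numpy.array([ x[0]**2, x[0] + 2.*x[1] ])
#
#     FDA = FDApproximation( Function = simulation, centeredDF = True, increment = 0.01 )
#     HX  = FDA.DirectOperator( numpy.array([1., 2.]) )    # direct evaluation H(X)
#     J   = FDA.TangentMatrix( numpy.array([1., 2.]) )     # 2x2 Jacobian by centred FD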
447 # ==============================================================================
448 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
449     "Generation of an ensemble of _nbmembers centred random states"
450     #
451     _bgcenter = numpy.ravel(_bgcenter)[:,None]
452     if _nbmembers < 1:
453         raise ValueError("Number of members has to be at least 1 (given number: %s)."%(str(_nbmembers),))
454     #
455     if _bgcovariance is None:
456         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
457     else:
458         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
459         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
460     #
461     return BackgroundEnsemble
462
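# Minimal usage sketch (comments only, not executed by the module): an ensemble of
# 5 random states of size 3, centred on a zero background with identity covariance.
#
#     Xb = numpy.zeros(3)
#     B  = numpy.identity(3)
#     E  = EnsembleOfCenteredPerturbations( Xb, B, 5 )    # array of shape (3, 5)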
463 # ==============================================================================
464 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
465     "Generation of an ensemble of _nbmembers centred random states, built from _nbmembers-1 anomalies"
466     def __CenteredRandomAnomalies(Zr, N):
467         """
468         Generates a matrix of N random anomalies centred on Zr, following the
469         handwritten notes of MB and consistent with the code of PS, with eps = -1
470         """
471         eps = -1
472         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
473         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
474         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
475         Q = numpy.dot(Q,R)
476         Zr = numpy.dot(Q,Zr)
477         return Zr.T
478     #
479     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
480     if _nbmembers < 1:
481         raise ValueError("Number of members has to be at least 1 (given number: %s)."%(str(_nbmembers),))
482     if _bgcovariance is None:
483         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
484     else:
485         if _withSVD:
486             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
487             _nbctl = _bgcenter.size
488             if _nbmembers > _nbctl:
489                 _Z = numpy.concatenate((numpy.dot(
490                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
491                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
492             else:
493                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
494             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
495             BackgroundEnsemble = _bgcenter + _Zca
496         else:
497             if max(abs(_bgcovariance.flatten())) > 0:
498                 _nbctl = _bgcenter.size
499                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
500                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
501                 BackgroundEnsemble = _bgcenter + _Zca
502             else:
503                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
504     #
505     return BackgroundEnsemble
506
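# Minimal usage sketch (comments only, not executed by the module): the same kind of
# ensemble, built here from centred random anomalies, with or without the SVD of B.
#
#     E = EnsembleOfBackgroundPerturbations( numpy.zeros(3), numpy.identity(3), 5, _withSVD = True )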
507 # ==============================================================================
508 def EnsembleMean( __Ensemble ):
509     "Returns the empirical mean of an ensemble"
510     return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
511
512 # ==============================================================================
513 def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
514     "Returns the centred anomalies computed from an ensemble"
515     if __OptMean is None:
516         __Em = EnsembleMean( __Ensemble )
517     else:
518         __Em = numpy.ravel( __OptMean ).reshape((-1,1))
519     #
520     return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
521
522 # ==============================================================================
523 def EnsembleErrorCovariance( __Ensemble, __quick = False ):
524     "Returns the empirical estimate of the ensemble error covariance"
525     if __quick:
526         # Fast covariance, but rarely positive definite
527         __Covariance = numpy.cov( __Ensemble )
528     else:
529         # Result often identical to numpy.cov, but more robust
530         __n, __m = numpy.asarray( __Ensemble ).shape
531         __Anomalies = EnsembleOfAnomalies( __Ensemble )
532         # Empirical estimate
533         __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
534         # Enforces symmetry
535         __Covariance = ( __Covariance + __Covariance.T ) * 0.5
536         # Enforces positive definiteness
537         __epsilon    = mpr*numpy.trace( __Covariance )
538         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
539     #
540     return __Covariance
541
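# Minimal usage sketch (comments only, not executed by the module): empirical
# statistics of an ensemble E of shape (n, m), with one state per column.
#
#     E  = EnsembleOfCenteredPerturbations( numpy.zeros(3), numpy.identity(3), 100 )
#     Xm = EnsembleMean( E )              # empirical mean, shape (3, 1)
#     Ea = EnsembleOfAnomalies( E )       # centred anomalies, shape (3, 100)
#     Pe = EnsembleErrorCovariance( E )   # empirical covariance, shape (3, 3)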
542 # ==============================================================================
543 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
544     "Adds a perturbation to every member of an ensemble according to a prescribed covariance"
545     if hasattr(__Covariance,"assparsematrix"):
546         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
547             # Handling of a null or almost null covariance
548             return __Ensemble
549         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
550             # Handling of a null or almost null covariance
551             return __Ensemble
552     else:
553         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
554             # Handling of a null or almost null covariance
555             return __Ensemble
556         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
557             # Handling of a null or almost null covariance
558             return __Ensemble
559     #
560     __n, __m = __Ensemble.shape
561     if __Seed is not None: numpy.random.seed(__Seed)
562     #
563     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
564         # Handling of a covariance that is a multiple of the identity
565         __zero = 0.
566         __std  = numpy.sqrt(__Covariance.assparsematrix())
567         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
568     #
569     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
570         # Handling of a diagonal covariance with non-identical variances
571         __zero = numpy.zeros(__n)
572         __std  = numpy.sqrt(__Covariance.assparsematrix())
573         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
574     #
575     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
576         # Handling of a full covariance matrix
577         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
578     #
579     elif isinstance(__Covariance, numpy.ndarray):
580         # Handling of a full numpy covariance, knowing that this case is reached last
581         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
582     #
583     else:
584         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
585     #
586     return __Ensemble
587
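# Minimal usage sketch (comments only, not executed by the module): perturbation of
# all the members of an ensemble with a plain numpy covariance matrix (last case above).
#
#     E = numpy.zeros((3, 5))
#     R = 0.01 * numpy.identity(3)
#     E = EnsemblePerturbationWithGivenCovariance( E, R, __Seed = 1000 )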
588 # ==============================================================================
589 def CovarianceInflation(
590         InputCovOrEns,
591         InflationType   = None,
592         InflationFactor = None,
593         BackgroundCov   = None,
594         ):
595     """
596     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
597
598     Summary: Hunt 2007, section 2.3.5
599     """
600     if InflationFactor is None:
601         return InputCovOrEns
602     else:
603         InflationFactor = float(InflationFactor)
604     #
605     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
606         if InflationFactor < 1.:
607             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
608         if InflationFactor < 1.+mpr:
609             return InputCovOrEns
610         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
611     #
612     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
613         if InflationFactor < 1.:
614             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
615         if InflationFactor < 1.+mpr:
616             return InputCovOrEns
617         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
618         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
619             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
620     #
621     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
622         if InflationFactor < 0.:
623             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
624         if InflationFactor < mpr:
625             return InputCovOrEns
626         __n, __m = numpy.asarray(InputCovOrEns).shape
627         if __n != __m:
628             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
629         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
630     #
631     elif InflationType == "HybridOnBackgroundCovariance":
632         if InflationFactor < 0.:
633             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
634         if InflationFactor < mpr:
635             return InputCovOrEns
636         __n, __m = numpy.asarray(InputCovOrEns).shape
637         if __n != __m:
638             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
639         if BackgroundCov is None:
640             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
641         if InputCovOrEns.shape != BackgroundCov.shape:
642             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
643         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
644     #
645     elif InflationType == "Relaxation":
646         raise NotImplementedError("InflationType Relaxation")
647     #
648     else:
649         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
650     #
651     return OutputCovOrEns
652
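# Minimal usage sketch (comments only, not executed by the module): multiplicative
# inflation of the anomalies of an ensemble around its mean, with a factor of 1.1.
#
#     E  = EnsembleOfCenteredPerturbations( numpy.zeros(3), numpy.identity(3), 10 )
#     Ei = CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 1.1 )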
653 # ==============================================================================
654 def HessienneEstimation(nb, HaM, HtM, BI, RI):
655     "Estimation of the Hessian matrix"
656     #
657     HessienneI = []
658     for i in range(int(nb)):
659         _ee    = numpy.zeros((nb,1))
660         _ee[i] = 1.
661         _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
662         HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
663     #
664     A = numpy.linalg.inv(numpy.array( HessienneI ))
665     #
666     if min(A.shape) != max(A.shape):
667         raise ValueError("The a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(str(A.shape),))
668     if (numpy.diag(A) < 0).any():
669         raise ValueError("The a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it.")
670     if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
671         try:
672             L = numpy.linalg.cholesky( A )
673         except:
674             raise ValueError("The a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator.")
675     #
676     return A
677
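# Minimal usage sketch (comments only, not executed by the module), assuming the
# operators are given as numpy matrices so that "*" is a matrix product: it builds
# A = (B^-1 + H^T R^-1 H)^-1 for a linear observation operator H of shape (2, 3).
#
#     H  = numpy.matrix([[1., 0., 0.], [0., 1., 1.]])
#     BI = numpy.matrix(numpy.identity(3))    # inverse of B
#     RI = numpy.matrix(numpy.identity(2))    # inverse of R
#     A  = HessienneEstimation( 3, H.T, H, BI, RI )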
678 # ==============================================================================
679 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
680     "A posteriori quantiles estimation (selfA is modified)"
681     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
682     #
683     # Handling of the bounds
684     if "StateBoundsForQuantiles" in selfA._parameters:
685         LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes precedence
686     elif "Bounds" in selfA._parameters:
687         LBounds = selfA._parameters["Bounds"]  # Reasonable default
688     else:
689         LBounds = None
690     if LBounds is not None:
691         LBounds = ForceNumericBounds( LBounds )
692     _Xa = numpy.ravel(Xa)
693     #
694     # Sampling of the states
695     YfQ  = None
696     EXr  = None
697     for i in range(nbsamples):
698         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
699             dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
700             if LBounds is not None: # "EstimateProjection" by default
701                 dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
702                 dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
703             dYr = HtM @ dXr
704             Yr = HXa.reshape((-1,1)) + dYr
705             if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
706         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
707             Xr = numpy.random.multivariate_normal(_Xa,A)
708             if LBounds is not None: # "EstimateProjection" by default
709                 Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
710                 Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
711             Yr = Hm( Xr )
712         else:
713             raise ValueError("Quantile simulation type has to be either Linear or NonLinear.")
714         #
715         if YfQ is None:
716             YfQ = Yr.reshape((-1,1))
717             if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
718         else:
719             YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
720             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
721     #
722     # Extraction of the quantiles
723     YfQ.sort(axis=-1)
724     YQ = None
725     for quantile in selfA._parameters["Quantiles"]:
726         if not (0. <= float(quantile) <= 1.): continue
727         indice = int(nbsamples * float(quantile) - 1./nbsamples)
728         if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
729         else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
730     if YQ is not None: # Non-empty list of quantiles
731         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
732     if selfA._toStore("SampledStateForQuantiles"):
733         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
734     #
735     return 0
736
737 # ==============================================================================
738 def ForceNumericBounds( __Bounds ):
739     "Forces the bounds to be numerical values, unless globally None"
740     # Keeps the default value None if there are no bounds
741     if __Bounds is None: return None
742     # Converts every individual None bound into +/- infinity
743     __Bounds = numpy.asarray( __Bounds, dtype=float )
744     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
745         raise ValueError("Incorrectly shaped bounds data")
746     __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
747     __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
748     return __Bounds
749
750 # ==============================================================================
751 def RecentredBounds( __Bounds, __Center):
752     "Recentres the bounds around 0, unless globally None"
753     # Keeps the default value None if there are no bounds
754     if __Bounds is None: return None
755     # Recentres the numerical values of the bounds
756     return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
757
758 # ==============================================================================
759 def ApplyBounds( __Vector, __Bounds, __newClip = True):
760     "Applies numerical bounds to a point"
761     # Keeps the default value if there are no bounds
762     if __Bounds is None: return __Vector
763     #
764     if not isinstance(__Vector, numpy.ndarray): # Has to be an array
765         raise ValueError("Incorrect array definition of vector data")
766     if not isinstance(__Bounds, numpy.ndarray): # Has to be an array
767         raise ValueError("Incorrect array definition of bounds data")
768     if 2*__Vector.size != __Bounds.size: # Has to be a 2-column array of vector length
769         raise ValueError("Incorrect bounds number to be applied for this vector")
770     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
771         raise ValueError("Incorrectly shaped bounds data")
772     #
773     if __newClip:
774         __Vector = __Vector.clip(
775             __Bounds[:,0].reshape(__Vector.shape),
776             __Bounds[:,1].reshape(__Vector.shape),
777             )
778     else:
779         __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
780         __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
781         __Vector = numpy.asarray(__Vector)
782     #
783     return __Vector
784
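# Minimal usage sketch (comments only, not executed by the module): clipping of a
# state of size 3 into bounds, with None bounds first converted to +/- infinity.
#
#     Bounds = ForceNumericBounds( [[0., 1.], [0., None], [None, 1.]] )
#     X      = numpy.array([0.5, -2., 3.])
#     Xc     = ApplyBounds( X, Bounds )    # gives [0.5, 0., 1.]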
785 # ==============================================================================
786 def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
787     """
788     Constrained Unscented Kalman Filter
789     """
790     if selfA._parameters["EstimationOf"] == "Parameters":
791         selfA._parameters["StoreInternalVariables"] = True
792     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
793     #
794     L     = Xb.size
795     Alpha = selfA._parameters["Alpha"]
796     Beta  = selfA._parameters["Beta"]
797     if selfA._parameters["Kappa"] == 0:
798         if selfA._parameters["EstimationOf"] == "State":
799             Kappa = 0
800         elif selfA._parameters["EstimationOf"] == "Parameters":
801             Kappa = 3 - L
802     else:
803         Kappa = selfA._parameters["Kappa"]
804     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
805     Gamma  = math.sqrt( L + Lambda )
806     #
807     Ww = []
808     Ww.append( 0. )
809     for i in range(2*L):
810         Ww.append( 1. / (2.*(L + Lambda)) )
811     #
812     Wm = numpy.array( Ww )
813     Wm[0] = Lambda / (L + Lambda)
814     Wc = numpy.array( Ww )
815     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
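    # Note (added for clarity): these are the classical weights of the scaled
    # unscented transform, Wm for the means and Wc for the covariances; the
    # 2*L+1 sigma points below are X and X +/- Gamma times the columns of sqrt(Pn).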
816     #
817     # Operators
818     Hm = HO["Direct"].appliedControledFormTo
819     #
820     if selfA._parameters["EstimationOf"] == "State":
821         Mm = EM["Direct"].appliedControledFormTo
822     #
823     if CM is not None and "Tangent" in CM and U is not None:
824         Cm = CM["Tangent"].asMatrix(Xb)
825     else:
826         Cm = None
827     #
828     # Observation duration and sizes
829     if hasattr(Y,"stepnumber"):
830         duration = Y.stepnumber()
831         __p = numpy.cumprod(Y.shape())[-1]
832     else:
833         duration = 2
834         __p = numpy.array(Y).size
835     #
836     # Precomputation of the inverses of B and R
837     if selfA._parameters["StoreInternalVariables"] \
838         or selfA._toStore("CostFunctionJ") \
839         or selfA._toStore("CostFunctionJb") \
840         or selfA._toStore("CostFunctionJo") \
841         or selfA._toStore("CurrentOptimum") \
842         or selfA._toStore("APosterioriCovariance"):
843         BI = B.getI()
844         RI = R.getI()
845     #
846     __n = Xb.size
847     #
848     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
849         Xn = Xb
850         if hasattr(B,"asfullmatrix"):
851             Pn = B.asfullmatrix(__n)
852         else:
853             Pn = B
854         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
855         selfA.StoredVariables["Analysis"].store( Xb )
856         if selfA._toStore("APosterioriCovariance"):
857             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
858     elif selfA._parameters["nextStep"]:
859         Xn = selfA._getInternalState("Xn")
860         Pn = selfA._getInternalState("Pn")
861     #
862     if selfA._parameters["EstimationOf"] == "Parameters":
863         XaMin            = Xn
864         previousJMinimum = numpy.finfo(float).max
865     #
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # number of cost values already stored, used below for IndexOfOptimum
866     for step in range(duration-1):
867         if hasattr(Y,"store"):
868             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
869         else:
870             Ynpu = numpy.ravel( Y ).reshape((__p,1))
871         #
872         if U is not None:
873             if hasattr(U,"store") and len(U)>1:
874                 Un = numpy.ravel( U[step] ).reshape((-1,1))
875             elif hasattr(U,"store") and len(U)==1:
876                 Un = numpy.ravel( U[0] ).reshape((-1,1))
877             else:
878                 Un = numpy.ravel( U ).reshape((-1,1))
879         else:
880             Un = None
881         #
882         Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
883         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
884         nbSpts = 2*Xn.size+1
885         #
886         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
887             for point in range(nbSpts):
888                 Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
889         #
890         XEtnnp = []
891         for point in range(nbSpts):
892             if selfA._parameters["EstimationOf"] == "State":
893                 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
894                 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
895                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
896                     XEtnnpi = XEtnnpi + Cm * Un
897                 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
898                     XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
899             elif selfA._parameters["EstimationOf"] == "Parameters":
900                 # --- > By principle, M = Id, Q = 0
901                 XEtnnpi = Xnp[:,point]
902             XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
903         XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
904         #
905         Xncm = ( XEtnnp * Wm ).sum(axis=1)
906         #
907         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
908             Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
909         #
910         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
911         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
912         for point in range(nbSpts):
913             Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
914         #
915         if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
916             Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
917         else:
918             Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
919         #
920         Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
921         #
922         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
923             for point in range(nbSpts):
924                 Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
925         #
926         Ynnp = []
927         for point in range(nbSpts):
928             if selfA._parameters["EstimationOf"] == "State":
929                 Ynnpi = Hm( (Xnnp[:,point], None) )
930             elif selfA._parameters["EstimationOf"] == "Parameters":
931                 Ynnpi = Hm( (Xnnp[:,point], Un) )
932             Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
933         Ynnp = numpy.concatenate( Ynnp, axis=1 )
934         #
935         Yncm = ( Ynnp * Wm ).sum(axis=1)
936         #
937         Pyyn = R
938         Pxyn = 0.
939         for point in range(nbSpts):
940             Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
941             Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
942         #
943         _Innovation  = Ynpu - Yncm.reshape((-1,1))
944         if selfA._parameters["EstimationOf"] == "Parameters":
945             if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
946                 _Innovation = _Innovation - Cm * Un
947         #
948         Kn = Pxyn * Pyyn.I
949         Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
950         Pn = Pnm - Kn * Pyyn * Kn.T
951         #
952         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
953             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
954         #
955         Xa = Xn # Pointers
956         #--------------------------
957         selfA._setInternalState("Xn", Xn)
958         selfA._setInternalState("Pn", Pn)
959         #--------------------------
960         #
961         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
962         # ---> with analysis
963         selfA.StoredVariables["Analysis"].store( Xa )
964         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
965             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
966         if selfA._toStore("InnovationAtCurrentAnalysis"):
967             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
968         # ---> with current state
969         if selfA._parameters["StoreInternalVariables"] \
970             or selfA._toStore("CurrentState"):
971             selfA.StoredVariables["CurrentState"].store( Xn )
972         if selfA._toStore("ForecastState"):
973             selfA.StoredVariables["ForecastState"].store( Xncm )
974         if selfA._toStore("ForecastCovariance"):
975             selfA.StoredVariables["ForecastCovariance"].store( Pnm )
976         if selfA._toStore("BMA"):
977             selfA.StoredVariables["BMA"].store( Xncm - Xa )
978         if selfA._toStore("InnovationAtCurrentState"):
979             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
980         if selfA._toStore("SimulatedObservationAtCurrentState") \
981             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
982             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
983         # ---> others
984         if selfA._parameters["StoreInternalVariables"] \
985             or selfA._toStore("CostFunctionJ") \
986             or selfA._toStore("CostFunctionJb") \
987             or selfA._toStore("CostFunctionJo") \
988             or selfA._toStore("CurrentOptimum") \
989             or selfA._toStore("APosterioriCovariance"):
990             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
991             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
992             J   = Jb + Jo
993             selfA.StoredVariables["CostFunctionJb"].store( Jb )
994             selfA.StoredVariables["CostFunctionJo"].store( Jo )
995             selfA.StoredVariables["CostFunctionJ" ].store( J )
996             #
997             if selfA._toStore("IndexOfOptimum") \
998                 or selfA._toStore("CurrentOptimum") \
999                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1000                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1001                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1002                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1003                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1004             if selfA._toStore("IndexOfOptimum"):
1005                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1006             if selfA._toStore("CurrentOptimum"):
1007                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1008             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1009                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1010             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1011                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1012             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1013                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1014             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1015                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1016         if selfA._toStore("APosterioriCovariance"):
1017             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1018         if selfA._parameters["EstimationOf"] == "Parameters" \
1019             and J < previousJMinimum:
1020             previousJMinimum    = J
1021             XaMin               = Xa
1022             if selfA._toStore("APosterioriCovariance"):
1023                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1024     #
1025     # Additional final storage of the optimum in parameter estimation
1026     # ----------------------------------------------------------------------
1027     if selfA._parameters["EstimationOf"] == "Parameters":
1028         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1029         selfA.StoredVariables["Analysis"].store( XaMin )
1030         if selfA._toStore("APosterioriCovariance"):
1031             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1032         if selfA._toStore("BMA"):
1033             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1034     #
1035     return 0
1036
1037 # ==============================================================================
1038 def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1039     """
1040     Constrained Extended Kalman Filter
1041     """
1042     if selfA._parameters["EstimationOf"] == "Parameters":
1043         selfA._parameters["StoreInternalVariables"] = True
1044     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
1045     #
1046     # Operators
1047     H = HO["Direct"].appliedControledFormTo
1048     #
1049     if selfA._parameters["EstimationOf"] == "State":
1050         M = EM["Direct"].appliedControledFormTo
1051     #
1052     if CM is not None and "Tangent" in CM and U is not None:
1053         Cm = CM["Tangent"].asMatrix(Xb)
1054     else:
1055         Cm = None
1056     #
1057     # Observation duration and sizes
1058     if hasattr(Y,"stepnumber"):
1059         duration = Y.stepnumber()
1060         __p = numpy.cumprod(Y.shape())[-1]
1061     else:
1062         duration = 2
1063         __p = numpy.array(Y).size
1064     #
1065     # Precomputation of the inverses of B and R
1066     if selfA._parameters["StoreInternalVariables"] \
1067         or selfA._toStore("CostFunctionJ") \
1068         or selfA._toStore("CostFunctionJb") \
1069         or selfA._toStore("CostFunctionJo") \
1070         or selfA._toStore("CurrentOptimum") \
1071         or selfA._toStore("APosterioriCovariance"):
1072         BI = B.getI()
1073         RI = R.getI()
1074     #
1075     __n = Xb.size
1076     #
1077     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1078         Xn = Xb
1079         Pn = B
1080         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1081         selfA.StoredVariables["Analysis"].store( Xb )
1082         if selfA._toStore("APosterioriCovariance"):
1083             if hasattr(B,"asfullmatrix"):
1084                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1085             else:
1086                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1087         selfA._setInternalState("seed", numpy.random.get_state())
1088     elif selfA._parameters["nextStep"]:
1089         Xn = selfA._getInternalState("Xn")
1090         Pn = selfA._getInternalState("Pn")
1091     #
1092     if selfA._parameters["EstimationOf"] == "Parameters":
1093         XaMin            = Xn
1094         previousJMinimum = numpy.finfo(float).max
1095     #
1096     for step in range(duration-1):
1097         if hasattr(Y,"store"):
1098             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1099         else:
1100             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1101         #
1102         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1103         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1104         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1105         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1106         #
1107         if selfA._parameters["EstimationOf"] == "State":
1108             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1109             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1110             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1111             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1112         #
1113         if U is not None:
1114             if hasattr(U,"store") and len(U)>1:
1115                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1116             elif hasattr(U,"store") and len(U)==1:
1117                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1118             else:
1119                 Un = numpy.ravel( U ).reshape((-1,1))
1120         else:
1121             Un = None
1122         #
1123         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1124             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1125         #
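        # Forecast step: the state is propagated by the evolution operator M,
        # and the error covariance by its tangent/adjoint linearizations around
        # the current state, giving Pf = Mt Pn Ma + Q.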
1126         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1127             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1128             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1129                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1130                 Xn_predicted = Xn_predicted + Cm * Un
1131             Pn_predicted = Q + Mt * (Pn * Ma)
1132         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1133             # --- > By principle, M = Id, Q = 0
1134             Xn_predicted = Xn
1135             Pn_predicted = Pn
1136         #
1137         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1138             Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
1139         #
1140         if selfA._parameters["EstimationOf"] == "State":
1141             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1142             _Innovation  = Ynpu - HX_predicted
1143         elif selfA._parameters["EstimationOf"] == "Parameters":
1144             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1145             _Innovation  = Ynpu - HX_predicted
1146             if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
1147                 _Innovation = _Innovation - Cm * Un
1148         #
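        # Analysis step, written with the tangent (Ht) and adjoint (Ha)
        # observation operators:
        #   Kn = Pf Ha (Ht Pf Ha + R)^-1,  Xa = Xf + Kn * innovation,
        #   Pa = (I - Kn Ht) Pf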
1149         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1150         Xn = Xn_predicted + Kn * _Innovation
1151         Pn = Pn_predicted - Kn * Ht * Pn_predicted
1152         #
1153         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1154             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1155         #
1156         Xa = Xn # Pointeurs
1157         #--------------------------
1158         selfA._setInternalState("Xn", Xn)
1159         selfA._setInternalState("Pn", Pn)
1160         #--------------------------
1161         #
1162         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1163         # ---> with analysis
1164         selfA.StoredVariables["Analysis"].store( Xa )
1165         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1166             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1167         if selfA._toStore("InnovationAtCurrentAnalysis"):
1168             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1169         # ---> with current state
1170         if selfA._parameters["StoreInternalVariables"] \
1171             or selfA._toStore("CurrentState"):
1172             selfA.StoredVariables["CurrentState"].store( Xn )
1173         if selfA._toStore("ForecastState"):
1174             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1175         if selfA._toStore("ForecastCovariance"):
1176             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1177         if selfA._toStore("BMA"):
1178             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1179         if selfA._toStore("InnovationAtCurrentState"):
1180             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1181         if selfA._toStore("SimulatedObservationAtCurrentState") \
1182             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1183             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1184         # ---> others
1185         if selfA._parameters["StoreInternalVariables"] \
1186             or selfA._toStore("CostFunctionJ") \
1187             or selfA._toStore("CostFunctionJb") \
1188             or selfA._toStore("CostFunctionJo") \
1189             or selfA._toStore("CurrentOptimum") \
1190             or selfA._toStore("APosterioriCovariance"):
1191             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1192             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1193             J   = Jb + Jo
1194             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1195             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1196             selfA.StoredVariables["CostFunctionJ" ].store( J )
1197             #
1198             if selfA._toStore("IndexOfOptimum") \
1199                 or selfA._toStore("CurrentOptimum") \
1200                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1201                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1202                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1203                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1204                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1205             if selfA._toStore("IndexOfOptimum"):
1206                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1207             if selfA._toStore("CurrentOptimum"):
1208                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1209             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1210                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1211             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1212                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1213             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1214                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1215             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1216                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1217         if selfA._toStore("APosterioriCovariance"):
1218             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1219         if selfA._parameters["EstimationOf"] == "Parameters" \
1220             and J < previousJMinimum:
1221             previousJMinimum    = J
1222             XaMin               = Xa
1223             if selfA._toStore("APosterioriCovariance"):
1224                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1225     #
1226     # Additional final storage of the optimum, for parameter estimation
1227     # ----------------------------------------------------------------------
1228     if selfA._parameters["EstimationOf"] == "Parameters":
1229         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1230         selfA.StoredVariables["Analysis"].store( XaMin )
1231         if selfA._toStore("APosterioriCovariance"):
1232             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1233         if selfA._toStore("BMA"):
1234             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1235     #
1236     return 0
1237
1238 # ==============================================================================
1239 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
1240     """
1241     Ensemble Kalman Smoother (EnKS), in a fixed-lag formulation
1242     """
1243     #
1244     # Operators
1245     H = HO["Direct"].appliedControledFormTo
1246     #
1247     if selfA._parameters["EstimationOf"] == "State":
1248         M = EM["Direct"].appliedControledFormTo
1249     #
1250     if CM is not None and "Tangent" in CM and U is not None:
1251         Cm = CM["Tangent"].asMatrix(Xb)
1252     else:
1253         Cm = None
1254     #
1255     # Precompute the inverses of B and R
1256     RIdemi = R.sqrtmI()
1257     #
1258     # Observation duration and sizes
1259     LagL = selfA._parameters["SmootherLagL"]
1260     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
1261         raise ValueError("Fixed-lag smoother requires a series of observations")
1262     if Y.stepnumber() < LagL:
1263         raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
1264     duration = Y.stepnumber()
1265     __p = numpy.cumprod(Y.shape())[-1]
1266     __n = Xb.size
1267     __m = selfA._parameters["NumberOfMembers"]
1268     #
1269     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1270         selfA.StoredVariables["Analysis"].store( Xb )
1271         if selfA._toStore("APosterioriCovariance"):
1272             if hasattr(B,"asfullmatrix"):
1273                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1274             else:
1275                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1276     #
1277     # Initial direct computation (memorization is preferred over recomputation)
1278     __seed = numpy.random.get_state()
1279     selfB = copy.deepcopy(selfA)
1280     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
1281     if VariantM == "EnKS16-KalmanFilterFormula":
1282         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
1283     else:
1284         raise ValueError("VariantM has to be chosen in the authorized methods list.")
1285     if LagL > 0:
1286         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
1287     else:
1288         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
1289     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
1290     #
1291     for step in range(LagL,duration-1):
1292         #
1293         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
1294         sEL.append(None)
1295         #
1296         if hasattr(Y,"store"):
1297             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1298         else:
1299             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1300         #
1301         if U is not None:
1302             if hasattr(U,"store") and len(U)>1:
1303                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1304             elif hasattr(U,"store") and len(U)==1:
1305                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1306             else:
1307                 Un = numpy.ravel( U ).reshape((-1,1))
1308         else:
1309             Un = None
1310         #
1311         #--------------------------
1312         if VariantM == "EnKS16-KalmanFilterFormula":
1313             if selfA._parameters["EstimationOf"] == "State": # Forecast
1314                 EL = M( [(EL[:,i], Un) for i in range(__m)],
1315                     argsAsSerie = True,
1316                     returnSerieAsArrayMatrix = True )
1317                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
1318                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1319                     argsAsSerie = True,
1320                     returnSerieAsArrayMatrix = True )
1321                 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1322                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1323                     EZ = EZ + Cm * Un
1324             elif selfA._parameters["EstimationOf"] == "Parameters":
1325                 # --- > By principle, M = Id, Q = 0
1326                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1327                     argsAsSerie = True,
1328                     returnSerieAsArrayMatrix = True )
1329             #
1330             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1331             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1332             #
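            # Analysis in ensemble space (square-root Kalman filter form):
            # with S = R^(-1/2) (anomalies of EZ)/sqrt(m-1), the weights are
            # vw = (I + S^T S)^-1 S^T R^(-1/2) (y - mean(EZ)), and the transform
            # T^(1/2) rescales the anomalies of the analysis ensemble.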
1333             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
1334             mS    = mS.reshape((-1,__m)) # Pour dimension 1
1335             delta = RIdemi @ ( Ynpu - vZm )
1336             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1337             vw    = mT @ mS.T @ delta
1338             #
1339             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1340             mU    = numpy.identity(__m)
1341             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
1342             #
1343             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
1344             EL    = vEm + EX @ wTU
1345             #
1346             sEL[LagL] = EL
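            # Fixed-lag smoothing: the same ensemble-space weights wTU computed
            # at the current time are applied to each of the L stored previous
            # ensembles, which updates them retrospectively.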
1347             for irl in range(LagL): # Smoothing of the L previous analyses
1348                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1349                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
1350                 sEL[irl] = vEm + EX @ wTU
1351             #
1352             # Keep the order-0 retrospective analysis before shifting the lag window
1353             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1354             if selfA._toStore("APosterioriCovariance"):
1355                 EXn = sEL[0]
1356             #
1357             for irl in range(LagL):
1358                 sEL[irl] = sEL[irl+1]
1359             sEL[LagL] = None
1360         #--------------------------
1361         else:
1362             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1363         #
1364         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1365         # ---> with analysis
1366         selfA.StoredVariables["Analysis"].store( Xa )
1367         if selfA._toStore("APosterioriCovariance"):
1368             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
1369     #
1370     # Store the last analyses, which are only partially updated
1371     for irl in range(LagL):
1372         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1373         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1374         selfA.StoredVariables["Analysis"].store( Xa )
1375     #
1376     return 0
1377
1378 # ==============================================================================
1379 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
1380     """
1381     Ensemble Transform Kalman Filter (ETKF)
1382     """
1383     if selfA._parameters["EstimationOf"] == "Parameters":
1384         selfA._parameters["StoreInternalVariables"] = True
1385     #
1386     # Operators
1387     H = HO["Direct"].appliedControledFormTo
1388     #
1389     if selfA._parameters["EstimationOf"] == "State":
1390         M = EM["Direct"].appliedControledFormTo
1391     #
1392     if CM is not None and "Tangent" in CM and U is not None:
1393         Cm = CM["Tangent"].asMatrix(Xb)
1394     else:
1395         Cm = None
1396     #
1397     # Observation duration and sizes
1398     if hasattr(Y,"stepnumber"):
1399         duration = Y.stepnumber()
1400         __p = numpy.cumprod(Y.shape())[-1]
1401     else:
1402         duration = 2
1403         __p = numpy.array(Y).size
1404     #
1405     # Precompute the inverses of B and R
1406     if selfA._parameters["StoreInternalVariables"] \
1407         or selfA._toStore("CostFunctionJ") \
1408         or selfA._toStore("CostFunctionJb") \
1409         or selfA._toStore("CostFunctionJo") \
1410         or selfA._toStore("CurrentOptimum") \
1411         or selfA._toStore("APosterioriCovariance"):
1412         BI = B.getI()
1413         RI = R.getI()
1414     elif VariantM != "KalmanFilterFormula":
1415         RI = R.getI()
1416     if VariantM == "KalmanFilterFormula":
1417         RIdemi = R.sqrtmI()
1418     #
1419     __n = Xb.size
1420     __m = selfA._parameters["NumberOfMembers"]
1421     #
1422     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1423         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1424         selfA.StoredVariables["Analysis"].store( Xb )
1425         if selfA._toStore("APosterioriCovariance"):
1426             if hasattr(B,"asfullmatrix"):
1427                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1428             else:
1429                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1430         selfA._setInternalState("seed", numpy.random.get_state())
1431     elif selfA._parameters["nextStep"]:
1432         Xn = selfA._getInternalState("Xn")
1433     #
1434     previousJMinimum = numpy.finfo(float).max
1435     #
1436     for step in range(duration-1):
1437         numpy.random.set_state(selfA._getInternalState("seed"))
1438         if hasattr(Y,"store"):
1439             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1440         else:
1441             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1442         #
1443         if U is not None:
1444             if hasattr(U,"store") and len(U)>1:
1445                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1446             elif hasattr(U,"store") and len(U)==1:
1447                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1448             else:
1449                 Un = numpy.ravel( U ).reshape((-1,1))
1450         else:
1451             Un = None
1452         #
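        # Optional multiplicative inflation of the background anomalies, to
        # compensate for the sampling-induced underestimation of the spread.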
1453         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1454             Xn = CovarianceInflation( Xn,
1455                 selfA._parameters["InflationType"],
1456                 selfA._parameters["InflationFactor"],
1457                 )
1458         #
1459         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1460             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1461                 argsAsSerie = True,
1462                 returnSerieAsArrayMatrix = True )
1463             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1464             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1465                 argsAsSerie = True,
1466                 returnSerieAsArrayMatrix = True )
1467             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1468                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1469                 Xn_predicted = Xn_predicted + Cm * Un
1470         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1471             # --- > By principle, M = Id, Q = 0
1472             Xn_predicted = EMX = Xn
1473             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1474                 argsAsSerie = True,
1475                 returnSerieAsArrayMatrix = True )
1476         #
1477         # Mean of forecast and observation of forecast
1478         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1479         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1480         #
1481         # Anomalies
1482         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
1483         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
1484         #
1485         #--------------------------
1486         if VariantM == "KalmanFilterFormula":
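            # Deterministic square-root (ETKF) update in ensemble space:
            #   S = R^(-1/2) EaHX / sqrt(m-1),   T = (I + S^T S)^-1,
            #   w = T S^T R^(-1/2) (y - Hfm),
            # and the analysis ensemble is rebuilt as
            #   Xn = Xfm + (EaX/sqrt(m-1)) (w + sqrt(m-1) T^(1/2))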
1487             mS    = RIdemi * EaHX / math.sqrt(__m-1)
1488             mS    = mS.reshape((-1,__m)) # Pour dimension 1
1489             delta = RIdemi * ( Ynpu - Hfm )
1490             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1491             vw    = mT @ mS.T @ delta
1492             #
1493             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1494             mU    = numpy.identity(__m)
1495             #
1496             EaX   = EaX / math.sqrt(__m-1)
1497             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
1498         #--------------------------
1499         elif VariantM == "Variational":
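            # Variational counterpart of the same analysis: minimize, in the
            # ensemble-weight variable w, the cost
            #   J(w) = 0.5 (d - EaHX w)^T R^-1 (d - EaHX w) + 0.5 (m-1) w^T w
            # (with d = y - H(Xfm)) by conjugate gradient, then rebuild the
            # posterior spread from the Hessian of J.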
1500             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1501             def CostFunction(w):
1502                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1503                 _Jo = 0.5 * _A.T @ (RI * _A)
1504                 _Jb = 0.5 * (__m-1) * w.T @ w
1505                 _J  = _Jo + _Jb
1506                 return float(_J)
1507             def GradientOfCostFunction(w):
1508                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1509                 _GradJo = - EaHX.T @ (RI * _A)
1510                 _GradJb = (__m-1) * w.reshape((__m,1))
1511                 _GradJ  = _GradJo + _GradJb
1512                 return numpy.ravel(_GradJ)
1513             vw = scipy.optimize.fmin_cg(
1514                 f           = CostFunction,
1515                 x0          = numpy.zeros(__m),
1516                 fprime      = GradientOfCostFunction,
1517                 args        = (),
1518                 disp        = False,
1519                 )
1520             #
1521             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1522             Htb = (__m-1) * numpy.identity(__m)
1523             Hta = Hto + Htb
1524             #
1525             Pta = numpy.linalg.inv( Hta )
1526             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1527             #
1528             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1529         #--------------------------
1530         elif VariantM == "FiniteSize11": # Jauge Boc2011
1531             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1532             def CostFunction(w):
1533                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1534                 _Jo = 0.5 * _A.T @ (RI * _A)
1535                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1536                 _J  = _Jo + _Jb
1537                 return float(_J)
1538             def GradientOfCostFunction(w):
1539                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1540                 _GradJo = - EaHX.T @ (RI * _A)
1541                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1542                 _GradJ  = _GradJo + _GradJb
1543                 return numpy.ravel(_GradJ)
1544             vw = scipy.optimize.fmin_cg(
1545                 f           = CostFunction,
1546                 x0          = numpy.zeros(__m),
1547                 fprime      = GradientOfCostFunction,
1548                 args        = (),
1549                 disp        = False,
1550                 )
1551             #
1552             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1553             Htb = __m * \
1554                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1555                 / (1 + 1/__m + vw.T @ vw)**2
1556             Hta = Hto + Htb
1557             #
1558             Pta = numpy.linalg.inv( Hta )
1559             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1560             #
1561             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1562         #--------------------------
1563         elif VariantM == "FiniteSize15": # Jauge Boc2015
1564             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1565             def CostFunction(w):
1566                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1567                 _Jo = 0.5 * _A.T @ (RI * _A)
1568                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1569                 _J  = _Jo + _Jb
1570                 return float(_J)
1571             def GradientOfCostFunction(w):
1572                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1573                 _GradJo = - EaHX.T @ (RI * _A)
1574                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1575                 _GradJ  = _GradJo + _GradJb
1576                 return numpy.ravel(_GradJ)
1577             vw = scipy.optimize.fmin_cg(
1578                 f           = CostFunction,
1579                 x0          = numpy.zeros(__m),
1580                 fprime      = GradientOfCostFunction,
1581                 args        = (),
1582                 disp        = False,
1583                 )
1584             #
1585             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1586             Htb = (__m+1) * \
1587                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1588                 / (1 + 1/__m + vw.T @ vw)**2
1589             Hta = Hto + Htb
1590             #
1591             Pta = numpy.linalg.inv( Hta )
1592             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1593             #
1594             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1595         #--------------------------
1596         elif VariantM == "FiniteSize16": # Jauge Boc2016
1597             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1598             def CostFunction(w):
1599                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1600                 _Jo = 0.5 * _A.T @ (RI * _A)
1601                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1602                 _J  = _Jo + _Jb
1603                 return float(_J)
1604             def GradientOfCostFunction(w):
1605                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1606                 _GradJo = - EaHX.T @ (RI * _A)
1607                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1608                 _GradJ  = _GradJo + _GradJb
1609                 return numpy.ravel(_GradJ)
1610             vw = scipy.optimize.fmin_cg(
1611                 f           = CostFunction,
1612                 x0          = numpy.zeros(__m),
1613                 fprime      = GradientOfCostFunction,
1614                 args        = (),
1615                 disp        = False,
1616                 )
1617             #
1618             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1619             Htb = ((__m+1) / (__m-1)) * \
1620                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1621                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1622             Hta = Hto + Htb
1623             #
1624             Pta = numpy.linalg.inv( Hta )
1625             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1626             #
1627             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1628         #--------------------------
1629         else:
1630             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1631         #
1632         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1633             Xn = CovarianceInflation( Xn,
1634                 selfA._parameters["InflationType"],
1635                 selfA._parameters["InflationFactor"],
1636                 )
1637         #
1638         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1639         #--------------------------
1640         selfA._setInternalState("Xn", Xn)
1641         selfA._setInternalState("seed", numpy.random.get_state())
1642         #--------------------------
1643         #
1644         if selfA._parameters["StoreInternalVariables"] \
1645             or selfA._toStore("CostFunctionJ") \
1646             or selfA._toStore("CostFunctionJb") \
1647             or selfA._toStore("CostFunctionJo") \
1648             or selfA._toStore("APosterioriCovariance") \
1649             or selfA._toStore("InnovationAtCurrentAnalysis") \
1650             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1651             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1652             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1653             _Innovation = Ynpu - _HXa
1654         #
1655         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1656         # ---> with analysis
1657         selfA.StoredVariables["Analysis"].store( Xa )
1658         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1659             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1660         if selfA._toStore("InnovationAtCurrentAnalysis"):
1661             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1662         # ---> with current state
1663         if selfA._parameters["StoreInternalVariables"] \
1664             or selfA._toStore("CurrentState"):
1665             selfA.StoredVariables["CurrentState"].store( Xn )
1666         if selfA._toStore("ForecastState"):
1667             selfA.StoredVariables["ForecastState"].store( EMX )
1668         if selfA._toStore("ForecastCovariance"):
1669             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1670         if selfA._toStore("BMA"):
1671             selfA.StoredVariables["BMA"].store( EMX - Xa )
1672         if selfA._toStore("InnovationAtCurrentState"):
1673             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1674         if selfA._toStore("SimulatedObservationAtCurrentState") \
1675             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1676             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1677         # ---> others
1678         if selfA._parameters["StoreInternalVariables"] \
1679             or selfA._toStore("CostFunctionJ") \
1680             or selfA._toStore("CostFunctionJb") \
1681             or selfA._toStore("CostFunctionJo") \
1682             or selfA._toStore("CurrentOptimum") \
1683             or selfA._toStore("APosterioriCovariance"):
1684             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1685             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1686             J   = Jb + Jo
1687             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1688             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1689             selfA.StoredVariables["CostFunctionJ" ].store( J )
1690             #
1691             if selfA._toStore("IndexOfOptimum") \
1692                 or selfA._toStore("CurrentOptimum") \
1693                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1694                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1695                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1696                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1697                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1698             if selfA._toStore("IndexOfOptimum"):
1699                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1700             if selfA._toStore("CurrentOptimum"):
1701                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1702             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1703                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1704             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1705                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1706             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1707                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1708             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1709                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1710         if selfA._toStore("APosterioriCovariance"):
1711             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1712         if selfA._parameters["EstimationOf"] == "Parameters" \
1713             and J < previousJMinimum:
1714             previousJMinimum    = J
1715             XaMin               = Xa
1716             if selfA._toStore("APosterioriCovariance"):
1717                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1718         # ---> For the smoothers
1719         if selfA._toStore("CurrentEnsembleState"):
1720             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1721     #
1722     # Additional final storage of the optimum, for parameter estimation
1723     # ----------------------------------------------------------------------
1724     if selfA._parameters["EstimationOf"] == "Parameters":
1725         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1726         selfA.StoredVariables["Analysis"].store( XaMin )
1727         if selfA._toStore("APosterioriCovariance"):
1728             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1729         if selfA._toStore("BMA"):
1730             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1731     #
1732     return 0
1733
1734 # ==============================================================================
1735 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1736     """
1737     Extended Kalman Filter
1738     """
1739     if selfA._parameters["EstimationOf"] == "Parameters":
1740         selfA._parameters["StoreInternalVariables"] = True
1741     #
1742     # Operators
1743     H = HO["Direct"].appliedControledFormTo
1744     #
1745     if selfA._parameters["EstimationOf"] == "State":
1746         M = EM["Direct"].appliedControledFormTo
1747     #
1748     if CM is not None and "Tangent" in CM and U is not None:
1749         Cm = CM["Tangent"].asMatrix(Xb)
1750     else:
1751         Cm = None
1752     #
1753     # Observation duration and sizes
1754     if hasattr(Y,"stepnumber"):
1755         duration = Y.stepnumber()
1756         __p = numpy.cumprod(Y.shape())[-1]
1757     else:
1758         duration = 2
1759         __p = numpy.array(Y).size
1760     #
1761     # Precompute the inverses of B and R
1762     if selfA._parameters["StoreInternalVariables"] \
1763         or selfA._toStore("CostFunctionJ") \
1764         or selfA._toStore("CostFunctionJb") \
1765         or selfA._toStore("CostFunctionJo") \
1766         or selfA._toStore("CurrentOptimum") \
1767         or selfA._toStore("APosterioriCovariance"):
1768         BI = B.getI()
1769         RI = R.getI()
1770     #
1771     __n = Xb.size
1772     #
1773     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1774         Xn = Xb
1775         Pn = B
1776         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1777         selfA.StoredVariables["Analysis"].store( Xb )
1778         if selfA._toStore("APosterioriCovariance"):
1779             if hasattr(B,"asfullmatrix"):
1780                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1781             else:
1782                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1783         selfA._setInternalState("seed", numpy.random.get_state())
1784     elif selfA._parameters["nextStep"]:
1785         Xn = selfA._getInternalState("Xn")
1786         Pn = selfA._getInternalState("Pn")
1787     #
1788     if selfA._parameters["EstimationOf"] == "Parameters":
1789         XaMin            = Xn
1790         previousJMinimum = numpy.finfo(float).max
1791     #
1792     for step in range(duration-1):
1793         if hasattr(Y,"store"):
1794             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1795         else:
1796             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1797         #
1798         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1799         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1800         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1801         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1802         #
1803         if selfA._parameters["EstimationOf"] == "State":
1804             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1805             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1806             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1807             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1808         #
1809         if U is not None:
1810             if hasattr(U,"store") and len(U)>1:
1811                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1812             elif hasattr(U,"store") and len(U)==1:
1813                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1814             else:
1815                 Un = numpy.ravel( U ).reshape((-1,1))
1816         else:
1817             Un = None
1818         #
1819         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1820             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1821             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1822                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1823                 Xn_predicted = Xn_predicted + Cm * Un
1824             Pn_predicted = Q + Mt * (Pn * Ma)
1825         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1826             # --- > By principle, M = Id, Q = 0
1827             Xn_predicted = Xn
1828             Pn_predicted = Pn
1829         #
1830         if selfA._parameters["EstimationOf"] == "State":
1831             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1832             _Innovation  = Ynpu - HX_predicted
1833         elif selfA._parameters["EstimationOf"] == "Parameters":
1834             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1835             _Innovation  = Ynpu - HX_predicted
1836             if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
1837                 _Innovation = _Innovation - Cm * Un
1838         #
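        # Analysis step of the extended Kalman filter, identical in form to the
        # constrained variant above:
        #   Kn = Pf Ha (Ht Pf Ha + R)^-1,  Xa = Xf + Kn * innovation,
        #   Pa = (I - Kn Ht) Pf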
1839         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1840         Xn = Xn_predicted + Kn * _Innovation
1841         Pn = Pn_predicted - Kn * Ht * Pn_predicted
1842         #
1843         Xa = Xn # Pointeurs
1844         #--------------------------
1845         selfA._setInternalState("Xn", Xn)
1846         selfA._setInternalState("Pn", Pn)
1847         #--------------------------
1848         #
1849         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1850         # ---> with analysis
1851         selfA.StoredVariables["Analysis"].store( Xa )
1852         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1853             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1854         if selfA._toStore("InnovationAtCurrentAnalysis"):
1855             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1856         # ---> with current state
1857         if selfA._parameters["StoreInternalVariables"] \
1858             or selfA._toStore("CurrentState"):
1859             selfA.StoredVariables["CurrentState"].store( Xn )
1860         if selfA._toStore("ForecastState"):
1861             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1862         if selfA._toStore("ForecastCovariance"):
1863             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1864         if selfA._toStore("BMA"):
1865             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1866         if selfA._toStore("InnovationAtCurrentState"):
1867             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1868         if selfA._toStore("SimulatedObservationAtCurrentState") \
1869             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1870             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1871         # ---> others
1872         if selfA._parameters["StoreInternalVariables"] \
1873             or selfA._toStore("CostFunctionJ") \
1874             or selfA._toStore("CostFunctionJb") \
1875             or selfA._toStore("CostFunctionJo") \
1876             or selfA._toStore("CurrentOptimum") \
1877             or selfA._toStore("APosterioriCovariance"):
1878             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1879             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1880             J   = Jb + Jo
1881             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1882             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1883             selfA.StoredVariables["CostFunctionJ" ].store( J )
1884             #
1885             if selfA._toStore("IndexOfOptimum") \
1886                 or selfA._toStore("CurrentOptimum") \
1887                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1888                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1889                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1890                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1891                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1892             if selfA._toStore("IndexOfOptimum"):
1893                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1894             if selfA._toStore("CurrentOptimum"):
1895                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1896             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1897                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1898             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1899                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1900             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1901                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1902             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1903                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1904         if selfA._toStore("APosterioriCovariance"):
1905             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1906         if selfA._parameters["EstimationOf"] == "Parameters" \
1907             and J < previousJMinimum:
1908             previousJMinimum    = J
1909             XaMin               = Xa
1910             if selfA._toStore("APosterioriCovariance"):
1911                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1912     #
1913     # Additional final storage of the optimum, for parameter estimation
1914     # ----------------------------------------------------------------------
1915     if selfA._parameters["EstimationOf"] == "Parameters":
1916         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1917         selfA.StoredVariables["Analysis"].store( XaMin )
1918         if selfA._toStore("APosterioriCovariance"):
1919             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1920         if selfA._toStore("BMA"):
1921             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1922     #
1923     return 0
1924
1925 # ==============================================================================
1926 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1927     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1928     """
1929     Iterative EnKF
1930     """
1931     if selfA._parameters["EstimationOf"] == "Parameters":
1932         selfA._parameters["StoreInternalVariables"] = True
1933     #
1934     # Operators
1935     H = HO["Direct"].appliedControledFormTo
1936     #
1937     if selfA._parameters["EstimationOf"] == "State":
1938         M = EM["Direct"].appliedControledFormTo
1939     #
1940     if CM is not None and "Tangent" in CM and U is not None:
1941         Cm = CM["Tangent"].asMatrix(Xb)
1942     else:
1943         Cm = None
1944     #
1945     # Observation duration and sizes
1946     if hasattr(Y,"stepnumber"):
1947         duration = Y.stepnumber()
1948         __p = numpy.cumprod(Y.shape())[-1]
1949     else:
1950         duration = 2
1951         __p = numpy.array(Y).size
1952     #
1953     # Precompute the inverses of B and R
1954     if selfA._parameters["StoreInternalVariables"] \
1955         or selfA._toStore("CostFunctionJ") \
1956         or selfA._toStore("CostFunctionJb") \
1957         or selfA._toStore("CostFunctionJo") \
1958         or selfA._toStore("CurrentOptimum") \
1959         or selfA._toStore("APosterioriCovariance"):
1960         BI = B.getI()
1961     RI = R.getI()
1962     #
1963     __n = Xb.size
1964     __m = selfA._parameters["NumberOfMembers"]
1965     #
1966     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1967         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1968         else:                         Pn = B
1969         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1970         selfA.StoredVariables["Analysis"].store( Xb )
1971         if selfA._toStore("APosterioriCovariance"):
1972             if hasattr(B,"asfullmatrix"):
1973                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1974             else:
1975                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1976         selfA._setInternalState("seed", numpy.random.get_state())
1977     elif selfA._parameters["nextStep"]:
1978         Xn = selfA._getInternalState("Xn")
1979     #
1980     previousJMinimum = numpy.finfo(float).max
1981     #
1982     for step in range(duration-1):
1983         numpy.random.set_state(selfA._getInternalState("seed"))
1984         if hasattr(Y,"store"):
1985             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1986         else:
1987             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1988         #
1989         if U is not None:
1990             if hasattr(U,"store") and len(U)>1:
1991                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1992             elif hasattr(U,"store") and len(U)==1:
1993                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1994             else:
1995                 Un = numpy.asmatrix(numpy.ravel( U )).T
1996         else:
1997             Un = None
1998         #
1999         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2000             Xn = CovarianceInflation( Xn,
2001                 selfA._parameters["InflationType"],
2002                 selfA._parameters["InflationFactor"],
2003                 )
2004         #
2005         #--------------------------
2006         if VariantM == "IEnKF12":
2007             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2008             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
2009             __j = 0
2010             Deltaw = 1
2011             if not BnotT:
2012                 Ta  = numpy.identity(__m)
2013             vw  = numpy.zeros(__m)
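            # Gauss-Newton iterations in ensemble-weight space: at each step the
            # ensemble is rebuilt around the current iterate, re-propagated and
            # re-observed, and the increment solves
            #   (I + EaY^T R^-1 EaY) Deltaw = - GradJ,
            # where EaY are the (bundle or transform) observation anomalies.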
2014             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2015                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2016                 #
2017                 if BnotT:
2018                     E1 = vx1 + _epsilon * EaX
2019                 else:
2020                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2021                 #
2022                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2023                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2024                         argsAsSerie = True,
2025                         returnSerieAsArrayMatrix = True )
2026                 elif selfA._parameters["EstimationOf"] == "Parameters":
2027                     # --- > By principle, M = Id
2028                     E2 = Xn
2029                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2030                 vy1 = H((vx2, Un)).reshape((__p,1))
2031                 #
2032                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2033                     argsAsSerie = True,
2034                     returnSerieAsArrayMatrix = True )
2035                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2036                 #
2037                 if BnotT:
2038                     EaY = (HE2 - vy2) / _epsilon
2039                 else:
2040                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2041                 #
2042                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2043                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2044                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2045                 #
2046                 vw = vw + Deltaw
2047                 #
2048                 if not BnotT:
2049                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2050                 #
2051                 __j = __j + 1
2052             #
2053             A2 = EnsembleOfAnomalies( E2 )
2054             #
2055             if BnotT:
2056                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2057                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
2058             #
2059             Xn = vx2 + A2
2060         #--------------------------
2061         else:
2062             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2063         #
2064         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2065             Xn = CovarianceInflation( Xn,
2066                 selfA._parameters["InflationType"],
2067                 selfA._parameters["InflationFactor"],
2068                 )
2069         #
2070         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2071         #--------------------------
2072         selfA._setInternalState("Xn", Xn)
2073         selfA._setInternalState("seed", numpy.random.get_state())
2074         #--------------------------
2075         #
2076         if selfA._parameters["StoreInternalVariables"] \
2077             or selfA._toStore("CostFunctionJ") \
2078             or selfA._toStore("CostFunctionJb") \
2079             or selfA._toStore("CostFunctionJo") \
2080             or selfA._toStore("APosterioriCovariance") \
2081             or selfA._toStore("InnovationAtCurrentAnalysis") \
2082             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2083             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2084             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2085             _Innovation = Ynpu - _HXa
2086         #
2087         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2088         # ---> with analysis
2089         selfA.StoredVariables["Analysis"].store( Xa )
2090         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2091             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2092         if selfA._toStore("InnovationAtCurrentAnalysis"):
2093             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2094         # ---> with current state
2095         if selfA._parameters["StoreInternalVariables"] \
2096             or selfA._toStore("CurrentState"):
2097             selfA.StoredVariables["CurrentState"].store( Xn )
2098         if selfA._toStore("ForecastState"):
2099             selfA.StoredVariables["ForecastState"].store( E2 )
2100         if selfA._toStore("ForecastCovariance"):
2101             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
2102         if selfA._toStore("BMA"):
2103             selfA.StoredVariables["BMA"].store( E2 - Xa )
2104         if selfA._toStore("InnovationAtCurrentState"):
2105             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2106         if selfA._toStore("SimulatedObservationAtCurrentState") \
2107             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2108             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2109         # ---> others
2110         if selfA._parameters["StoreInternalVariables"] \
2111             or selfA._toStore("CostFunctionJ") \
2112             or selfA._toStore("CostFunctionJb") \
2113             or selfA._toStore("CostFunctionJo") \
2114             or selfA._toStore("CurrentOptimum") \
2115             or selfA._toStore("APosterioriCovariance"):
2116             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2117             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2118             J   = Jb + Jo
2119             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2120             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2121             selfA.StoredVariables["CostFunctionJ" ].store( J )
2122             #
2123             if selfA._toStore("IndexOfOptimum") \
2124                 or selfA._toStore("CurrentOptimum") \
2125                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2126                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2127                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2128                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2129                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2130             if selfA._toStore("IndexOfOptimum"):
2131                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2132             if selfA._toStore("CurrentOptimum"):
2133                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2134             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2135                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2136             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2137                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2138             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2139                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2140             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2141                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2142         if selfA._toStore("APosterioriCovariance"):
2143             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2144         if selfA._parameters["EstimationOf"] == "Parameters" \
2145             and J < previousJMinimum:
2146             previousJMinimum    = J
2147             XaMin               = Xa
2148             if selfA._toStore("APosterioriCovariance"):
2149                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2150         # ---> For the smoothers
2151         if selfA._toStore("CurrentEnsembleState"):
2152             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2153     #
2154     # Final additional storage of the optimum for parameter estimation
2155     # ----------------------------------------------------------------------
2156     if selfA._parameters["EstimationOf"] == "Parameters":
2157         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2158         selfA.StoredVariables["Analysis"].store( XaMin )
2159         if selfA._toStore("APosterioriCovariance"):
2160             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2161         if selfA._toStore("BMA"):
2162             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2163     #
2164     return 0
2165
2166 # ==============================================================================
2167 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2168     """
2169     Incremental 3DVAR
2170     """
2171     #
2172     # Initialisations
2173     # ---------------
2174     #
2175     # Non-linear operator for the outer loop
2176     Hm = HO["Direct"].appliedTo
2177     #
2178     # Precompute the inverses of B and R
2179     BI = B.getI()
2180     RI = R.getI()
2181     #
2182     # Starting point of the optimisation
2183     Xini = selfA._parameters["InitializationPoint"]
2184     #
2185     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
2186     Innovation = Y - HXb
2187     #
2188     # Outer Loop
2189     # ----------
2190     iOuter = 0
2191     J      = 1./mpr
2192     DeltaJ = 1./mpr
2193     Xr     = Xini.reshape((-1,1))
2194     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
2195         #
2196         # Inner Loop
2197         # ----------
2198         Ht = HO["Tangent"].asMatrix(Xr)
2199         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
2200         #
2201         # Definition of the cost function
2202         # ------------------------------
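        # The quadratic cost evaluated by CostFunction below is the incremental
        # 3DVAR inner-loop functional in the increment dx:
        #   J(dx) = 0.5 * dx^T B^{-1} dx
        #         + 0.5 * (Innovation - Ht dx)^T R^{-1} (Innovation - Ht dx)
        # with Innovation = Y - H(Xb) and Ht the tangent linear operator
        # evaluated at the current outer iterate Xr.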
2203         def CostFunction(dx):
2204             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
2205             if selfA._parameters["StoreInternalVariables"] or \
2206                 selfA._toStore("CurrentState") or \
2207                 selfA._toStore("CurrentOptimum"):
2208                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
2209             _HdX = Ht * _dX
2210             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
2211             _dInnovation = Innovation - _HdX
2212             if selfA._toStore("SimulatedObservationAtCurrentState") or \
2213                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2214                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
2215             if selfA._toStore("InnovationAtCurrentState"):
2216                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
2217             #
2218             Jb  = float( 0.5 * _dX.T * BI * _dX )
2219             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
2220             J   = Jb + Jo
2221             #
2222             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2223             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2224             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2225             selfA.StoredVariables["CostFunctionJ" ].store( J )
2226             if selfA._toStore("IndexOfOptimum") or \
2227                 selfA._toStore("CurrentOptimum") or \
2228                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2229                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2230                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2231                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2232                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2233             if selfA._toStore("IndexOfOptimum"):
2234                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2235             if selfA._toStore("CurrentOptimum"):
2236                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2237             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2238                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2239             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2240                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2241             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2242                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2243             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2244                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2245             return J
2246         #
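        # The gradient consistent with the cost above is
        #   grad J(dx) = B^{-1} dx - Ht^T R^{-1} (Innovation - Ht dx) ,
        # returned flattened, as expected by the scipy.optimize minimisers below.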
2247         def GradientOfCostFunction(dx):
2248             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
2249             _HdX         = Ht * _dX
2250             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
2251             _dInnovation = Innovation - _HdX
2252             GradJb       = BI * _dX
2253             GradJo       = - Ht.T @ (RI * _dInnovation)
2254             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2255             return GradJ
2256         #
2257         # Minimisation of the functional
2258         # --------------------------------
2259         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2260         #
2261         if selfA._parameters["Minimizer"] == "LBFGSB":
2262             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
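            # Note: for SciPy releases between 0.19 and 1.1.0 the bundled
            # helper module "lbfgsbhlt" is imported in place of scipy.optimize,
            # presumably to work around limitations of fmin_l_bfgs_b in those
            # versions; otherwise the stock SciPy routine is used.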
2263             if "0.19" <= scipy.version.version <= "1.1.0":
2264                 import lbfgsbhlt as optimiseur
2265             else:
2266                 import scipy.optimize as optimiseur
2267             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2268                 func        = CostFunction,
2269                 x0          = numpy.zeros(Xini.size),
2270                 fprime      = GradientOfCostFunction,
2271                 args        = (),
2272                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2273                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2274                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2275                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2276                 iprint      = selfA._parameters["optiprint"],
2277                 )
2278             nfeval = Informations['funcalls']
2279             rc     = Informations['warnflag']
2280         elif selfA._parameters["Minimizer"] == "TNC":
2281             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2282                 func        = CostFunction,
2283                 x0          = numpy.zeros(Xini.size),
2284                 fprime      = GradientOfCostFunction,
2285                 args        = (),
2286                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2287                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2288                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2289                 ftol        = selfA._parameters["CostDecrementTolerance"],
2290                 messages    = selfA._parameters["optmessages"],
2291                 )
2292         elif selfA._parameters["Minimizer"] == "CG":
2293             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2294                 f           = CostFunction,
2295                 x0          = numpy.zeros(Xini.size),
2296                 fprime      = GradientOfCostFunction,
2297                 args        = (),
2298                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2299                 gtol        = selfA._parameters["GradientNormTolerance"],
2300                 disp        = selfA._parameters["optdisp"],
2301                 full_output = True,
2302                 )
2303         elif selfA._parameters["Minimizer"] == "NCG":
2304             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2305                 f           = CostFunction,
2306                 x0          = numpy.zeros(Xini.size),
2307                 fprime      = GradientOfCostFunction,
2308                 args        = (),
2309                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2310                 avextol     = selfA._parameters["CostDecrementTolerance"],
2311                 disp        = selfA._parameters["optdisp"],
2312                 full_output = True,
2313                 )
2314         elif selfA._parameters["Minimizer"] == "BFGS":
2315             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2316                 f           = CostFunction,
2317                 x0          = numpy.zeros(Xini.size),
2318                 fprime      = GradientOfCostFunction,
2319                 args        = (),
2320                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2321                 gtol        = selfA._parameters["GradientNormTolerance"],
2322                 disp        = selfA._parameters["optdisp"],
2323                 full_output = True,
2324                 )
2325         else:
2326             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2327         #
2328         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2329         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2330         #
2331         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2332             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2333         else:
2334             Minimum = Xb + Minimum.reshape((-1,1))
2335         #
2336         Xr     = Minimum
2337         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2338         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2339     #
2340     Xa = Xr
2341     #--------------------------
2342     #
2343     selfA.StoredVariables["Analysis"].store( Xa )
2344     #
2345     if selfA._toStore("OMA") or \
2346         selfA._toStore("SigmaObs2") or \
2347         selfA._toStore("SimulationQuantiles") or \
2348         selfA._toStore("SimulatedObservationAtOptimum"):
2349         if selfA._toStore("SimulatedObservationAtCurrentState"):
2350             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2351         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2352             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2353         else:
2354             HXa = Hm( Xa )
2355     #
2356     if selfA._toStore("APosterioriCovariance") or \
2357         selfA._toStore("SimulationQuantiles") or \
2358         selfA._toStore("JacobianMatrixAtOptimum") or \
2359         selfA._toStore("KalmanGainAtOptimum"):
2360         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2361         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2362     if selfA._toStore("APosterioriCovariance") or \
2363         selfA._toStore("SimulationQuantiles") or \
2364         selfA._toStore("KalmanGainAtOptimum"):
2365         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2366         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2367     if selfA._toStore("APosterioriCovariance") or \
2368         selfA._toStore("SimulationQuantiles"):
2369         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2370     if selfA._toStore("APosterioriCovariance"):
2371         selfA.StoredVariables["APosterioriCovariance"].store( A )
2372     if selfA._toStore("JacobianMatrixAtOptimum"):
2373         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
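    # The gain stored below is written in two algebraically equivalent forms,
    # chosen so that the matrix to invert has the size of the smaller space:
    #   K = B H^T (R + H B H^T)^{-1}                  if Y.size <= Xb.size,
    #   K = (B^{-1} + H^T R^{-1} H)^{-1} H^T R^{-1}   otherwise.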
2374     if selfA._toStore("KalmanGainAtOptimum"):
2375         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2376         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2377         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2378     #
2379     # Additional computations and/or storage
2380     # ---------------------------------------
2381     if selfA._toStore("Innovation") or \
2382         selfA._toStore("SigmaObs2") or \
2383         selfA._toStore("MahalanobisConsistency") or \
2384         selfA._toStore("OMB"):
2385         d  = Y - HXb
2386     if selfA._toStore("Innovation"):
2387         selfA.StoredVariables["Innovation"].store( d )
2388     if selfA._toStore("BMA"):
2389         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2390     if selfA._toStore("OMA"):
2391         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2392     if selfA._toStore("OMB"):
2393         selfA.StoredVariables["OMB"].store( d )
2394     if selfA._toStore("SigmaObs2"):
2395         TraceR = R.trace(Y.size)
2396         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
2397     if selfA._toStore("MahalanobisConsistency"):
2398         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2399     if selfA._toStore("SimulationQuantiles"):
2400         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2401     if selfA._toStore("SimulatedObservationAtBackground"):
2402         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
2403     if selfA._toStore("SimulatedObservationAtOptimum"):
2404         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
2405     #
2406     return 0
2407
2408 # ==============================================================================
2409 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2410     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2411     """
2412     Maximum Likelihood Ensemble Filter
2413     """
2414     if selfA._parameters["EstimationOf"] == "Parameters":
2415         selfA._parameters["StoreInternalVariables"] = True
2416     #
2417     # Operators
2418     H = HO["Direct"].appliedControledFormTo
2419     #
2420     if selfA._parameters["EstimationOf"] == "State":
2421         M = EM["Direct"].appliedControledFormTo
2422     #
2423     if CM is not None and "Tangent" in CM and U is not None:
2424         Cm = CM["Tangent"].asMatrix(Xb)
2425     else:
2426         Cm = None
2427     #
2428     # Observation duration and sizes
2429     if hasattr(Y,"stepnumber"):
2430         duration = Y.stepnumber()
2431         __p = numpy.cumprod(Y.shape())[-1]
2432     else:
2433         duration = 2
2434         __p = numpy.array(Y).size
2435     #
2436     # Precompute the inverses of B and R
2437     if selfA._parameters["StoreInternalVariables"] \
2438         or selfA._toStore("CostFunctionJ") \
2439         or selfA._toStore("CostFunctionJb") \
2440         or selfA._toStore("CostFunctionJo") \
2441         or selfA._toStore("CurrentOptimum") \
2442         or selfA._toStore("APosterioriCovariance"):
2443         BI = B.getI()
2444     RI = R.getI()
2445     #
2446     __n = Xb.size
2447     __m = selfA._parameters["NumberOfMembers"]
2448     #
2449     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2450         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2451         selfA.StoredVariables["Analysis"].store( Xb )
2452         if selfA._toStore("APosterioriCovariance"):
2453             if hasattr(B,"asfullmatrix"):
2454                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2455             else:
2456                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2457         selfA._setInternalState("seed", numpy.random.get_state())
2458     elif selfA._parameters["nextStep"]:
2459         Xn = selfA._getInternalState("Xn")
2460     #
2461     previousJMinimum = numpy.finfo(float).max
2462     #
2463     for step in range(duration-1):
2464         numpy.random.set_state(selfA._getInternalState("seed"))
2465         if hasattr(Y,"store"):
2466             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2467         else:
2468             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2469         #
2470         if U is not None:
2471             if hasattr(U,"store") and len(U)>1:
2472                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2473             elif hasattr(U,"store") and len(U)==1:
2474                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2475             else:
2476                 Un = numpy.asmatrix(numpy.ravel( U )).T
2477         else:
2478             Un = None
2479         #
2480         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2481             Xn = CovarianceInflation( Xn,
2482                 selfA._parameters["InflationType"],
2483                 selfA._parameters["InflationFactor"],
2484                 )
2485         #
2486         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2487             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2488                 argsAsSerie = True,
2489                 returnSerieAsArrayMatrix = True )
2490             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2491             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2492                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2493                 Xn_predicted = Xn_predicted + Cm * Un
2494         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2495             # ---> By principle, M = Id, Q = 0
2496             Xn_predicted = EMX = Xn
2497         #
2498         #--------------------------
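        # In the "MLEF13" variant below, the analysis is searched in the space
        # spanned by the forecast anomalies EaX: the control vector vw is
        # iterated with the Newton-like update
        #   Deltaw = - mH^{-1} GradJ ,
        #   mH     = I + EaY^T R^{-1} EaY ,
        #   GradJ  = vw - EaY^T R^{-1} (Ynpu - vy2) ,
        # where EaY are the observed anomalies, and Ta is the square-root
        # transform used to rebuild the analysis ensemble.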
2499         if VariantM == "MLEF13":
2500             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2501             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2502             Ua  = numpy.identity(__m)
2503             __j = 0
2504             Deltaw = 1
2505             if not BnotT:
2506                 Ta  = numpy.identity(__m)
2507             vw  = numpy.zeros(__m)
2508             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2509                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2510                 #
2511                 if BnotT:
2512                     E1 = vx1 + _epsilon * EaX
2513                 else:
2514                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2515                 #
2516                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2517                     argsAsSerie = True,
2518                     returnSerieAsArrayMatrix = True )
2519                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2520                 #
2521                 if BnotT:
2522                     EaY = (HE2 - vy2) / _epsilon
2523                 else:
2524                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2525                 #
2526                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2527                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2528                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2529                 #
2530                 vw = vw + Deltaw
2531                 #
2532                 if not BnotT:
2533                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2534                 #
2535                 __j = __j + 1
2536             #
2537             if BnotT:
2538                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2539             #
2540             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2541         #--------------------------
2542         else:
2543             raise ValueError("VariantM has to be chosen from the list of authorized methods.")
2544         #
2545         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2546             Xn = CovarianceInflation( Xn,
2547                 selfA._parameters["InflationType"],
2548                 selfA._parameters["InflationFactor"],
2549                 )
2550         #
2551         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2552         #--------------------------
2553         selfA._setInternalState("Xn", Xn)
2554         selfA._setInternalState("seed", numpy.random.get_state())
2555         #--------------------------
2556         #
2557         if selfA._parameters["StoreInternalVariables"] \
2558             or selfA._toStore("CostFunctionJ") \
2559             or selfA._toStore("CostFunctionJb") \
2560             or selfA._toStore("CostFunctionJo") \
2561             or selfA._toStore("APosterioriCovariance") \
2562             or selfA._toStore("InnovationAtCurrentAnalysis") \
2563             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2564             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2565             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2566             _Innovation = Ynpu - _HXa
2567         #
2568         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2569         # ---> with analysis
2570         selfA.StoredVariables["Analysis"].store( Xa )
2571         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2572             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2573         if selfA._toStore("InnovationAtCurrentAnalysis"):
2574             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2575         # ---> with current state
2576         if selfA._parameters["StoreInternalVariables"] \
2577             or selfA._toStore("CurrentState"):
2578             selfA.StoredVariables["CurrentState"].store( Xn )
2579         if selfA._toStore("ForecastState"):
2580             selfA.StoredVariables["ForecastState"].store( EMX )
2581         if selfA._toStore("ForecastCovariance"):
2582             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2583         if selfA._toStore("BMA"):
2584             selfA.StoredVariables["BMA"].store( EMX - Xa )
2585         if selfA._toStore("InnovationAtCurrentState"):
2586             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2587         if selfA._toStore("SimulatedObservationAtCurrentState") \
2588             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2589             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2590         # ---> others
2591         if selfA._parameters["StoreInternalVariables"] \
2592             or selfA._toStore("CostFunctionJ") \
2593             or selfA._toStore("CostFunctionJb") \
2594             or selfA._toStore("CostFunctionJo") \
2595             or selfA._toStore("CurrentOptimum") \
2596             or selfA._toStore("APosterioriCovariance"):
2597             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2598             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2599             J   = Jb + Jo
2600             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2601             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2602             selfA.StoredVariables["CostFunctionJ" ].store( J )
2603             #
2604             if selfA._toStore("IndexOfOptimum") \
2605                 or selfA._toStore("CurrentOptimum") \
2606                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2607                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2608                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2609                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2610                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2611             if selfA._toStore("IndexOfOptimum"):
2612                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2613             if selfA._toStore("CurrentOptimum"):
2614                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2615             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2616                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2617             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2618                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2619             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2620                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2621             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2622                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2623         if selfA._toStore("APosterioriCovariance"):
2624             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2625         if selfA._parameters["EstimationOf"] == "Parameters" \
2626             and J < previousJMinimum:
2627             previousJMinimum    = J
2628             XaMin               = Xa
2629             if selfA._toStore("APosterioriCovariance"):
2630                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2631         # ---> For the smoothers
2632         if selfA._toStore("CurrentEnsembleState"):
2633             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2634     #
2635     # Final additional storage of the optimum for parameter estimation
2636     # ----------------------------------------------------------------------
2637     if selfA._parameters["EstimationOf"] == "Parameters":
2638         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2639         selfA.StoredVariables["Analysis"].store( XaMin )
2640         if selfA._toStore("APosterioriCovariance"):
2641             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2642         if selfA._toStore("BMA"):
2643             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2644     #
2645     return 0
2646
2647 # ==============================================================================
2648 def mmqr(
2649         func     = None,
2650         x0       = None,
2651         fprime   = None,
2652         bounds   = None,
2653         quantile = 0.5,
2654         maxfun   = 15000,
2655         toler    = 1.e-06,
2656         y        = None,
2657         ):
2658     """
2659     Computer implementation of the MMQR algorithm, based on the publication:
2660     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2661     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2662     """
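    # Outline of the iteration implemented below: the tilted absolute-value
    # (quantile) loss is majorised at each step by a weighted quadratic
    # surrogate with weights 1/(epsilon + |residual|); the surrogate is
    # decreased by a weighted least-squares step on the Jacobian, the step is
    # halved while it violates the bounds or increases the surrogate, and the
    # iteration stops when the surrogate decrease falls below "toler" or when
    # "maxfun" iterations are reached.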
2663     #
2664     # Retrieval of the initial data and information
2665     # --------------------------------------------------
2666     variables = numpy.ravel( x0 )
2667     mesures   = numpy.ravel( y )
2668     increment = sys.float_info[0]
2669     p         = variables.size
2670     n         = mesures.size
2671     quantile  = float(quantile)
2672     #
2673     # Computation of the MM parameters
2674     # ---------------------------
2675     tn      = float(toler) / n
2676     e0      = -tn / math.log(tn)
2677     epsilon = (e0-tn)/(1+math.log(e0))
2678     #
2679     # Initialisation computations
2680     # ------------------------
2681     residus  = mesures - numpy.ravel( func( variables ) )
2682     poids    = 1./(epsilon+numpy.abs(residus))
2683     veps     = 1. - 2. * quantile - residus * poids
2684     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2685     iteration = 0
2686     #
2687     # Iterative search
2688     # -------------------
2689     while (increment > toler) and (iteration < maxfun) :
2690         iteration += 1
2691         #
2692         Derivees  = numpy.array(fprime(variables))
2693         Derivees  = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it goes through YACS pipes
2694         DeriveesT = Derivees.transpose()
2695         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2696         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
2697         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2698         #
2699         variables = variables + step
2700         if bounds is not None:
2701             # Warning: avoid an infinite loop if an interval is too small
2702             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2703                 step      = step/2.
2704                 variables = variables - step
2705         residus   = mesures - numpy.ravel( func(variables) )
2706         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2707         #
2708         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2709             step      = step/2.
2710             variables = variables - step
2711             residus   = mesures - numpy.ravel( func(variables) )
2712             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2713         #
2714         increment     = lastsurrogate-surrogate
2715         poids         = 1./(epsilon+numpy.abs(residus))
2716         veps          = 1. - 2. * quantile - residus * poids
2717         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2718     #
2719     # Discrepancy measure
2720     # --------------
2721     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2722     #
2723     return variables, Ecart, [n,p,iteration,increment,0]
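# Hypothetical usage sketch for mmqr (illustration only, not part of the ADAO
# API): median regression (quantile=0.5) of a linear model on synthetic data.
# The names "modele", "jacobien", "xdata" and "ydata" are assumptions made for
# this example.
#
#   xdata = numpy.linspace(0., 1., 50)
#   ydata = 2.*xdata + 1. + 0.1*numpy.random.randn(50)
#   def modele(coeffs):
#       a, b = numpy.ravel(coeffs)
#       return a*xdata + b
#   def jacobien(coeffs):
#       # Jacobian of the linear model: one row (d/da, d/db) per observation
#       return numpy.vstack((xdata, numpy.ones_like(xdata))).T
#   coeffs, ecart, infos = mmqr(func=modele, x0=[0., 0.], fprime=jacobien,
#                               quantile=0.5, y=ydata)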
2724
2725 # ==============================================================================
2726 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2727     """
2728     Multi-step and multi-method 3DVAR
2729     """
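    # "oneCycle" is expected to be one of the mono-cycle assimilation kernels
    # of this module sharing the (selfA, Xb, Y, U, HO, EM, CM, R, B, Q)
    # signature, typically a 3DVAR variant such as std3dvar, psas3dvar or
    # incr3dvar; it is called once per observation window, after the optional
    # forecast step.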
2730     #
2731     # Initialisation
2732     if selfA._parameters["EstimationOf"] == "State":
2733         M = EM["Direct"].appliedTo
2734         #
2735         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2736             Xn = numpy.ravel(Xb).reshape((-1,1))
2737             selfA.StoredVariables["Analysis"].store( Xn )
2738             if selfA._toStore("APosterioriCovariance"):
2739                 if hasattr(B,"asfullmatrix"):
2740                     selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2741                 else:
2742                     selfA.StoredVariables["APosterioriCovariance"].store( B )
2743             if selfA._toStore("ForecastState"):
2744                 selfA.StoredVariables["ForecastState"].store( Xn )
2745         elif selfA._parameters["nextStep"]:
2746             Xn = selfA._getInternalState("Xn")
2747     else:
2748         Xn = numpy.ravel(Xb).reshape((-1,1))
2749     #
2750     if hasattr(Y,"stepnumber"):
2751         duration = Y.stepnumber()
2752     else:
2753         duration = 2
2754     #
2755     # Multi-step
2756     for step in range(duration-1):
2757         if hasattr(Y,"store"):
2758             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2759         else:
2760             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2761         #
2762         if selfA._parameters["EstimationOf"] == "State": # Forecast
2763             Xn_predicted = M( Xn )
2764             if selfA._toStore("ForecastState"):
2765                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2766         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2767             # ---> By principle, M = Id, Q = 0
2768             Xn_predicted = Xn
2769         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2770         #
2771         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2772         #
2773         Xn = selfA.StoredVariables["Analysis"][-1]
2774         #--------------------------
2775         selfA._setInternalState("Xn", Xn)
2776     #
2777     return 0
2778
2779 # ==============================================================================
2780 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2781     """
2782     3DVAR PSAS
2783     """
2784     #
2785     # Initialisations
2786     # ---------------
2787     #
2788     # Operators
2789     Hm = HO["Direct"].appliedTo
2790     #
2791     # Possible use of a precomputed H(Xb) vector
2792     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2793         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2794     else:
2795         HXb = Hm( Xb )
2796     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2797     if Y.size != HXb.size:
2798         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2799     if max(Y.shape) != max(HXb.shape):
2800         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2801     #
2802     if selfA._toStore("JacobianMatrixAtBackground"):
2803         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2804         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2805         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2806     #
2807     Ht = HO["Tangent"].asMatrix(Xb)
2808     BHT = B * Ht.T
2809     HBHTpR = R + Ht * BHT
2810     Innovation = Y - HXb
2811     #
2812     # Starting point of the optimisation
2813     Xini = numpy.zeros(Xb.shape)
2814     #
2815     # Definition of the cost function
2816     # ------------------------------
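    # In this PSAS (dual) formulation the control variable w has the size of
    # the observation vector: the state is recovered as X = Xb + B H^T w, and
    # the functional minimised below is
    #   J(w) = 0.5 * w^T (H B H^T + R) w - w^T (Y - H(Xb)) ,
    # whose gradient is (H B H^T + R) w - (Y - H(Xb)).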
2817     def CostFunction(w):
2818         _W = w.reshape((-1,1))
2819         if selfA._parameters["StoreInternalVariables"] or \
2820             selfA._toStore("CurrentState") or \
2821             selfA._toStore("CurrentOptimum"):
2822             selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
2823         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2824             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2825             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
2826         if selfA._toStore("InnovationAtCurrentState"):
2827             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2828         #
2829         Jb  = float( 0.5 * _W.T @ (HBHTpR @ _W) )
2830         Jo  = float( - _W.T @ Innovation )
2831         J   = Jb + Jo
2832         #
2833         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2834         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2835         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2836         selfA.StoredVariables["CostFunctionJ" ].store( J )
2837         if selfA._toStore("IndexOfOptimum") or \
2838             selfA._toStore("CurrentOptimum") or \
2839             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2840             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2841             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2842             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2843             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2844         if selfA._toStore("IndexOfOptimum"):
2845             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2846         if selfA._toStore("CurrentOptimum"):
2847             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2848         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2849             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2850         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2851             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2852         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2853             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2854         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2855             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2856         return J
2857     #
2858     def GradientOfCostFunction(w):
2859         _W = w.reshape((-1,1))
2860         GradJb  = HBHTpR @ _W
2861         GradJo  = - Innovation
2862         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2863         return GradJ
2864     #
2865     # Minimisation of the functional
2866     # --------------------------------
2867     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2868     #
2869     if selfA._parameters["Minimizer"] == "LBFGSB":
2870         if "0.19" <= scipy.version.version <= "1.1.0":
2871             import lbfgsbhlt as optimiseur
2872         else:
2873             import scipy.optimize as optimiseur
2874         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2875             func        = CostFunction,
2876             x0          = Xini,
2877             fprime      = GradientOfCostFunction,
2878             args        = (),
2879             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2880             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2881             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2882             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2883             iprint      = selfA._parameters["optiprint"],
2884             )
2885         nfeval = Informations['funcalls']
2886         rc     = Informations['warnflag']
2887     elif selfA._parameters["Minimizer"] == "TNC":
2888         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2889             func        = CostFunction,
2890             x0          = Xini,
2891             fprime      = GradientOfCostFunction,
2892             args        = (),
2893             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2894             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2895             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2896             ftol        = selfA._parameters["CostDecrementTolerance"],
2897             messages    = selfA._parameters["optmessages"],
2898             )
2899     elif selfA._parameters["Minimizer"] == "CG":
2900         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2901             f           = CostFunction,
2902             x0          = Xini,
2903             fprime      = GradientOfCostFunction,
2904             args        = (),
2905             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2906             gtol        = selfA._parameters["GradientNormTolerance"],
2907             disp        = selfA._parameters["optdisp"],
2908             full_output = True,
2909             )
2910     elif selfA._parameters["Minimizer"] == "NCG":
2911         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2912             f           = CostFunction,
2913             x0          = Xini,
2914             fprime      = GradientOfCostFunction,
2915             args        = (),
2916             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2917             avextol     = selfA._parameters["CostDecrementTolerance"],
2918             disp        = selfA._parameters["optdisp"],
2919             full_output = True,
2920             )
2921     elif selfA._parameters["Minimizer"] == "BFGS":
2922         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2923             f           = CostFunction,
2924             x0          = Xini,
2925             fprime      = GradientOfCostFunction,
2926             args        = (),
2927             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2928             gtol        = selfA._parameters["GradientNormTolerance"],
2929             disp        = selfA._parameters["optdisp"],
2930             full_output = True,
2931             )
2932     else:
2933         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2934     #
2935     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2936     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2937     #
2938     # Correction to work around a TNC bug on the returned Minimum
2939     # ----------------------------------------------------------------
2940     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2941         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2942     else:
2943         Minimum = Xb + BHT @ Minimum.reshape((-1,1))
2944     #
2945     Xa = Minimum
2946     #--------------------------
2947     #
2948     selfA.StoredVariables["Analysis"].store( Xa )
2949     #
2950     if selfA._toStore("OMA") or \
2951         selfA._toStore("SigmaObs2") or \
2952         selfA._toStore("SimulationQuantiles") or \
2953         selfA._toStore("SimulatedObservationAtOptimum"):
2954         if selfA._toStore("SimulatedObservationAtCurrentState"):
2955             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2956         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2957             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2958         else:
2959             HXa = Hm( Xa )
2960     #
2961     if selfA._toStore("APosterioriCovariance") or \
2962         selfA._toStore("SimulationQuantiles") or \
2963         selfA._toStore("JacobianMatrixAtOptimum") or \
2964         selfA._toStore("KalmanGainAtOptimum"):
2965         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2966         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2967     if selfA._toStore("APosterioriCovariance") or \
2968         selfA._toStore("SimulationQuantiles") or \
2969         selfA._toStore("KalmanGainAtOptimum"):
2970         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2971         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2972     if selfA._toStore("APosterioriCovariance") or \
2973         selfA._toStore("SimulationQuantiles"):
2974         BI = B.getI()
2975         RI = R.getI()
2976         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2977     if selfA._toStore("APosterioriCovariance"):
2978         selfA.StoredVariables["APosterioriCovariance"].store( A )
2979     if selfA._toStore("JacobianMatrixAtOptimum"):
2980         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2981     if selfA._toStore("KalmanGainAtOptimum"):
2982         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2983         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2984         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2985     #
2986     # Additional computations and/or storage
2987     # ---------------------------------------
2988     if selfA._toStore("Innovation") or \
2989         selfA._toStore("SigmaObs2") or \
2990         selfA._toStore("MahalanobisConsistency") or \
2991         selfA._toStore("OMB"):
2992         d  = Y - HXb
2993     if selfA._toStore("Innovation"):
2994         selfA.StoredVariables["Innovation"].store( d )
2995     if selfA._toStore("BMA"):
2996         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2997     if selfA._toStore("OMA"):
2998         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2999     if selfA._toStore("OMB"):
3000         selfA.StoredVariables["OMB"].store( d )
3001     if selfA._toStore("SigmaObs2"):
3002         TraceR = R.trace(Y.size)
3003         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3004     if selfA._toStore("MahalanobisConsistency"):
3005         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3006     if selfA._toStore("SimulationQuantiles"):
3007         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3008     if selfA._toStore("SimulatedObservationAtBackground"):
3009         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3010     if selfA._toStore("SimulatedObservationAtOptimum"):
3011         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3012     #
3013     return 0
3014
3015 # ==============================================================================
3016 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
3017     """
3018     Stochastic EnKF
3019     """
3020     if selfA._parameters["EstimationOf"] == "Parameters":
3021         selfA._parameters["StoreInternalVariables"] = True
3022     #
3023     # Operators
3024     H = HO["Direct"].appliedControledFormTo
3025     #
3026     if selfA._parameters["EstimationOf"] == "State":
3027         M = EM["Direct"].appliedControledFormTo
3028     #
3029     if CM is not None and "Tangent" in CM and U is not None:
3030         Cm = CM["Tangent"].asMatrix(Xb)
3031     else:
3032         Cm = None
3033     #
3034     # Observation duration and sizes
3035     if hasattr(Y,"stepnumber"):
3036         duration = Y.stepnumber()
3037         __p = numpy.cumprod(Y.shape())[-1]
3038     else:
3039         duration = 2
3040         __p = numpy.array(Y).size
3041     #
3042     # Precompute the inverses of B and R
3043     if selfA._parameters["StoreInternalVariables"] \
3044         or selfA._toStore("CostFunctionJ") \
3045         or selfA._toStore("CostFunctionJb") \
3046         or selfA._toStore("CostFunctionJo") \
3047         or selfA._toStore("CurrentOptimum") \
3048         or selfA._toStore("APosterioriCovariance"):
3049         BI = B.getI()
3050         RI = R.getI()
3051     #
3052     __n = Xb.size
3053     __m = selfA._parameters["NumberOfMembers"]
3054     #
3055     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
3056     else:                         Rn = R
3057     #
3058     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3059         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
3060         else:                         Pn = B
3061         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
3062         selfA.StoredVariables["Analysis"].store( Xb )
3063         if selfA._toStore("APosterioriCovariance"):
3064             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3065         selfA._setInternalState("seed", numpy.random.get_state())
3066     elif selfA._parameters["nextStep"]:
3067         Xn = selfA._getInternalState("Xn")
3068     #
3069     previousJMinimum = numpy.finfo(float).max
3070     #
3071     for step in range(duration-1):
3072         numpy.random.set_state(selfA._getInternalState("seed"))
3073         if hasattr(Y,"store"):
3074             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3075         else:
3076             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3077         #
3078         if U is not None:
3079             if hasattr(U,"store") and len(U)>1:
3080                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3081             elif hasattr(U,"store") and len(U)==1:
3082                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3083             else:
3084                 Un = numpy.asmatrix(numpy.ravel( U )).T
3085         else:
3086             Un = None
3087         #
3088         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
3089             Xn = CovarianceInflation( Xn,
3090                 selfA._parameters["InflationType"],
3091                 selfA._parameters["InflationFactor"],
3092                 )
3093         #
3094         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3095             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
3096                 argsAsSerie = True,
3097                 returnSerieAsArrayMatrix = True )
3098             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
3099             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3100                 argsAsSerie = True,
3101                 returnSerieAsArrayMatrix = True )
3102             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
3103                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3104                 Xn_predicted = Xn_predicted + Cm * Un
3105         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3106             # ---> By principle, M = Id, Q = 0
3107             Xn_predicted = EMX = Xn
3108             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3109                 argsAsSerie = True,
3110                 returnSerieAsArrayMatrix = True )
3111         #
3112         # Mean of forecast and observation of forecast
3113         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
3114         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3115         #
3116         #--------------------------
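        # Both variants below build an ensemble estimate of the Kalman gain
        # K = Pf H^T (H Pf H^T + R)^{-1} and update every member with perturbed
        # observations: "KalmanFilterFormula05" accumulates the forecast error
        # covariances member by member and draws the observation perturbations
        # explicitly, while "KalmanFilterFormula16" works on the normalised
        # anomalies EaX and EaY (the latter already including the perturbations
        # EpY).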
3117         if VariantM == "KalmanFilterFormula05":
3118             PfHT, HPfHT = 0., 0.
3119             for i in range(__m):
3120                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
3121                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
3122                 PfHT  += Exfi * Eyfi.T
3123                 HPfHT += Eyfi * Eyfi.T
3124             PfHT  = (1./(__m-1)) * PfHT
3125             HPfHT = (1./(__m-1)) * HPfHT
3126             Kn     = PfHT * ( R + HPfHT ).I
3127             del PfHT, HPfHT
3128             #
3129             for i in range(__m):
3130                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
3131                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
3132         #--------------------------
3133         elif VariantM == "KalmanFilterFormula16":
3134             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
3135             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3136             #
3137             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
3138             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
3139             #
3140             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
3141             #
3142             for i in range(__m):
3143                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
3144         #--------------------------
3145         else:
3146             raise ValueError("VariantM has to be chosen from the list of authorized methods.")
3147         #
3148         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3149             Xn = CovarianceInflation( Xn,
3150                 selfA._parameters["InflationType"],
3151                 selfA._parameters["InflationFactor"],
3152                 )
3153         #
3154         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
3155         #--------------------------
3156         selfA._setInternalState("Xn", Xn)
3157         selfA._setInternalState("seed", numpy.random.get_state())
3158         #--------------------------
3159         #
3160         if selfA._parameters["StoreInternalVariables"] \
3161             or selfA._toStore("CostFunctionJ") \
3162             or selfA._toStore("CostFunctionJb") \
3163             or selfA._toStore("CostFunctionJo") \
3164             or selfA._toStore("APosterioriCovariance") \
3165             or selfA._toStore("InnovationAtCurrentAnalysis") \
3166             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3167             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3168             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
3169             _Innovation = Ynpu - _HXa
3170         #
3171         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3172         # ---> with analysis
3173         selfA.StoredVariables["Analysis"].store( Xa )
3174         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3175             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3176         if selfA._toStore("InnovationAtCurrentAnalysis"):
3177             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3178         # ---> with current state
3179         if selfA._parameters["StoreInternalVariables"] \
3180             or selfA._toStore("CurrentState"):
3181             selfA.StoredVariables["CurrentState"].store( Xn )
3182         if selfA._toStore("ForecastState"):
3183             selfA.StoredVariables["ForecastState"].store( EMX )
3184         if selfA._toStore("ForecastCovariance"):
3185             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
3186         if selfA._toStore("BMA"):
3187             selfA.StoredVariables["BMA"].store( EMX - Xa )
3188         if selfA._toStore("InnovationAtCurrentState"):
3189             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
3190         if selfA._toStore("SimulatedObservationAtCurrentState") \
3191             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3192             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3193         # ---> others
3194         if selfA._parameters["StoreInternalVariables"] \
3195             or selfA._toStore("CostFunctionJ") \
3196             or selfA._toStore("CostFunctionJb") \
3197             or selfA._toStore("CostFunctionJo") \
3198             or selfA._toStore("CurrentOptimum") \
3199             or selfA._toStore("APosterioriCovariance"):
3200             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3201             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3202             J   = Jb + Jo
3203             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3204             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3205             selfA.StoredVariables["CostFunctionJ" ].store( J )
3206             #
3207             if selfA._toStore("IndexOfOptimum") \
3208                 or selfA._toStore("CurrentOptimum") \
3209                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3210                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3211                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3212                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3213                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3214             if selfA._toStore("IndexOfOptimum"):
3215                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3216             if selfA._toStore("CurrentOptimum"):
3217                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3218             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3219                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3220             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3221                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3222             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3223                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3224             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3225                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3226         if selfA._toStore("APosterioriCovariance"):
3227             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
3228         if selfA._parameters["EstimationOf"] == "Parameters" \
3229             and J < previousJMinimum:
3230             previousJMinimum    = J
3231             XaMin               = Xa
3232             if selfA._toStore("APosterioriCovariance"):
3233                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3234         # ---> For the smoothers
3235         if selfA._toStore("CurrentEnsembleState"):
3236             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3237     #
3238     # Additional final storage of the optimum for parameter estimation
3239     # ----------------------------------------------------------------------
3240     if selfA._parameters["EstimationOf"] == "Parameters":
3241         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3242         selfA.StoredVariables["Analysis"].store( XaMin )
3243         if selfA._toStore("APosterioriCovariance"):
3244             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3245         if selfA._toStore("BMA"):
3246             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3247     #
3248     return 0
3249
3250 # ==============================================================================
3251 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3252     """
3253     3DVAR
3254     """
3255     #
3256     # Initializations
3257     # ---------------
3258     #
3259     # Operators
3260     Hm = HO["Direct"].appliedTo
3261     Ha = HO["Adjoint"].appliedInXTo
3262     #
3263     # Possible use of a precomputed H(Xb) vector
3264     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3265         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3266     else:
3267         HXb = Hm( Xb )
3268     HXb = HXb.reshape((-1,1))
3269     if Y.size != HXb.size:
3270         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they must be identical."%(Y.size,HXb.size))
3271     if max(Y.shape) != max(HXb.shape):
3272         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they must be identical."%(Y.shape,HXb.shape))
3273     #
3274     if selfA._toStore("JacobianMatrixAtBackground"):
3275         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3276         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3277         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3278     #
3279     # Precompute the inverses of B and R
3280     BI = B.getI()
3281     RI = R.getI()
3282     #
3283     # Starting point of the optimization
3284     Xini = selfA._parameters["InitializationPoint"]
3285     #
3286     # Definition of the cost function
3287     # ------------------------------
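         # The cost function evaluated below is the classical 3DVAR functional
         #   J(x) = 1/2 (x - Xb)^T B^{-1} (x - Xb) + 1/2 (Y - H(x))^T R^{-1} (Y - H(x))
         # whose two terms Jb and Jo are computed explicitly with BI and RI.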
3288     def CostFunction(x):
3289         _X  = numpy.ravel( x ).reshape((-1,1))
3290         if selfA._parameters["StoreInternalVariables"] or \
3291             selfA._toStore("CurrentState") or \
3292             selfA._toStore("CurrentOptimum"):
3293             selfA.StoredVariables["CurrentState"].store( _X )
3294         _HX = Hm( _X ).reshape((-1,1))
3295         _Innovation = Y - _HX
3296         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3297             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3298             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3299         if selfA._toStore("InnovationAtCurrentState"):
3300             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3301         #
3302         Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3303         Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3304         J   = Jb + Jo
3305         #
3306         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3307         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3308         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3309         selfA.StoredVariables["CostFunctionJ" ].store( J )
3310         if selfA._toStore("IndexOfOptimum") or \
3311             selfA._toStore("CurrentOptimum") or \
3312             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3313             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3314             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3315             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3316             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3317         if selfA._toStore("IndexOfOptimum"):
3318             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3319         if selfA._toStore("CurrentOptimum"):
3320             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3321         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3322             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3323         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3324             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3325         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3326             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3327         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3328             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3329         return J
3330     #
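         # Gradient of the 3DVAR functional with respect to x:
         #   grad J(x) = B^{-1} (x - Xb) - H'^T R^{-1} (Y - H(x))
         # the adjoint operator Ha providing the product by H'^T.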
3331     def GradientOfCostFunction(x):
3332         _X      = x.reshape((-1,1))
3333         _HX     = Hm( _X ).reshape((-1,1))
3334         GradJb  = BI * (_X - Xb)
3335         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3336         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3337         return GradJ
3338     #
3339     # Minimization of the functional
3340     # --------------------------------
3341     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3342     #
3343     if selfA._parameters["Minimizer"] == "LBFGSB":
3344         if "0.19" <= scipy.version.version <= "1.1.0":
3345             import lbfgsbhlt as optimiseur
3346         else:
3347             import scipy.optimize as optimiseur
3348         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3349             func        = CostFunction,
3350             x0          = Xini,
3351             fprime      = GradientOfCostFunction,
3352             args        = (),
3353             bounds      = selfA._parameters["Bounds"],
3354             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3355             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3356             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3357             iprint      = selfA._parameters["optiprint"],
3358             )
3359         nfeval = Informations['funcalls']
3360         rc     = Informations['warnflag']
3361     elif selfA._parameters["Minimizer"] == "TNC":
3362         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3363             func        = CostFunction,
3364             x0          = Xini,
3365             fprime      = GradientOfCostFunction,
3366             args        = (),
3367             bounds      = selfA._parameters["Bounds"],
3368             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3369             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3370             ftol        = selfA._parameters["CostDecrementTolerance"],
3371             messages    = selfA._parameters["optmessages"],
3372             )
3373     elif selfA._parameters["Minimizer"] == "CG":
3374         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3375             f           = CostFunction,
3376             x0          = Xini,
3377             fprime      = GradientOfCostFunction,
3378             args        = (),
3379             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3380             gtol        = selfA._parameters["GradientNormTolerance"],
3381             disp        = selfA._parameters["optdisp"],
3382             full_output = True,
3383             )
3384     elif selfA._parameters["Minimizer"] == "NCG":
3385         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3386             f           = CostFunction,
3387             x0          = Xini,
3388             fprime      = GradientOfCostFunction,
3389             args        = (),
3390             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3391             avextol     = selfA._parameters["CostDecrementTolerance"],
3392             disp        = selfA._parameters["optdisp"],
3393             full_output = True,
3394             )
3395     elif selfA._parameters["Minimizer"] == "BFGS":
3396         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3397             f           = CostFunction,
3398             x0          = Xini,
3399             fprime      = GradientOfCostFunction,
3400             args        = (),
3401             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3402             gtol        = selfA._parameters["GradientNormTolerance"],
3403             disp        = selfA._parameters["optdisp"],
3404             full_output = True,
3405             )
3406     else:
3407         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3408     #
3409     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3410     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3411     #
3412     # Workaround for a TNC bug on the returned Minimum
3413     # ----------------------------------------------------------------
3414     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3415         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3416     #
3417     Xa = Minimum
3418     #--------------------------
3419     #
3420     selfA.StoredVariables["Analysis"].store( Xa )
3421     #
3422     if selfA._toStore("OMA") or \
3423         selfA._toStore("SigmaObs2") or \
3424         selfA._toStore("SimulationQuantiles") or \
3425         selfA._toStore("SimulatedObservationAtOptimum"):
3426         if selfA._toStore("SimulatedObservationAtCurrentState"):
3427             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3428         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3429             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3430         else:
3431             HXa = Hm( Xa )
3432     #
3433     if selfA._toStore("APosterioriCovariance") or \
3434         selfA._toStore("SimulationQuantiles") or \
3435         selfA._toStore("JacobianMatrixAtOptimum") or \
3436         selfA._toStore("KalmanGainAtOptimum"):
3437         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3438         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3439     if selfA._toStore("APosterioriCovariance") or \
3440         selfA._toStore("SimulationQuantiles") or \
3441         selfA._toStore("KalmanGainAtOptimum"):
3442         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3443         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3444     if selfA._toStore("APosterioriCovariance") or \
3445         selfA._toStore("SimulationQuantiles"):
3446         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3447     if selfA._toStore("APosterioriCovariance"):
3448         selfA.StoredVariables["APosterioriCovariance"].store( A )
3449     if selfA._toStore("JacobianMatrixAtOptimum"):
3450         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3451     if selfA._toStore("KalmanGainAtOptimum"):
3452         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3453         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3454         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3455     #
3456     # Additional computations and/or storage
3457     # ---------------------------------------
3458     if selfA._toStore("Innovation") or \
3459         selfA._toStore("SigmaObs2") or \
3460         selfA._toStore("MahalanobisConsistency") or \
3461         selfA._toStore("OMB"):
3462         d  = Y - HXb
3463     if selfA._toStore("Innovation"):
3464         selfA.StoredVariables["Innovation"].store( d )
3465     if selfA._toStore("BMA"):
3466         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3467     if selfA._toStore("OMA"):
3468         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3469     if selfA._toStore("OMB"):
3470         selfA.StoredVariables["OMB"].store( d )
3471     if selfA._toStore("SigmaObs2"):
3472         TraceR = R.trace(Y.size)
3473         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3474     if selfA._toStore("MahalanobisConsistency"):
3475         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3476     if selfA._toStore("SimulationQuantiles"):
3477         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3478     if selfA._toStore("SimulatedObservationAtBackground"):
3479         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3480     if selfA._toStore("SimulatedObservationAtOptimum"):
3481         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3482     #
3483     return 0
3484
3485 # ==============================================================================
3486 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3487     """
3488     4DVAR
3489     """
3490     #
3491     # Initializations
3492     # ---------------
3493     #
3494     # Operators
3495     Hm = HO["Direct"].appliedControledFormTo
3496     Mm = EM["Direct"].appliedControledFormTo
3497     #
3498     if CM is not None and "Tangent" in CM and U is not None:
3499         Cm = CM["Tangent"].asMatrix(Xb)
3500     else:
3501         Cm = None
3502     #
3503     def Un(_step):
3504         if U is not None:
3505             if hasattr(U,"store") and 1<=_step<len(U) :
3506                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
3507             elif hasattr(U,"store") and len(U)==1:
3508                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3509             else:
3510                 _Un = numpy.asmatrix(numpy.ravel( U )).T
3511         else:
3512             _Un = None
3513         return _Un
3514     def CmUn(_xn,_un):
3515         if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is counted twice!
3516             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3517             _CmUn = _Cm * _un
3518         else:
3519             _CmUn = 0.
3520         return _CmUn
3521     #
3522     # Remark: the observations are used starting from time step number 1,
3523     # and are stored in Yo indexed accordingly.
3524     # Step 0 is therefore not used, since the first stage begins
3525     # with the observation of step 1.
3526     #
3527     # Number of steps equal to the number of observation steps
3528     if hasattr(Y,"stepnumber"):
3529         duration = Y.stepnumber()
3530     else:
3531         duration = 2
3532     #
3533     # Precompute the inverses of B and R
3534     BI = B.getI()
3535     RI = R.getI()
3536     #
3537     # Starting point of the optimization
3538     Xini = selfA._parameters["InitializationPoint"]
3539     #
3540     # Definition of the cost function
3541     # ------------------------------
3542     selfA.DirectCalculation = [None,] # Step 0 is not observed
3543     selfA.DirectInnovation  = [None,] # Step 0 is not observed
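         # The 4DVAR cost function propagates the initial state over the whole
         # assimilation window with the evolution operator Mm and accumulates
         # the observation misfits:
         #   J(x) = 1/2 (x - Xb)^T B^{-1} (x - Xb)
         #          + 1/2 sum_k (Y_k - H(x_k))^T R^{-1} (Y_k - H(x_k)),  with x_k = M(x_{k-1})
         # The trajectory and the innovations are kept for the adjoint sweep below.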
3544     def CostFunction(x):
3545         _X  = numpy.asmatrix(numpy.ravel( x )).T
3546         if selfA._parameters["StoreInternalVariables"] or \
3547             selfA._toStore("CurrentState") or \
3548             selfA._toStore("CurrentOptimum"):
3549             selfA.StoredVariables["CurrentState"].store( _X )
3550         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3551         selfA.DirectCalculation = [None,]
3552         selfA.DirectInnovation  = [None,]
3553         Jo  = 0.
3554         _Xn = _X
3555         for step in range(0,duration-1):
3556             if hasattr(Y,"store"):
3557                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
3558             else:
3559                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
3560             _Un = Un(step)
3561             #
3562             # Evolution step
3563             if selfA._parameters["EstimationOf"] == "State":
3564                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
3565             elif selfA._parameters["EstimationOf"] == "Parameters":
3566                 pass
3567             #
3568             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3569                 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3570             #
3571             # Difference-to-observations step
3572             if selfA._parameters["EstimationOf"] == "State":
3573                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
3574             elif selfA._parameters["EstimationOf"] == "Parameters":
3575                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
3576             #
3577             # Storage of the state
3578             selfA.DirectCalculation.append( _Xn )
3579             selfA.DirectInnovation.append( _YmHMX )
3580             #
3581             # Accumulation into the observation functional
3582             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
3583         J = Jb + Jo
3584         #
3585         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3586         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3587         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3588         selfA.StoredVariables["CostFunctionJ" ].store( J )
3589         if selfA._toStore("IndexOfOptimum") or \
3590             selfA._toStore("CurrentOptimum") or \
3591             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3592             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3593             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3594             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3595         if selfA._toStore("IndexOfOptimum"):
3596             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3597         if selfA._toStore("CurrentOptimum"):
3598             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3599         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3600             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3601         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3602             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3603         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3604             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3605         return J
3606     #
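         # The gradient is obtained by a backward (adjoint) sweep on the stored
         # trajectory: each innovation is weighted by R^{-1}, brought back to the
         # state space by the observation adjoint Ha, then propagated backwards
         # in time by the evolution adjoint Ma.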
3607     def GradientOfCostFunction(x):
3608         _X      = numpy.asmatrix(numpy.ravel( x )).T
3609         GradJb  = BI * (_X - Xb)
3610         GradJo  = 0.
3611         for step in range(duration-1,0,-1):
3612             # Retrieve the last stored evolution state
3613             _Xn = selfA.DirectCalculation.pop()
3614             # Retrieve the last stored innovation
3615             _YmHMX = selfA.DirectInnovation.pop()
3616             # Computation of the adjoints
3617             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3618             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3619             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3620             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3621             # Computation of the gradient by the adjoint state
3622             GradJo = GradJo + Ha * (RI * _YmHMX) # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
3623             GradJo = Ma * GradJo                 # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
3624         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3625         return GradJ
3626     #
3627     # Minimization of the functional
3628     # --------------------------------
3629     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3630     #
3631     if selfA._parameters["Minimizer"] == "LBFGSB":
3632         if "0.19" <= scipy.version.version <= "1.1.0":
3633             import lbfgsbhlt as optimiseur
3634         else:
3635             import scipy.optimize as optimiseur
3636         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3637             func        = CostFunction,
3638             x0          = Xini,
3639             fprime      = GradientOfCostFunction,
3640             args        = (),
3641             bounds      = selfA._parameters["Bounds"],
3642             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3643             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3644             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3645             iprint      = selfA._parameters["optiprint"],
3646             )
3647         nfeval = Informations['funcalls']
3648         rc     = Informations['warnflag']
3649     elif selfA._parameters["Minimizer"] == "TNC":
3650         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3651             func        = CostFunction,
3652             x0          = Xini,
3653             fprime      = GradientOfCostFunction,
3654             args        = (),
3655             bounds      = selfA._parameters["Bounds"],
3656             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3657             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3658             ftol        = selfA._parameters["CostDecrementTolerance"],
3659             messages    = selfA._parameters["optmessages"],
3660             )
3661     elif selfA._parameters["Minimizer"] == "CG":
3662         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3663             f           = CostFunction,
3664             x0          = Xini,
3665             fprime      = GradientOfCostFunction,
3666             args        = (),
3667             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3668             gtol        = selfA._parameters["GradientNormTolerance"],
3669             disp        = selfA._parameters["optdisp"],
3670             full_output = True,
3671             )
3672     elif selfA._parameters["Minimizer"] == "NCG":
3673         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3674             f           = CostFunction,
3675             x0          = Xini,
3676             fprime      = GradientOfCostFunction,
3677             args        = (),
3678             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3679             avextol     = selfA._parameters["CostDecrementTolerance"],
3680             disp        = selfA._parameters["optdisp"],
3681             full_output = True,
3682             )
3683     elif selfA._parameters["Minimizer"] == "BFGS":
3684         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3685             f           = CostFunction,
3686             x0          = Xini,
3687             fprime      = GradientOfCostFunction,
3688             args        = (),
3689             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3690             gtol        = selfA._parameters["GradientNormTolerance"],
3691             disp        = selfA._parameters["optdisp"],
3692             full_output = True,
3693             )
3694     else:
3695         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3696     #
3697     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3698     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3699     #
3700     # Workaround for a TNC bug on the returned Minimum
3701     # ----------------------------------------------------------------
3702     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3703         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3704     #
3705     # Obtaining the analysis
3706     # ----------------------
3707     Xa = Minimum
3708     #
3709     selfA.StoredVariables["Analysis"].store( Xa )
3710     #
3711     # Additional computations and/or storage
3712     # ---------------------------------------
3713     if selfA._toStore("BMA"):
3714         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3715     #
3716     return 0
3717
3718 # ==============================================================================
3719 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3720     """
3721     Standard Kalman Filter
3722     """
3723     if selfA._parameters["EstimationOf"] == "Parameters":
3724         selfA._parameters["StoreInternalVariables"] = True
3725     #
3726     # Operators
3727     # ----------
3728     Ht = HO["Tangent"].asMatrix(Xb)
3729     Ha = HO["Adjoint"].asMatrix(Xb)
3730     #
3731     if selfA._parameters["EstimationOf"] == "State":
3732         Mt = EM["Tangent"].asMatrix(Xb)
3733         Ma = EM["Adjoint"].asMatrix(Xb)
3734     #
3735     if CM is not None and "Tangent" in CM and U is not None:
3736         Cm = CM["Tangent"].asMatrix(Xb)
3737     else:
3738         Cm = None
3739     #
3740     # Observation duration and sizes
3741     if hasattr(Y,"stepnumber"):
3742         duration = Y.stepnumber()
3743         __p = numpy.cumprod(Y.shape())[-1]
3744     else:
3745         duration = 2
3746         __p = numpy.array(Y).size
3747     #
3748     # Precompute the inverses of B and R
3749     if selfA._parameters["StoreInternalVariables"] \
3750         or selfA._toStore("CostFunctionJ") \
3751         or selfA._toStore("CostFunctionJb") \
3752         or selfA._toStore("CostFunctionJo") \
3753         or selfA._toStore("CurrentOptimum") \
3754         or selfA._toStore("APosterioriCovariance"):
3755         BI = B.getI()
3756         RI = R.getI()
3757     #
3758     __n = Xb.size
3759     #
3760     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3761         Xn = Xb
3762         Pn = B
3763         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3764         selfA.StoredVariables["Analysis"].store( Xb )
3765         if selfA._toStore("APosterioriCovariance"):
3766             if hasattr(B,"asfullmatrix"):
3767                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3768             else:
3769                 selfA.StoredVariables["APosterioriCovariance"].store( B )
3770         selfA._setInternalState("seed", numpy.random.get_state())
3771     elif selfA._parameters["nextStep"]:
3772         Xn = selfA._getInternalState("Xn")
3773         Pn = selfA._getInternalState("Pn")
3774     #
3775     if selfA._parameters["EstimationOf"] == "Parameters":
3776         XaMin            = Xn
3777         previousJMinimum = numpy.finfo(float).max
3778     #
3779     for step in range(duration-1):
3780         if hasattr(Y,"store"):
3781             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3782         else:
3783             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3784         #
3785         if U is not None:
3786             if hasattr(U,"store") and len(U)>1:
3787                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3788             elif hasattr(U,"store") and len(U)==1:
3789                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3790             else:
3791                 Un = numpy.asmatrix(numpy.ravel( U )).T
3792         else:
3793             Un = None
3794         #
3795         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3796             Xn_predicted = Mt * Xn
3797             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
3798                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3799                 Xn_predicted = Xn_predicted + Cm * Un
3800             Pn_predicted = Q + Mt * (Pn * Ma)
3801         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3802             # ---> By principle, M = Id, Q = 0
3803             Xn_predicted = Xn
3804             Pn_predicted = Pn
3805         #
3806         if selfA._parameters["EstimationOf"] == "State":
3807             HX_predicted = Ht * Xn_predicted
3808             _Innovation  = Ynpu - HX_predicted
3809         elif selfA._parameters["EstimationOf"] == "Parameters":
3810             HX_predicted = Ht * Xn_predicted
3811             _Innovation  = Ynpu - HX_predicted
3812             if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
3813                 _Innovation = _Innovation - Cm * Un
3814         #
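             # Standard Kalman analysis step:
             #   K  = Pf H^T (R + H Pf H^T)^{-1}
             #   Xa = Xf + K (Y - H Xf)
             #   Pa = (I - K H) Pf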
3815         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3816         Xn = Xn_predicted + Kn * _Innovation
3817         Pn = Pn_predicted - Kn * Ht * Pn_predicted
3818         #
3819         Xa = Xn # Pointers
3820         #--------------------------
3821         selfA._setInternalState("Xn", Xn)
3822         selfA._setInternalState("Pn", Pn)
3823         #--------------------------
3824         #
3825         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3826         # ---> with the analysis
3827         selfA.StoredVariables["Analysis"].store( Xa )
3828         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3829             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3830         if selfA._toStore("InnovationAtCurrentAnalysis"):
3831             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3832         # ---> with the current state
3833         if selfA._parameters["StoreInternalVariables"] \
3834             or selfA._toStore("CurrentState"):
3835             selfA.StoredVariables["CurrentState"].store( Xn )
3836         if selfA._toStore("ForecastState"):
3837             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3838         if selfA._toStore("ForecastCovariance"):
3839             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3840         if selfA._toStore("BMA"):
3841             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3842         if selfA._toStore("InnovationAtCurrentState"):
3843             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3844         if selfA._toStore("SimulatedObservationAtCurrentState") \
3845             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3846             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3847         # ---> others
3848         if selfA._parameters["StoreInternalVariables"] \
3849             or selfA._toStore("CostFunctionJ") \
3850             or selfA._toStore("CostFunctionJb") \
3851             or selfA._toStore("CostFunctionJo") \
3852             or selfA._toStore("CurrentOptimum") \
3853             or selfA._toStore("APosterioriCovariance"):
3854             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3855             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3856             J   = Jb + Jo
3857             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3858             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3859             selfA.StoredVariables["CostFunctionJ" ].store( J )
3860             #
3861             if selfA._toStore("IndexOfOptimum") \
3862                 or selfA._toStore("CurrentOptimum") \
3863                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3864                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3865                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3866                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3867                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3868             if selfA._toStore("IndexOfOptimum"):
3869                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3870             if selfA._toStore("CurrentOptimum"):
3871                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3872             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3873                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3874             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3875                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3876             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3877                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3878             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3879                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3880         if selfA._toStore("APosterioriCovariance"):
3881             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3882         if selfA._parameters["EstimationOf"] == "Parameters" \
3883             and J < previousJMinimum:
3884             previousJMinimum    = J
3885             XaMin               = Xa
3886             if selfA._toStore("APosterioriCovariance"):
3887                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3888     #
3889     # Additional final storage of the optimum for parameter estimation
3890     # ----------------------------------------------------------------------
3891     if selfA._parameters["EstimationOf"] == "Parameters":
3892         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3893         selfA.StoredVariables["Analysis"].store( XaMin )
3894         if selfA._toStore("APosterioriCovariance"):
3895             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3896         if selfA._toStore("BMA"):
3897             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3898     #
3899     return 0
3900
3901 # ==============================================================================
3902 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3903     """
3904     Unscented Kalman Filter
3905     """
3906     if selfA._parameters["EstimationOf"] == "Parameters":
3907         selfA._parameters["StoreInternalVariables"] = True
3908     #
3909     L     = Xb.size
3910     Alpha = selfA._parameters["Alpha"]
3911     Beta  = selfA._parameters["Beta"]
3912     if selfA._parameters["Kappa"] == 0:
3913         if selfA._parameters["EstimationOf"] == "State":
3914             Kappa = 0
3915         elif selfA._parameters["EstimationOf"] == "Parameters":
3916             Kappa = 3 - L
3917     else:
3918         Kappa = selfA._parameters["Kappa"]
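         # Scaling of the unscented transform: Lambda = Alpha^2 (L + Kappa) - L,
         # and Gamma = sqrt(L + Lambda) sets the spread of the sigma points.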
3919     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3920     Gamma  = math.sqrt( L + Lambda )
3921     #
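         # Weights of the unscented transform: Wm for the means and Wc for the
         # covariances; Wm[0] = Lambda/(L+Lambda), Wc[0] adds (1 - Alpha^2 + Beta),
         # and all the other weights are equal to 1/(2(L+Lambda)).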
3922     Ww = []
3923     Ww.append( 0. )
3924     for i in range(2*L):
3925         Ww.append( 1. / (2.*(L + Lambda)) )
3926     #
3927     Wm = numpy.array( Ww )
3928     Wm[0] = Lambda / (L + Lambda)
3929     Wc = numpy.array( Ww )
3930     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
3931     #
3932     # Operators
3933     Hm = HO["Direct"].appliedControledFormTo
3934     #
3935     if selfA._parameters["EstimationOf"] == "State":
3936         Mm = EM["Direct"].appliedControledFormTo
3937     #
3938     if CM is not None and "Tangent" in CM and U is not None:
3939         Cm = CM["Tangent"].asMatrix(Xb)
3940     else:
3941         Cm = None
3942     #
3943     # Observation duration and sizes
3944     if hasattr(Y,"stepnumber"):
3945         duration = Y.stepnumber()
3946         __p = numpy.cumprod(Y.shape())[-1]
3947     else:
3948         duration = 2
3949         __p = numpy.array(Y).size
3950     #
3951     # Precompute the inverses of B and R
3952     if selfA._parameters["StoreInternalVariables"] \
3953         or selfA._toStore("CostFunctionJ") \
3954         or selfA._toStore("CostFunctionJb") \
3955         or selfA._toStore("CostFunctionJo") \
3956         or selfA._toStore("CurrentOptimum") \
3957         or selfA._toStore("APosterioriCovariance"):
3958         BI = B.getI()
3959         RI = R.getI()
3960     #
3961     __n = Xb.size
3962     #
3963     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3964         Xn = Xb
3965         if hasattr(B,"asfullmatrix"):
3966             Pn = B.asfullmatrix(__n)
3967         else:
3968             Pn = B
3969         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3970         selfA.StoredVariables["Analysis"].store( Xb )
3971         if selfA._toStore("APosterioriCovariance"):
3972             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3973     elif selfA._parameters["nextStep"]:
3974         Xn = selfA._getInternalState("Xn")
3975         Pn = selfA._getInternalState("Pn")
3976     #
3977     if selfA._parameters["EstimationOf"] == "Parameters":
3978         XaMin            = Xn
3979         previousJMinimum = numpy.finfo(float).max
3980     #
3981     for step in range(duration-1):
3982         if hasattr(Y,"store"):
3983             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3984         else:
3985             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3986         #
3987         if U is not None:
3988             if hasattr(U,"store") and len(U)>1:
3989                 Un = numpy.ravel( U[step] ).reshape((-1,1))
3990             elif hasattr(U,"store") and len(U)==1:
3991                 Un = numpy.ravel( U[0] ).reshape((-1,1))
3992             else:
3993                 Un = numpy.ravel( U ).reshape((-1,1))
3994         else:
3995             Un = None
3996         #
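             # Generation of the 2L+1 sigma points around Xn, spread by Gamma
             # along the columns of the matrix square root of Pn.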
3997         Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
3998         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
3999         nbSpts = 2*Xn.size+1
4000         #
4001         XEtnnp = []
4002         for point in range(nbSpts):
4003             if selfA._parameters["EstimationOf"] == "State":
4004                 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
4005                 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
4006                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4007                     XEtnnpi = XEtnnpi + Cm * Un
4008             elif selfA._parameters["EstimationOf"] == "Parameters":
4009                 # ---> By principle, M = Id, Q = 0
4010                 XEtnnpi = Xnp[:,point]
4011             XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
4012         XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
4013         #
4014         Xncm = ( XEtnnp * Wm ).sum(axis=1)
4015         #
4016         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
4017         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4018         for point in range(nbSpts):
4019             Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
4020         #
4021         Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
4022         #
4023         Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
4024         #
4025         Ynnp = []
4026         for point in range(nbSpts):
4027             if selfA._parameters["EstimationOf"] == "State":
4028                 Ynnpi = Hm( (Xnnp[:,point], None) )
4029             elif selfA._parameters["EstimationOf"] == "Parameters":
4030                 Ynnpi = Hm( (Xnnp[:,point], Un) )
4031             Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
4032         Ynnp = numpy.concatenate( Ynnp, axis=1 )
4033         #
4034         Yncm = ( Ynnp * Wm ).sum(axis=1)
4035         #
4036         Pyyn = R
4037         Pxyn = 0.
4038         for point in range(nbSpts):
4039             Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4040             Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4041         #
4042         _Innovation  = Ynpu - Yncm.reshape((-1,1))
4043         if selfA._parameters["EstimationOf"] == "Parameters":
4044             if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
4045                 _Innovation = _Innovation - Cm * Un
4046         #
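             # Unscented Kalman update with the cross covariance Pxyn and the
             # innovation covariance Pyyn:
             #   K  = Pxy Pyy^{-1},  Xa = Xf + K (Y - Yf),  Pa = Pf - K Pyy K^T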
4047         Kn = Pxyn * Pyyn.I
4048         Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
4049         Pn = Pnm - Kn * Pyyn * Kn.T
4050         #
4051         Xa = Xn # Pointers
4052         #--------------------------
4053         selfA._setInternalState("Xn", Xn)
4054         selfA._setInternalState("Pn", Pn)
4055         #--------------------------
4056         #
4057         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4058         # ---> with the analysis
4059         selfA.StoredVariables["Analysis"].store( Xa )
4060         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4061             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4062         if selfA._toStore("InnovationAtCurrentAnalysis"):
4063             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4064         # ---> with the current state
4065         if selfA._parameters["StoreInternalVariables"] \
4066             or selfA._toStore("CurrentState"):
4067             selfA.StoredVariables["CurrentState"].store( Xn )
4068         if selfA._toStore("ForecastState"):
4069             selfA.StoredVariables["ForecastState"].store( Xncm )
4070         if selfA._toStore("ForecastCovariance"):
4071             selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4072         if selfA._toStore("BMA"):
4073             selfA.StoredVariables["BMA"].store( Xncm - Xa )
4074         if selfA._toStore("InnovationAtCurrentState"):
4075             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4076         if selfA._toStore("SimulatedObservationAtCurrentState") \
4077             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4078             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4079         # ---> others
4080         if selfA._parameters["StoreInternalVariables"] \
4081             or selfA._toStore("CostFunctionJ") \
4082             or selfA._toStore("CostFunctionJb") \
4083             or selfA._toStore("CostFunctionJo") \
4084             or selfA._toStore("CurrentOptimum") \
4085             or selfA._toStore("APosterioriCovariance"):
4086             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
4087             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4088             J   = Jb + Jo
4089             selfA.StoredVariables["CostFunctionJb"].store( Jb )
4090             selfA.StoredVariables["CostFunctionJo"].store( Jo )
4091             selfA.StoredVariables["CostFunctionJ" ].store( J )
4092             #
4093             if selfA._toStore("IndexOfOptimum") \
4094                 or selfA._toStore("CurrentOptimum") \
4095                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4096                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4097                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4098                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4099                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4100             if selfA._toStore("IndexOfOptimum"):
4101                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4102             if selfA._toStore("CurrentOptimum"):
4103                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4104             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4105                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4106             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4107                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4108             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4109                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4110             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4111                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4112         if selfA._toStore("APosterioriCovariance"):
4113             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4114         if selfA._parameters["EstimationOf"] == "Parameters" \
4115             and J < previousJMinimum:
4116             previousJMinimum    = J
4117             XaMin               = Xa
4118             if selfA._toStore("APosterioriCovariance"):
4119                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4120     #
4121     # Additional final storage of the optimum for parameter estimation
4122     # ----------------------------------------------------------------------
4123     if selfA._parameters["EstimationOf"] == "Parameters":
4124         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4125         selfA.StoredVariables["Analysis"].store( XaMin )
4126         if selfA._toStore("APosterioriCovariance"):
4127             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4128         if selfA._toStore("BMA"):
4129             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4130     #
4131     return 0
4132
4133 # ==============================================================================
4134 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4135     """
4136     3DVAR variational analysis with no inversion of B
4137     """
4138     #
4139     # Initializations
4140     # ---------------
4141     #
4142     # Operators
4143     Hm = HO["Direct"].appliedTo
4144     Ha = HO["Adjoint"].appliedInXTo
4145     #
4146     # Precompute the transpose of B and the inverse of R
4147     BT = B.getT()
4148     RI = R.getI()
4149     #
4150     # Starting point of the optimization
4151     Xini = numpy.zeros(Xb.shape)
4152     #
4153     # Definition of the cost function
4154     # ------------------------------
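         # Change of variable x = Xb + B v: the background term becomes
         #   Jb(v) = 1/2 v^T B v
         # so only the transpose of B is needed in the cost function, and B is
         # never inverted ("no inversion of B" formulation).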
4155     def CostFunction(v):
4156         _V = numpy.asmatrix(numpy.ravel( v )).T
4157         _X = Xb + B * _V
4158         if selfA._parameters["StoreInternalVariables"] or \
4159             selfA._toStore("CurrentState") or \
4160             selfA._toStore("CurrentOptimum"):
4161             selfA.StoredVariables["CurrentState"].store( _X )
4162         _HX = Hm( _X )
4163         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
4164         _Innovation = Y - _HX
4165         if selfA._toStore("SimulatedObservationAtCurrentState") or \
4166             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4167             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4168         if selfA._toStore("InnovationAtCurrentState"):
4169             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4170         #
4171         Jb  = float( 0.5 * _V.T * BT * _V )
4172         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
4173         J   = Jb + Jo
4174         #
4175         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4176         selfA.StoredVariables["CostFunctionJb"].store( Jb )
4177         selfA.StoredVariables["CostFunctionJo"].store( Jo )
4178         selfA.StoredVariables["CostFunctionJ" ].store( J )
4179         if selfA._toStore("IndexOfOptimum") or \
4180             selfA._toStore("CurrentOptimum") or \
4181             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4182             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4183             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4184             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4185             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4186         if selfA._toStore("IndexOfOptimum"):
4187             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4188         if selfA._toStore("CurrentOptimum"):
4189             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4190         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4191             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4192         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4193             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4194         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4195             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4196         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4197             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4198         return J
4199     #
4200     def GradientOfCostFunction(v):
4201         _V = v.reshape((-1,1))
4202         _X = Xb + (B @ _V).reshape((-1,1))
4203         _HX     = Hm( _X ).reshape((-1,1))
4204         GradJb  = BT * _V
4205         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
4206         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4207         return GradJ
4208     #
4209     # Minimization of the functional
4210     # --------------------------------
4211     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
4212     #
4213     if selfA._parameters["Minimizer"] == "LBFGSB":
4214         if "0.19" <= scipy.version.version <= "1.1.0":
4215             import lbfgsbhlt as optimiseur
4216         else:
4217             import scipy.optimize as optimiseur
4218         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4219             func        = CostFunction,
4220             x0          = Xini,
4221             fprime      = GradientOfCostFunction,
4222             args        = (),
4223             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
4224             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
4225             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
4226             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
4227             iprint      = selfA._parameters["optiprint"],
4228             )
4229         nfeval = Informations['funcalls']
4230         rc     = Informations['warnflag']
4231     elif selfA._parameters["Minimizer"] == "TNC":
4232         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
4233             func        = CostFunction,
4234             x0          = Xini,
4235             fprime      = GradientOfCostFunction,
4236             args        = (),
4237             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
4238             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
4239             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
4240             ftol        = selfA._parameters["CostDecrementTolerance"],
4241             messages    = selfA._parameters["optmessages"],
4242             )
4243     elif selfA._parameters["Minimizer"] == "CG":
4244         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
4245             f           = CostFunction,
4246             x0          = Xini,
4247             fprime      = GradientOfCostFunction,
4248             args        = (),
4249             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4250             gtol        = selfA._parameters["GradientNormTolerance"],
4251             disp        = selfA._parameters["optdisp"],
4252             full_output = True,
4253             )
4254     elif selfA._parameters["Minimizer"] == "NCG":
4255         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
4256             f           = CostFunction,
4257             x0          = Xini,
4258             fprime      = GradientOfCostFunction,
4259             args        = (),
4260             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4261             avextol     = selfA._parameters["CostDecrementTolerance"],
4262             disp        = selfA._parameters["optdisp"],
4263             full_output = True,
4264             )
4265     elif selfA._parameters["Minimizer"] == "BFGS":
4266         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
4267             f           = CostFunction,
4268             x0          = Xini,
4269             fprime      = GradientOfCostFunction,
4270             args        = (),
4271             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4272             gtol        = selfA._parameters["GradientNormTolerance"],
4273             disp        = selfA._parameters["optdisp"],
4274             full_output = True,
4275             )
4276     else:
4277         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
4278     #
4279     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4280     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
4281     #
4282     # Correction to work around a TNC bug in the returned Minimum
4283     # ------------------------------------------------------------
4284     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
4285         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
4286     else:
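             # Rebuild the full state from the returned control variable: Xa = Xb + B * v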
4287         Minimum = Xb + B * Minimum.reshape((-1,1)) # intentionally not "@"
4288     #
4289     Xa = Minimum
4290     #--------------------------
4291     #
4292     selfA.StoredVariables["Analysis"].store( Xa )
4293     #
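         # Simulated observation at the optimum, reused from storage when already computed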
4294     if selfA._toStore("OMA") or \
4295         selfA._toStore("SigmaObs2") or \
4296         selfA._toStore("SimulationQuantiles") or \
4297         selfA._toStore("SimulatedObservationAtOptimum"):
4298         if selfA._toStore("SimulatedObservationAtCurrentState"):
4299             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
4300         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4301             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
4302         else:
4303             HXa = Hm( Xa )
4304     #
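         # Tangent (HtM) and adjoint (HaM) operators linearized at the optimum,
         # used below for the a posteriori covariance and the gain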
4305     if selfA._toStore("APosterioriCovariance") or \
4306         selfA._toStore("SimulationQuantiles") or \
4307         selfA._toStore("JacobianMatrixAtOptimum") or \
4308         selfA._toStore("KalmanGainAtOptimum"):
4309         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
4310         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
4311     if selfA._toStore("APosterioriCovariance") or \
4312         selfA._toStore("SimulationQuantiles") or \
4313         selfA._toStore("KalmanGainAtOptimum"):
4314         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
4315         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
4316     if selfA._toStore("APosterioriCovariance") or \
4317         selfA._toStore("SimulationQuantiles"):
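             # A posteriori covariance taken as the inverse Hessian of J:
             # A = (B^{-1} + H^T R^{-1} H)^{-1}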
4318         BI = B.getI()
4319         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
4320     if selfA._toStore("APosterioriCovariance"):
4321         selfA.StoredVariables["APosterioriCovariance"].store( A )
4322     if selfA._toStore("JacobianMatrixAtOptimum"):
4323         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
4324     if selfA._toStore("KalmanGainAtOptimum"):
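             # Two equivalent gain expressions, choosing the smaller matrix to invert:
             #   K = B H^T (R + H B H^T)^{-1}                 when Y.size <= Xb.size
             #   K = (B^{-1} + H^T R^{-1} H)^{-1} H^T R^{-1}  otherwise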
4325         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
4326         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
4327         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
4328     #
4329     # Additional computations and/or storage
4330     # --------------------------------------
4331     if selfA._toStore("Innovation") or \
4332         selfA._toStore("SigmaObs2") or \
4333         selfA._toStore("MahalanobisConsistency") or \
4334         selfA._toStore("OMB"):
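             # Innovation: departure of the observations from the simulated background, d = Y - H(Xb)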
4335         d  = Y - HXb
4336     if selfA._toStore("Innovation"):
4337         selfA.StoredVariables["Innovation"].store( d )
4338     if selfA._toStore("BMA"):
4339         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
4340     if selfA._toStore("OMA"):
4341         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
4342     if selfA._toStore("OMB"):
4343         selfA.StoredVariables["OMB"].store( d )
4344     if selfA._toStore("SigmaObs2"):
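             # A posteriori estimate of the observation error variance scaling: d^T (Y - H(Xa)) / Tr(R)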
4345         TraceR = R.trace(Y.size)
4346         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
4347     if selfA._toStore("MahalanobisConsistency"):
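             # Consistency indicator 2*J(Xa)/p, expected to be close to 1 when B and R are well specified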
4348         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
4349     if selfA._toStore("SimulationQuantiles"):
4350         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
4351     if selfA._toStore("SimulatedObservationAtBackground"):
4352         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
4353     if selfA._toStore("SimulatedObservationAtOptimum"):
4354         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
4355     #
4356     return 0
4357
4358 # ==============================================================================
4359 if __name__ == "__main__":
4360     print('\n AUTODIAGNOSTIC\n')