1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = list(sys.path) ; sys.path.insert(0,funcrepr["__userFunction__path"]) # copy, so that the original sys.path is truly restored below
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     This class provides an interface to define approximated operators. When an
55     object is created by supplying a function "Function", one obtains an object
56     with the 3 methods "DirectOperator", "TangentOperator" and
57     "AdjointOperator". The finite-difference (FD) approximation is controlled
58     with the multiplicative increment "increment" (1% by default), or with the
59     fixed increment "dX" which is then multiplied by "increment" (hence in %),
60     and centered FD are used if the boolean "centeredDF" is true.
61     """
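    # Usage sketch (added comment, for illustration only, not part of the
    # original documentation), assuming a user-supplied vector function "F":
    #
    #     FDA = FDApproximation( Function = F, centeredDF = True, increment = 0.01 )
    #     HX  = FDA.DirectOperator( X )            # F evaluated at X
    #     J   = FDA.TangentMatrix( X )             # finite-difference Jacobian at X
    #     JdX = FDA.TangentOperator( (X, dX) )     # J(X) applied to dX
    #     JtY = FDA.AdjointOperator( (X, Y) )      # J(X).T applied to Y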
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Direct computation using the supplied function.
171
172         NB: the extraArgs are there to ensure call compatibility, but they
173         must not be passed here to the user function.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Computation of the tangent operator as the Jacobian by finite
188         differences, i.e. the gradient of H at X. Directional finite
189         differences are used around the point X. X is a numpy.matrix.
190
191         Centered finite differences (2nd order approximation):
192         1/ For each component i of X, the perturbation dX[i] is added to and
193            subtracted from the component X[i], to build X_plus_dXi and
194            X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
195            HX_moins_dXi = H( X_moins_dXi ) are computed
196         2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
197            by the step 2*dXi
198         3/ Each result, component by component, becomes a column of the Jacobian
199
200         Non-centered finite differences (1st order approximation):
201         1/ For each component i of X, the perturbation dX[i] is added to the
202            component X[i] to build X_plus_dXi, and the response
203            HX_plus_dXi = H( X_plus_dXi ) is computed
204         2/ The central value HX = H(X) is computed
205         3/ The differences (HX_plus_dXi-HX) are formed and divided by
206            the step dXi
207         4/ Each result, component by component, becomes a column of the Jacobian
208
209         """
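        # In formulas (restating the docstring), with e_i the i-th basis vector:
        #   centered     : J[:,i] ~ ( H(X + dX_i e_i) - H(X - dX_i e_i) ) / (2 dX_i)
        #   non-centered : J[:,i] ~ ( H(X + dX_i e_i) - H(X) ) / dX_i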
210         logging.debug("FDA Début du calcul de la Jacobienne")
211         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Calcul Jacobienne (explicite)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                     #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                     #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_dX) ) # increment norm, consistent with its use in __doublon__
377         #
378         logging.debug("FDA Fin du calcul de la Jacobienne")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Computation of the tangent using the Jacobian.
386
387         NB: the extraArgs are there to ensure call compatibility, but they
388         must not be passed here to the user function.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect length of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Computation of the matrix form if the second argument is None
402             # -------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Computation of the linearized value of H at X applied to dX
408             # ------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Computation of the adjoint using the Jacobian.
418
419         NB: the extraArgs are there to ensure call compatibility, but they
420         must not be passed here to the user function.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect length of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Computation of the matrix form if the second argument is None
434             # -------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Computation of the adjoint value at X applied to Y
440             # --------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448     "Generation of an ensemble of _nbmembers centered random states"
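    # Note (added comment): the sampling below is E = Xb 1^T + Z, i.e. the center
    # replicated over _nbmembers columns plus, if _bgcovariance is given,
    # zero-mean Gaussian draws Z ~ N(0, _bgcovariance).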
449     #
450     _bgcenter = numpy.ravel(_bgcenter)[:,None]
451     if _nbmembers < 1:
452         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
453     #
454     if _bgcovariance is None:
455         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
456     else:
457         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
459     #
460     return BackgroundEnsemble
461
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464     "Generation of an ensemble of _nbmembers centered random states"
465     def __CenteredRandomAnomalies(Zr, N):
466         """
467         Generates a matrix of N centered random anomalies from Zr, following
468         the handwritten notes of MB and consistent with the code of PS with eps = -1
469         """
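        # Note (added comment): Q below is an N x (N-1) matrix whose columns sum
        # to zero, so applying it to the N-1 raw anomalies Zr yields N anomalies
        # with zero ensemble mean; the random orthogonal rotation R obtained by
        # QR decomposition preserves this centering.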
470         eps = -1
471         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
474         Q = numpy.dot(Q,R)
475         Zr = numpy.dot(Q,Zr)
476         return Zr.T
477     #
478     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
479     if _nbmembers < 1:
480         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
481     if _bgcovariance is None:
482         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
483     else:
484         if _withSVD:
485             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486             _nbctl = _bgcenter.size
487             if _nbmembers > _nbctl:
488                 _Z = numpy.concatenate((numpy.dot(
489                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
491             else:
492                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494             BackgroundEnsemble = _bgcenter + _Zca
495         else:
496             if max(abs(_bgcovariance.flatten())) > 0:
497                 _nbctl = _bgcenter.size
498                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500                 BackgroundEnsemble = _bgcenter + _Zca
501             else:
502                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
503     #
504     return BackgroundEnsemble
505
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
508     "Returns the centered anomalies from a StateSize*NbMembers ensemble"
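    # In short (added comment): returns Normalisation * (Ensemble - mean), the
    # mean being either the ensemble mean over the members (axis 1) or the
    # user-supplied OptMean, broadcast over the columns.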
509     if OptMean is None:
510         __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
511     else:
512         __Em = numpy.ravel(OptMean).reshape((-1,1))
513     #
514     return Normalisation * (numpy.asarray(Ensemble) - __Em)
515
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble ):
518     "Returns the ensemble covariance"
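    # Empirical formula used below (added comment): with A the anomaly matrix of
    # shape (n, m), P ~ A A^T / (m-1), then symmetrized and slightly regularized
    # to remain positive definite.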
519     __Anomalies = EnsembleOfAnomalies( Ensemble )
520     __n, __m = numpy.asarray(__Anomalies).shape
521     # Empirical estimate
522     __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
523     # Enforce symmetry
524     __Covariance = (__Covariance + __Covariance.T) * 0.5
525     # Enforce positivity
526     __epsilon    = mpr*numpy.trace(__Covariance)
527     __Covariance = __Covariance + __epsilon * numpy.identity(__n)
528     #
529     return __Covariance
530
531 # ==============================================================================
532 def CovarianceInflation(
533         InputCovOrEns,
534         InflationType   = None,
535         InflationFactor = None,
536         BackgroundCov   = None,
537         ):
538     """
539     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
540
541     Summary: Hunt 2007, section 2.3.5
542     """
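    # Summary of the formulas implemented below (added comment, alpha being the
    # InflationFactor):
    #   multiplicative on covariance : P <- alpha^2 * P
    #   multiplicative on anomalies  : E <- mean(E) + alpha * (E - mean(E))
    #   additive on covariance       : P <- (1 - alpha) * P + alpha * I
    #   hybrid on background cov.    : P <- (1 - alpha) * P + alpha * B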
543     if InflationFactor is None:
544         return InputCovOrEns
545     else:
546         InflationFactor = float(InflationFactor)
547     #
548     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
549         if InflationFactor < 1.:
550             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
551         if InflationFactor < 1.+mpr:
552             return InputCovOrEns
553         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
554     #
555     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
556         if InflationFactor < 1.:
557             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
558         if InflationFactor < 1.+mpr:
559             return InputCovOrEns
560         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
561         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
562             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
563     #
564     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
565         if InflationFactor < 0.:
566             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
567         if InflationFactor < mpr:
568             return InputCovOrEns
569         __n, __m = numpy.asarray(InputCovOrEns).shape
570         if __n != __m:
571             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
572         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
573     #
574     elif InflationType == "HybridOnBackgroundCovariance":
575         if InflationFactor < 0.:
576             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
577         if InflationFactor < mpr:
578             return InputCovOrEns
579         __n, __m = numpy.asarray(InputCovOrEns).shape
580         if __n != __m:
581             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
582         if BackgroundCov is None:
583             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
584         if InputCovOrEns.shape != BackgroundCov.shape:
585             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
586         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
587     #
588     elif InflationType == "Relaxation":
589         raise NotImplementedError("InflationType Relaxation")
590     #
591     else:
592         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
593     #
594     return OutputCovOrEns
595
596 # ==============================================================================
597 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
598     """
599     EnKS (fixed-lag Ensemble Kalman Smoother)
600     """
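    # Descriptive note (added comment): fixed-lag smoother of lag L. At each new
    # observation, an ETKF-like analysis provides the weight matrix wTU, and the
    # same weights are re-applied to the L previously stored ensembles in order
    # to smooth them retrospectively.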
601     #
602     # Initialisations
603     # ---------------
604     #
605     # Operators
606     H = HO["Direct"].appliedControledFormTo
607     #
608     if selfA._parameters["EstimationOf"] == "State":
609         M = EM["Direct"].appliedControledFormTo
610     #
611     if CM is not None and "Tangent" in CM and U is not None:
612         Cm = CM["Tangent"].asMatrix(Xb)
613     else:
614         Cm = None
615     #
616     # Precomputation of the B and R inversions
617     RIdemi = R.sqrtmI()
618     #
619     LagL = selfA._parameters["SmootherLagL"]
620     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
621         raise ValueError("Fixed-lag smoother requires a series of observations")
622     if Y.stepnumber() < LagL:
623         raise ValueError("Fixed-lag smoother requires an observation series longer than the lag L")
624     duration = Y.stepnumber()
625     __p = numpy.cumprod(Y.shape())[-1]
626     __n = Xb.size
627     __m = selfA._parameters["NumberOfMembers"]
628     #
629     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
630     else:                         Pn = B
631     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
632     else:                         Qn = Q
633     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
634         selfA.StoredVariables["Analysis"].store( Xb )
635         if selfA._toStore("APosterioriCovariance"):
636             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
637             covarianceXa = Pn
638     #
639     # Initial direct computation (storage is preferred over recomputation)
640     __seed = numpy.random.get_state()
641     selfB = copy.deepcopy(selfA)
642     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
643     if VariantM == "EnKS16-KalmanFilterFormula":
644         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
645     else:
646         raise ValueError("VariantM has to be chosen in the authorized methods list.")
647     if LagL > 0:
648         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
649     else:
650         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
651     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
652     #
653     for step in range(LagL,duration-1):
654         #
655         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
656         sEL.append(None)
657         #
658         if hasattr(Y,"store"):
659             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
660         else:
661             Ynpu = numpy.ravel( Y ).reshape((__p,1))
662         #
663         if U is not None:
664             if hasattr(U,"store") and len(U)>1:
665                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
666             elif hasattr(U,"store") and len(U)==1:
667                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
668             else:
669                 Un = numpy.asmatrix(numpy.ravel( U )).T
670         else:
671             Un = None
672         #
673         #--------------------------
674         if VariantM == "EnKS16-KalmanFilterFormula":
675             if selfA._parameters["EstimationOf"] == "State": # Forecast
676                 EL = M( [(EL[:,i], Un) for i in range(__m)],
677                     argsAsSerie = True,
678                     returnSerieAsArrayMatrix = True )
679                 EL = EL + numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
680                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
681                     argsAsSerie = True,
682                     returnSerieAsArrayMatrix = True )
683                 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
684                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
685                     EZ = EZ + Cm * Un
686             elif selfA._parameters["EstimationOf"] == "Parameters":
687                 # --- > By principle, M = Id, Q = 0
688                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
689                     argsAsSerie = True,
690                     returnSerieAsArrayMatrix = True )
691             #
692             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
693             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
694             #
695             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
696             delta = RIdemi @ ( Ynpu - vZm )
697             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
698             vw    = mT @ mS.T @ delta
699             #
700             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
701             mU    = numpy.identity(__m)
702             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
703             #
704             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
705             EL    = vEm + EX @ wTU
706             #
707             sEL[LagL] = EL
708             for irl in range(LagL): # Smoothing of the L previous analyses
709                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
710                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
711                 sEL[irl] = vEm + EX @ wTU
712             #
713             # Keep the order-0 retrospective analysis before rotation
714             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
715             if selfA._toStore("APosterioriCovariance"):
716                 EXn = sEL[0]
717             #
718             for irl in range(LagL):
719                 sEL[irl] = sEL[irl+1]
720             sEL[LagL] = None
721         #--------------------------
722         else:
723             raise ValueError("VariantM has to be chosen in the authorized methods list.")
724         #
725         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
726         selfA.StoredVariables["Analysis"].store( Xa )
727         if selfA._toStore("APosterioriCovariance"):
728             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
729     #
730     # Storage of the last analyses, still incompletely updated
731     for irl in range(LagL):
732         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
733         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
734         selfA.StoredVariables["Analysis"].store( Xa )
735     #
736     return 0
737
738 # ==============================================================================
739 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
740     """
741     Ensemble-Transform EnKF
742     """
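    # Descriptive note (added comment) on the "KalmanFilterFormula" variant below,
    # written in ensemble weight space: with S = R^{-1/2} EaHX / sqrt(m-1) and
    # d = R^{-1/2} (y - Hfm),
    #   T  = (I + S^T S)^{-1},  w = T S^T d,
    #   Xa = Xfm + EaX/sqrt(m-1) ( w 1^T + sqrt(m-1) T^{1/2} )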
743     if selfA._parameters["EstimationOf"] == "Parameters":
744         selfA._parameters["StoreInternalVariables"] = True
745     #
746     # Operators
747     # ----------
748     H = HO["Direct"].appliedControledFormTo
749     #
750     if selfA._parameters["EstimationOf"] == "State":
751         M = EM["Direct"].appliedControledFormTo
752     #
753     if CM is not None and "Tangent" in CM and U is not None:
754         Cm = CM["Tangent"].asMatrix(Xb)
755     else:
756         Cm = None
757     #
758     # Number of steps identical to the number of observation steps
759     # -------------------------------------------------------
760     if hasattr(Y,"stepnumber"):
761         duration = Y.stepnumber()
762         __p = numpy.cumprod(Y.shape())[-1]
763     else:
764         duration = 2
765         __p = numpy.array(Y).size
766     #
767     # Precomputation of the B and R inversions
768     # ----------------------------------
769     if selfA._parameters["StoreInternalVariables"] \
770         or selfA._toStore("CostFunctionJ") \
771         or selfA._toStore("CostFunctionJb") \
772         or selfA._toStore("CostFunctionJo") \
773         or selfA._toStore("CurrentOptimum") \
774         or selfA._toStore("APosterioriCovariance"):
775         BI = B.getI()
776         RI = R.getI()
777     elif VariantM != "KalmanFilterFormula":
778         RI = R.getI()
779     if VariantM == "KalmanFilterFormula":
780         RIdemi = R.sqrtmI()
781     #
782     # Initialisation
783     # --------------
784     __n = Xb.size
785     __m = selfA._parameters["NumberOfMembers"]
786     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
787     else:                         Pn = B
788     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
789     else:                         Qn = Q
790     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
791     #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
792     #
793     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
794         selfA.StoredVariables["Analysis"].store( Xb )
795         if selfA._toStore("APosterioriCovariance"):
796             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
797             covarianceXa = Pn
798     #
799     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # added: used below to locate the current optimum
800     #
801     for step in range(duration-1):
802         if hasattr(Y,"store"):
803             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
804         else:
805             Ynpu = numpy.ravel( Y ).reshape((__p,1))
806         #
807         if U is not None:
808             if hasattr(U,"store") and len(U)>1:
809                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
810             elif hasattr(U,"store") and len(U)==1:
811                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
812             else:
813                 Un = numpy.asmatrix(numpy.ravel( U )).T
814         else:
815             Un = None
816         #
817         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
818             Xn = CovarianceInflation( Xn,
819                 selfA._parameters["InflationType"],
820                 selfA._parameters["InflationFactor"],
821                 )
822         #
823         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
824             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
825                 argsAsSerie = True,
826                 returnSerieAsArrayMatrix = True )
827             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
828             Xn_predicted = EMX + qi
829             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
830                 argsAsSerie = True,
831                 returnSerieAsArrayMatrix = True )
832             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
833                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
834                 Xn_predicted = Xn_predicted + Cm * Un
835         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
836             # --- > By principle, M = Id, Q = 0
837             Xn_predicted = Xn
838             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
839                 argsAsSerie = True,
840                 returnSerieAsArrayMatrix = True )
841         #
842         # Mean of forecast and observation of forecast
843         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
844         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
845         #
846         # Anomalies
847         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
848         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
849         #
850         #--------------------------
851         if VariantM == "KalmanFilterFormula":
852             mS    = RIdemi * EaHX / math.sqrt(__m-1)
853             delta = RIdemi * ( Ynpu - Hfm )
854             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
855             vw    = mT @ mS.T @ delta
856             #
857             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
858             mU    = numpy.identity(__m)
859             #
860             EaX   = EaX / math.sqrt(__m-1)
861             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
862         #--------------------------
863         elif VariantM == "Variational":
864             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
865             def CostFunction(w):
866                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
867                 _Jo = 0.5 * _A.T @ (RI * _A)
868                 _Jb = 0.5 * (__m-1) * w.T @ w
869                 _J  = _Jo + _Jb
870                 return float(_J)
871             def GradientOfCostFunction(w):
872                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
873                 _GardJo = - EaHX.T @ (RI * _A)
874                 _GradJb = (__m-1) * w.reshape((__m,1))
875                 _GradJ  = _GardJo + _GradJb
876                 return numpy.ravel(_GradJ)
877             vw = scipy.optimize.fmin_cg(
878                 f           = CostFunction,
879                 x0          = numpy.zeros(__m),
880                 fprime      = GradientOfCostFunction,
881                 args        = (),
882                 disp        = False,
883                 )
884             #
885             Hto = EaHX.T @ (RI * EaHX)
886             Htb = (__m-1) * numpy.identity(__m)
887             Hta = Hto + Htb
888             #
889             Pta = numpy.linalg.inv( Hta )
890             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
891             #
892             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
893         #--------------------------
894         elif VariantM == "FiniteSize11": # Boc2011 gauge
895             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
896             def CostFunction(w):
897                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
898                 _Jo = 0.5 * _A.T @ (RI * _A)
899                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
900                 _J  = _Jo + _Jb
901                 return float(_J)
902             def GradientOfCostFunction(w):
903                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
904                 _GardJo = - EaHX.T @ (RI * _A)
905                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
906                 _GradJ  = _GardJo + _GradJb
907                 return numpy.ravel(_GradJ)
908             vw = scipy.optimize.fmin_cg(
909                 f           = CostFunction,
910                 x0          = numpy.zeros(__m),
911                 fprime      = GradientOfCostFunction,
912                 args        = (),
913                 disp        = False,
914                 )
915             #
916             Hto = EaHX.T @ (RI * EaHX)
917             Htb = __m * \
918                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
919                 / (1 + 1/__m + vw.T @ vw)**2
920             Hta = Hto + Htb
921             #
922             Pta = numpy.linalg.inv( Hta )
923             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
924             #
925             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
926         #--------------------------
927         elif VariantM == "FiniteSize15": # Boc2015 gauge
928             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
929             def CostFunction(w):
930                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
931                 _Jo = 0.5 * _A.T * RI * _A
932                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
933                 _J  = _Jo + _Jb
934                 return float(_J)
935             def GradientOfCostFunction(w):
936                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
937                 _GardJo = - EaHX.T @ (RI * _A)
938                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
939                 _GradJ  = _GardJo + _GradJb
940                 return numpy.ravel(_GradJ)
941             vw = scipy.optimize.fmin_cg(
942                 f           = CostFunction,
943                 x0          = numpy.zeros(__m),
944                 fprime      = GradientOfCostFunction,
945                 args        = (),
946                 disp        = False,
947                 )
948             #
949             Hto = EaHX.T @ (RI * EaHX)
950             Htb = (__m+1) * \
951                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
952                 / (1 + 1/__m + vw.T @ vw)**2
953             Hta = Hto + Htb
954             #
955             Pta = numpy.linalg.inv( Hta )
956             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
957             #
958             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
959         #--------------------------
960         elif VariantM == "FiniteSize16": # Boc2016 gauge
961             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
962             def CostFunction(w):
963                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
964                 _Jo = 0.5 * _A.T @ (RI * _A)
965                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
966                 _J  = _Jo + _Jb
967                 return float(_J)
968             def GradientOfCostFunction(w):
969                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
970                 _GardJo = - EaHX.T @ (RI * _A)
971                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
972                 _GradJ  = _GardJo + _GradJb
973                 return numpy.ravel(_GradJ)
974             vw = scipy.optimize.fmin_cg(
975                 f           = CostFunction,
976                 x0          = numpy.zeros(__m),
977                 fprime      = GradientOfCostFunction,
978                 args        = (),
979                 disp        = False,
980                 )
981             #
982             Hto = EaHX.T @ (RI * EaHX)
983             Htb = ((__m+1) / (__m-1)) * \
984                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
985                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
986             Hta = Hto + Htb
987             #
988             Pta = numpy.linalg.inv( Hta )
989             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
990             #
991             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
992         #--------------------------
993         else:
994             raise ValueError("VariantM has to be chosen in the authorized methods list.")
995         #
996         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
997             Xn = CovarianceInflation( Xn,
998                 selfA._parameters["InflationType"],
999                 selfA._parameters["InflationFactor"],
1000                 )
1001         #
1002         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1003         #--------------------------
1004         #
1005         if selfA._parameters["StoreInternalVariables"] \
1006             or selfA._toStore("CostFunctionJ") \
1007             or selfA._toStore("CostFunctionJb") \
1008             or selfA._toStore("CostFunctionJo") \
1009             or selfA._toStore("APosterioriCovariance") \
1010             or selfA._toStore("InnovationAtCurrentAnalysis") \
1011             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1012             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1013             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1014             _Innovation = Ynpu - _HXa
1015         #
1016         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1017         # ---> with analysis
1018         selfA.StoredVariables["Analysis"].store( Xa )
1019         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1020             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1021         if selfA._toStore("InnovationAtCurrentAnalysis"):
1022             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1023         # ---> with current state
1024         if selfA._parameters["StoreInternalVariables"] \
1025             or selfA._toStore("CurrentState"):
1026             selfA.StoredVariables["CurrentState"].store( Xn )
1027         if selfA._toStore("ForecastState"):
1028             selfA.StoredVariables["ForecastState"].store( EMX )
1029         if selfA._toStore("BMA"):
1030             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1031         if selfA._toStore("InnovationAtCurrentState"):
1032             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1033         if selfA._toStore("SimulatedObservationAtCurrentState") \
1034             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1035             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1036         # ---> others
1037         if selfA._parameters["StoreInternalVariables"] \
1038             or selfA._toStore("CostFunctionJ") \
1039             or selfA._toStore("CostFunctionJb") \
1040             or selfA._toStore("CostFunctionJo") \
1041             or selfA._toStore("CurrentOptimum") \
1042             or selfA._toStore("APosterioriCovariance"):
1043             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1044             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1045             J   = Jb + Jo
1046             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1047             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1048             selfA.StoredVariables["CostFunctionJ" ].store( J )
1049             #
1050             if selfA._toStore("IndexOfOptimum") \
1051                 or selfA._toStore("CurrentOptimum") \
1052                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1053                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1054                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1055                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1056                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1057             if selfA._toStore("IndexOfOptimum"):
1058                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1059             if selfA._toStore("CurrentOptimum"):
1060                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1061             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1062                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1063             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1064                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1065             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1066                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1067             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1068                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1069         if selfA._toStore("APosterioriCovariance"):
1070             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1071         if selfA._parameters["EstimationOf"] == "Parameters" \
1072             and J < previousJMinimum:
1073             previousJMinimum    = J
1074             XaMin               = Xa
1075             if selfA._toStore("APosterioriCovariance"):
1076                 covarianceXaMin = Pn
1077         # ---> For the smoothers
1078         if selfA._toStore("CurrentEnsembleState"):
1079             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1080     #
1081     # Final additional storage of the optimum for parameter estimation
1082     # ----------------------------------------------------------------------
1083     if selfA._parameters["EstimationOf"] == "Parameters":
1084         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1085         selfA.StoredVariables["Analysis"].store( XaMin )
1086         if selfA._toStore("APosterioriCovariance"):
1087             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1088         if selfA._toStore("BMA"):
1089             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1090     #
1091     return 0
1092
1093 # ==============================================================================
1094 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1095     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1096     """
1097     Iterative EnKF
1098     """
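    # Descriptive note (added comment): iterative (Gauss-Newton type) analysis in
    # ensemble space. The weights w are refined in the loop on __j below until
    # ||Deltaw|| < _e or _jmax iterations; BnotT switches between the "bundle"
    # (finite-difference with _epsilon) and "transform" (with Ta) formulations.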
1099     if selfA._parameters["EstimationOf"] == "Parameters":
1100         selfA._parameters["StoreInternalVariables"] = True
1101     #
1102     # Operators
1103     # ----------
1104     H = HO["Direct"].appliedControledFormTo
1105     #
1106     if selfA._parameters["EstimationOf"] == "State":
1107         M = EM["Direct"].appliedControledFormTo
1108     #
1109     if CM is not None and "Tangent" in CM and U is not None:
1110         Cm = CM["Tangent"].asMatrix(Xb)
1111     else:
1112         Cm = None
1113     #
1114     # Nombre de pas identique au nombre de pas d'observations
1115     # -------------------------------------------------------
1116     if hasattr(Y,"stepnumber"):
1117         duration = Y.stepnumber()
1118         __p = numpy.cumprod(Y.shape())[-1]
1119     else:
1120         duration = 2
1121         __p = numpy.array(Y).size
1122     #
1123     # Précalcul des inversions de B et R
1124     # ----------------------------------
1125     if selfA._parameters["StoreInternalVariables"] \
1126         or selfA._toStore("CostFunctionJ") \
1127         or selfA._toStore("CostFunctionJb") \
1128         or selfA._toStore("CostFunctionJo") \
1129         or selfA._toStore("CurrentOptimum") \
1130         or selfA._toStore("APosterioriCovariance"):
1131         BI = B.getI()
1132     RI = R.getI()
1133     #
1134     # Initialisation
1135     # --------------
1136     __n = Xb.size
1137     __m = selfA._parameters["NumberOfMembers"]
1138     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1139     else:                         Pn = B
1140     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1141     else:                         Rn = R
1142     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1143     else:                         Qn = Q
1144     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1145     #
1146     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1147         selfA.StoredVariables["Analysis"].store( Xb )
1148         if selfA._toStore("APosterioriCovariance"):
1149             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1150             covarianceXa = Pn
1151     #
1152     previousJMinimum = numpy.finfo(float).max
         nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # defined here, otherwise the optimum tracking below fails on an undefined name
1153     #
1154     for step in range(duration-1):
1155         if hasattr(Y,"store"):
1156             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1157         else:
1158             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1159         #
1160         if U is not None:
1161             if hasattr(U,"store") and len(U)>1:
1162                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1163             elif hasattr(U,"store") and len(U)==1:
1164                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1165             else:
1166                 Un = numpy.asmatrix(numpy.ravel( U )).T
1167         else:
1168             Un = None
1169         #
1170         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1171             Xn = CovarianceInflation( Xn,
1172                 selfA._parameters["InflationType"],
1173                 selfA._parameters["InflationFactor"],
1174                 )
1175         #
1176         #--------------------------
1177         if VariantM == "IEnKF12":
1178             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1179             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1180             __j = 0
1181             Deltaw = 1
1182             if not BnotT:
1183                 Ta  = numpy.identity(__m)
1184             vw  = numpy.zeros(__m)
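                 # Inner loop: approximate Newton iteration on the weights w,
                 #   GradJ = w - EaY^T R^{-1} (y - H(xbar + EaX w))
                 #   mH    = I + EaY^T R^{-1} EaY     (approximate Hessian)
                 #   w    <- w - mH^{-1} GradJ
                 # stopped when ||Deltaw|| < _e or when _jmax iterations are exceeded.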
1185             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1186                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1187                 #
1188                 if BnotT:
1189                     E1 = vx1 + _epsilon * EaX
1190                 else:
1191                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1192                 #
1193                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1194                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1195                         argsAsSerie = True,
1196                         returnSerieAsArrayMatrix = True )
1197                 elif selfA._parameters["EstimationOf"] == "Parameters":
1198                     # --- > Par principe, M = Id
1199                     E2 = Xn
1200                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1201                 vy1 = H((vx2, Un)).reshape((__p,1))
1202                 #
1203                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1204                     argsAsSerie = True,
1205                     returnSerieAsArrayMatrix = True )
1206                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1207                 #
1208                 if BnotT:
1209                     EaY = (HE2 - vy2) / _epsilon
1210                 else:
1211                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1212                 #
1213                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1214                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1215                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1216                 #
1217                 vw = vw + Deltaw
1218                 #
1219                 if not BnotT:
1220                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1221                 #
1222                 __j = __j + 1
1223             #
1224             A2 = EnsembleOfAnomalies( E2 )
1225             #
1226             if BnotT:
1227                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1228                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1229             #
1230             Xn = vx2 + A2
1231         #--------------------------
1232         else:
1233             raise ValueError("VariantM must be chosen from the list of authorized methods.")
1234         #
1235         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1236             Xn = CovarianceInflation( Xn,
1237                 selfA._parameters["InflationType"],
1238                 selfA._parameters["InflationFactor"],
1239                 )
1240         #
1241         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1242         #--------------------------
1243         #
1244         if selfA._parameters["StoreInternalVariables"] \
1245             or selfA._toStore("CostFunctionJ") \
1246             or selfA._toStore("CostFunctionJb") \
1247             or selfA._toStore("CostFunctionJo") \
1248             or selfA._toStore("APosterioriCovariance") \
1249             or selfA._toStore("InnovationAtCurrentAnalysis") \
1250             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1251             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1252             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1253             _Innovation = Ynpu - _HXa
1254         #
1255         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1256         # ---> avec analysis
1257         selfA.StoredVariables["Analysis"].store( Xa )
1258         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1259             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1260         if selfA._toStore("InnovationAtCurrentAnalysis"):
1261             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1262         # ---> avec current state
1263         if selfA._parameters["StoreInternalVariables"] \
1264             or selfA._toStore("CurrentState"):
1265             selfA.StoredVariables["CurrentState"].store( Xn )
1266         if selfA._toStore("ForecastState"):
1267             selfA.StoredVariables["ForecastState"].store( E2 )
1268         if selfA._toStore("BMA"):
1269             selfA.StoredVariables["BMA"].store( E2 - Xa )
1270         if selfA._toStore("InnovationAtCurrentState"):
1271             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1272         if selfA._toStore("SimulatedObservationAtCurrentState") \
1273             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1274             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1275         # ---> autres
1276         if selfA._parameters["StoreInternalVariables"] \
1277             or selfA._toStore("CostFunctionJ") \
1278             or selfA._toStore("CostFunctionJb") \
1279             or selfA._toStore("CostFunctionJo") \
1280             or selfA._toStore("CurrentOptimum") \
1281             or selfA._toStore("APosterioriCovariance"):
1282             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1283             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1284             J   = Jb + Jo
1285             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1286             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1287             selfA.StoredVariables["CostFunctionJ" ].store( J )
1288             #
1289             if selfA._toStore("IndexOfOptimum") \
1290                 or selfA._toStore("CurrentOptimum") \
1291                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1292                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1293                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1294                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1295                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1296             if selfA._toStore("IndexOfOptimum"):
1297                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1298             if selfA._toStore("CurrentOptimum"):
1299                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1300             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1301                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1302             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1303                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1304             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1305                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1306             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1307                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1308         if selfA._toStore("APosterioriCovariance"):
1309             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1310         if selfA._parameters["EstimationOf"] == "Parameters" \
1311             and J < previousJMinimum:
1312             previousJMinimum    = J
1313             XaMin               = Xa
1314             if selfA._toStore("APosterioriCovariance"):
1315                 covarianceXaMin = Pn
1316     #
1317     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1318     # ----------------------------------------------------------------------
1319     if selfA._parameters["EstimationOf"] == "Parameters":
1320         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1321         selfA.StoredVariables["Analysis"].store( XaMin )
1322         if selfA._toStore("APosterioriCovariance"):
1323             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1324         if selfA._toStore("BMA"):
1325             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1326     #
1327     return 0
1328
1329 # ==============================================================================
1330 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1331     """
1332     3DVAR incrémental
1333     """
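         # Note: incremental formulation of 3DVAR. Each outer iteration
         # re-linearises the observation operator around the current state Xr;
         # the inner minimisation is then on the quadratic cost in the
         # increment dx:
         #   J(dx) = 1/2 dx^T B^{-1} dx + 1/2 (d - Ht dx)^T R^{-1} (d - Ht dx)
         # with d = Y - H(Xb) the innovation, solved with the gradient-based
         # minimiser selected by the "Minimizer" parameter.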
1334     #
1335     # Initialisations
1336     # ---------------
1337     #
1338     # Opérateur non-linéaire pour la boucle externe
1339     Hm = HO["Direct"].appliedTo
1340     #
1341     # Précalcul des inversions de B et R
1342     BI = B.getI()
1343     RI = R.getI()
1344     #
1345     # Point de démarrage de l'optimisation
1346     Xini = selfA._parameters["InitializationPoint"]
1347     #
1348     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1349     Innovation = Y - HXb
1350     #
1351     # Outer Loop
1352     # ----------
1353     iOuter = 0
1354     J      = 1./mpr
1355     DeltaJ = 1./mpr
1356     Xr     = Xini.reshape((-1,1))
1357     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1358         #
1359         # Inner Loop
1360         # ----------
1361         Ht = HO["Tangent"].asMatrix(Xr)
1362         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1363         #
1364         # Définition de la fonction-coût
1365         # ------------------------------
1366         def CostFunction(dx):
1367             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1368             if selfA._parameters["StoreInternalVariables"] or \
1369                 selfA._toStore("CurrentState") or \
1370                 selfA._toStore("CurrentOptimum"):
1371                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1372             _HdX = Ht * _dX
1373             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1374             _dInnovation = Innovation - _HdX
1375             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1376                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1377                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1378             if selfA._toStore("InnovationAtCurrentState"):
1379                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1380             #
1381             Jb  = float( 0.5 * _dX.T * BI * _dX )
1382             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1383             J   = Jb + Jo
1384             #
1385             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1386             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1387             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1388             selfA.StoredVariables["CostFunctionJ" ].store( J )
1389             if selfA._toStore("IndexOfOptimum") or \
1390                 selfA._toStore("CurrentOptimum") or \
1391                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1392                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1393                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1394                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1395                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1396             if selfA._toStore("IndexOfOptimum"):
1397                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1398             if selfA._toStore("CurrentOptimum"):
1399                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1400             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1401                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1402             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1403                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1404             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1405                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1406             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1407                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1408             return J
1409         #
1410         def GradientOfCostFunction(dx):
1411             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1412             _HdX         = Ht * _dX
1413             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1414             _dInnovation = Innovation - _HdX
1415             GradJb       = BI * _dX
1416             GradJo       = - Ht.T @ (RI * _dInnovation)
1417             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1418             return GradJ
1419         #
1420         # Minimisation de la fonctionnelle
1421         # --------------------------------
1422         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1423         #
1424         if selfA._parameters["Minimizer"] == "LBFGSB":
1425             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1426             if "0.19" <= scipy.version.version <= "1.1.0":
1427                 import lbfgsbhlt as optimiseur
1428             else:
1429                 import scipy.optimize as optimiseur
1430             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1431                 func        = CostFunction,
1432                 x0          = numpy.zeros(Xini.size),
1433                 fprime      = GradientOfCostFunction,
1434                 args        = (),
1435                 bounds      = selfA._parameters["Bounds"],
1436                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1437                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1438                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1439                 iprint      = selfA._parameters["optiprint"],
1440                 )
1441             nfeval = Informations['funcalls']
1442             rc     = Informations['warnflag']
1443         elif selfA._parameters["Minimizer"] == "TNC":
1444             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1445                 func        = CostFunction,
1446                 x0          = numpy.zeros(Xini.size),
1447                 fprime      = GradientOfCostFunction,
1448                 args        = (),
1449                 bounds      = selfA._parameters["Bounds"],
1450                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1451                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1452                 ftol        = selfA._parameters["CostDecrementTolerance"],
1453                 messages    = selfA._parameters["optmessages"],
1454                 )
1455         elif selfA._parameters["Minimizer"] == "CG":
1456             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1457                 f           = CostFunction,
1458                 x0          = numpy.zeros(Xini.size),
1459                 fprime      = GradientOfCostFunction,
1460                 args        = (),
1461                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1462                 gtol        = selfA._parameters["GradientNormTolerance"],
1463                 disp        = selfA._parameters["optdisp"],
1464                 full_output = True,
1465                 )
1466         elif selfA._parameters["Minimizer"] == "NCG":
1467             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1468                 f           = CostFunction,
1469                 x0          = numpy.zeros(Xini.size),
1470                 fprime      = GradientOfCostFunction,
1471                 args        = (),
1472                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1473                 avextol     = selfA._parameters["CostDecrementTolerance"],
1474                 disp        = selfA._parameters["optdisp"],
1475                 full_output = True,
1476                 )
1477         elif selfA._parameters["Minimizer"] == "BFGS":
1478             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1479                 f           = CostFunction,
1480                 x0          = numpy.zeros(Xini.size),
1481                 fprime      = GradientOfCostFunction,
1482                 args        = (),
1483                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1484                 gtol        = selfA._parameters["GradientNormTolerance"],
1485                 disp        = selfA._parameters["optdisp"],
1486                 full_output = True,
1487                 )
1488         else:
1489             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1490         #
1491         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1492         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1493         #
1494         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1495             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1496             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1497         else:
1498             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1499         #
1500         Xr     = Minimum
1501         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
             J      = selfA.StoredVariables["CostFunctionJ" ][-1] # updated here so that DeltaJ measures the cost decrease between outer iterations
1502         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1503     #
1504     # Obtention de l'analyse
1505     # ----------------------
1506     Xa = Xr
1507     #
1508     selfA.StoredVariables["Analysis"].store( Xa )
1509     #
1510     if selfA._toStore("OMA") or \
1511         selfA._toStore("SigmaObs2") or \
1512         selfA._toStore("SimulationQuantiles") or \
1513         selfA._toStore("SimulatedObservationAtOptimum"):
1514         if selfA._toStore("SimulatedObservationAtCurrentState"):
1515             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1516         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1517             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1518         else:
1519             HXa = Hm( Xa )
1520     #
1521     # Calcul de la covariance d'analyse
1522     # ---------------------------------
1523     if selfA._toStore("APosterioriCovariance") or \
1524         selfA._toStore("SimulationQuantiles") or \
1525         selfA._toStore("JacobianMatrixAtOptimum") or \
1526         selfA._toStore("KalmanGainAtOptimum"):
1527         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1528         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1529     if selfA._toStore("APosterioriCovariance") or \
1530         selfA._toStore("SimulationQuantiles") or \
1531         selfA._toStore("KalmanGainAtOptimum"):
1532         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1533         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1534     if selfA._toStore("APosterioriCovariance") or \
1535         selfA._toStore("SimulationQuantiles"):
1536         HessienneI = []
1537         nb = Xa.size
1538         for i in range(nb):
1539             _ee    = numpy.matrix(numpy.zeros(nb)).T
1540             _ee[i] = 1.
1541             _HtEE  = numpy.dot(HtM,_ee)
1542             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1543             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1544         HessienneI = numpy.matrix( HessienneI )
1545         A = HessienneI.I
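             # The a posteriori covariance is the inverse of the cost Hessian,
             # A = (B^{-1} + H^T R^{-1} H)^{-1}, built column by column above
             # from the unit vectors _ee.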
1546         if min(A.shape) != max(A.shape):
1547             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1548         if (numpy.diag(A) < 0).any():
1549             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1550         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
1551             try:
1552                 L = numpy.linalg.cholesky( A )
1553             except:
1554                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1555     if selfA._toStore("APosterioriCovariance"):
1556         selfA.StoredVariables["APosterioriCovariance"].store( A )
1557     if selfA._toStore("JacobianMatrixAtOptimum"):
1558         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1559     if selfA._toStore("KalmanGainAtOptimum"):
1560         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1561         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1562         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1563     #
1564     # Calculs et/ou stockages supplémentaires
1565     # ---------------------------------------
1566     if selfA._toStore("Innovation") or \
1567         selfA._toStore("SigmaObs2") or \
1568         selfA._toStore("MahalanobisConsistency") or \
1569         selfA._toStore("OMB"):
1570         d  = Y - HXb
1571     if selfA._toStore("Innovation"):
1572         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1573     if selfA._toStore("BMA"):
1574         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1575     if selfA._toStore("OMA"):
1576         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1577     if selfA._toStore("OMB"):
1578         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1579     if selfA._toStore("SigmaObs2"):
1580         TraceR = R.trace(Y.size)
1581         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1582     if selfA._toStore("MahalanobisConsistency"):
1583         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
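         # Quantile estimation: "NumberOfSamplesForQuantiles" states are drawn
         # from N(Xa, A); each draw is propagated either with the tangent
         # operator HtM ("Linear") or with the nonlinear operator Hm
         # ("NonLinear"), and the requested empirical quantiles of the
         # simulated observations are extracted after sorting.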
1584     if selfA._toStore("SimulationQuantiles"):
1585         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1586         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1587         YfQ  = None
1588         for i in range(nech):
1589             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1590                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1591                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1592                 Yr = HXa + dYr
1593             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1594                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1595                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1596             if YfQ is None:
1597                 YfQ = Yr
1598             else:
1599                 YfQ = numpy.hstack((YfQ,Yr))
1600         YfQ.sort(axis=-1)
1601         YQ = None
1602         for quantile in selfA._parameters["Quantiles"]:
1603             if not (0. <= float(quantile) <= 1.): continue
1604             indice = int(nech * float(quantile) - 1./nech)
1605             if YQ is None: YQ = YfQ[:,indice]
1606             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1607         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1608     if selfA._toStore("SimulatedObservationAtBackground"):
1609         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1610     if selfA._toStore("SimulatedObservationAtOptimum"):
1611         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1612     #
1613     return 0
1614
1615 # ==============================================================================
1616 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1617     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1618     """
1619     Maximum Likelihood Ensemble Filter
1620     """
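         # Note: MLEF analysis (variant "MLEF13"), built like the iterative
         # filter above: the forecast ensemble is computed once per observation
         # step, then the analysis weights w are obtained by the same Newton
         # iteration in ensemble space, only the observation operator H (not
         # the evolution model M) being re-applied at each inner iteration.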
1621     if selfA._parameters["EstimationOf"] == "Parameters":
1622         selfA._parameters["StoreInternalVariables"] = True
1623     #
1624     # Opérateurs
1625     # ----------
1626     H = HO["Direct"].appliedControledFormTo
1627     #
1628     if selfA._parameters["EstimationOf"] == "State":
1629         M = EM["Direct"].appliedControledFormTo
1630     #
1631     if CM is not None and "Tangent" in CM and U is not None:
1632         Cm = CM["Tangent"].asMatrix(Xb)
1633     else:
1634         Cm = None
1635     #
1636     # Nombre de pas identique au nombre de pas d'observations
1637     # -------------------------------------------------------
1638     if hasattr(Y,"stepnumber"):
1639         duration = Y.stepnumber()
1640         __p = numpy.cumprod(Y.shape())[-1]
1641     else:
1642         duration = 2
1643         __p = numpy.array(Y).size
1644     #
1645     # Précalcul des inversions de B et R
1646     # ----------------------------------
1647     if selfA._parameters["StoreInternalVariables"] \
1648         or selfA._toStore("CostFunctionJ") \
1649         or selfA._toStore("CostFunctionJb") \
1650         or selfA._toStore("CostFunctionJo") \
1651         or selfA._toStore("CurrentOptimum") \
1652         or selfA._toStore("APosterioriCovariance"):
1653         BI = B.getI()
1654     RI = R.getI()
1655     #
1656     # Initialisation
1657     # --------------
1658     __n = Xb.size
1659     __m = selfA._parameters["NumberOfMembers"]
1660     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1661     else:                         Pn = B
1662     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1663     else:                         Rn = R
1664     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1665     else:                         Qn = Q
1666     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1667     #
1668     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1669         selfA.StoredVariables["Analysis"].store( Xb )
1670         if selfA._toStore("APosterioriCovariance"):
1671             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1672             covarianceXa = Pn
1673     #
1674     previousJMinimum = numpy.finfo(float).max
         nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # defined here, otherwise the optimum tracking below fails on an undefined name
1675     #
1676     for step in range(duration-1):
1677         if hasattr(Y,"store"):
1678             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1679         else:
1680             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1681         #
1682         if U is not None:
1683             if hasattr(U,"store") and len(U)>1:
1684                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1685             elif hasattr(U,"store") and len(U)==1:
1686                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1687             else:
1688                 Un = numpy.asmatrix(numpy.ravel( U )).T
1689         else:
1690             Un = None
1691         #
1692         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1693             Xn = CovarianceInflation( Xn,
1694                 selfA._parameters["InflationType"],
1695                 selfA._parameters["InflationFactor"],
1696                 )
1697         #
1698         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1699             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1700                 argsAsSerie = True,
1701                 returnSerieAsArrayMatrix = True )
1702             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1703             Xn_predicted = EMX + qi
1704             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1705                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1706                 Xn_predicted = Xn_predicted + Cm * Un
1707         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1708             # --- > Par principe, M = Id, Q = 0
1709             Xn_predicted = Xn
1710         #
1711         #--------------------------
1712         if VariantM == "MLEF13":
1713             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1714             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1715             Ua  = numpy.identity(__m)
1716             __j = 0
1717             Deltaw = 1
1718             if not BnotT:
1719                 Ta  = numpy.identity(__m)
1720             vw  = numpy.zeros(__m)
1721             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1722                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1723                 #
1724                 if BnotT:
1725                     E1 = vx1 + _epsilon * EaX
1726                 else:
1727                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1728                 #
1729                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1730                     argsAsSerie = True,
1731                     returnSerieAsArrayMatrix = True )
1732                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1733                 #
1734                 if BnotT:
1735                     EaY = (HE2 - vy2) / _epsilon
1736                 else:
1737                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1738                 #
1739                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1740                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1741                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1742                 #
1743                 vw = vw + Deltaw
1744                 #
1745                 if not BnotT:
1746                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1747                 #
1748                 __j = __j + 1
1749             #
1750             if BnotT:
1751                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1752             #
1753             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
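                 # The analysis ensemble is rebuilt around the converged mean
                 # vx1 with the square-root transform Ta (and the rotation Ua,
                 # kept equal to the identity here).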
1754         #--------------------------
1755         else:
1756             raise ValueError("VariantM must be chosen from the list of authorized methods.")
1757         #
1758         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1759             Xn = CovarianceInflation( Xn,
1760                 selfA._parameters["InflationType"],
1761                 selfA._parameters["InflationFactor"],
1762                 )
1763         #
1764         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1765         #--------------------------
1766         #
1767         if selfA._parameters["StoreInternalVariables"] \
1768             or selfA._toStore("CostFunctionJ") \
1769             or selfA._toStore("CostFunctionJb") \
1770             or selfA._toStore("CostFunctionJo") \
1771             or selfA._toStore("APosterioriCovariance") \
1772             or selfA._toStore("InnovationAtCurrentAnalysis") \
1773             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1774             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1775             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1776             _Innovation = Ynpu - _HXa
1777         #
1778         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1779         # ---> avec analysis
1780         selfA.StoredVariables["Analysis"].store( Xa )
1781         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1782             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1783         if selfA._toStore("InnovationAtCurrentAnalysis"):
1784             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1785         # ---> avec current state
1786         if selfA._parameters["StoreInternalVariables"] \
1787             or selfA._toStore("CurrentState"):
1788             selfA.StoredVariables["CurrentState"].store( Xn )
1789         if selfA._toStore("ForecastState"):
1790             selfA.StoredVariables["ForecastState"].store( EMX )
1791         if selfA._toStore("BMA"):
1792             selfA.StoredVariables["BMA"].store( EMX - Xa )
1793         if selfA._toStore("InnovationAtCurrentState"):
1794             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1795         if selfA._toStore("SimulatedObservationAtCurrentState") \
1796             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1797             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1798         # ---> autres
1799         if selfA._parameters["StoreInternalVariables"] \
1800             or selfA._toStore("CostFunctionJ") \
1801             or selfA._toStore("CostFunctionJb") \
1802             or selfA._toStore("CostFunctionJo") \
1803             or selfA._toStore("CurrentOptimum") \
1804             or selfA._toStore("APosterioriCovariance"):
1805             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1806             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1807             J   = Jb + Jo
1808             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1809             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1810             selfA.StoredVariables["CostFunctionJ" ].store( J )
1811             #
1812             if selfA._toStore("IndexOfOptimum") \
1813                 or selfA._toStore("CurrentOptimum") \
1814                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1815                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1816                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1817                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1818                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1819             if selfA._toStore("IndexOfOptimum"):
1820                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1821             if selfA._toStore("CurrentOptimum"):
1822                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1823             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1824                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1825             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1826                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1827             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1828                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1829             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1830                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1831         if selfA._toStore("APosterioriCovariance"):
1832             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1833         if selfA._parameters["EstimationOf"] == "Parameters" \
1834             and J < previousJMinimum:
1835             previousJMinimum    = J
1836             XaMin               = Xa
1837             if selfA._toStore("APosterioriCovariance"):
1838                 covarianceXaMin = Pn
1839     #
1840     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1841     # ----------------------------------------------------------------------
1842     if selfA._parameters["EstimationOf"] == "Parameters":
1843         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1844         selfA.StoredVariables["Analysis"].store( XaMin )
1845         if selfA._toStore("APosterioriCovariance"):
1846             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1847         if selfA._toStore("BMA"):
1848             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1849     #
1850     return 0
1851
1852 # ==============================================================================
1853 def mmqr(
1854         func     = None,
1855         x0       = None,
1856         fprime   = None,
1857         bounds   = None,
1858         quantile = 0.5,
1859         maxfun   = 15000,
1860         toler    = 1.e-06,
1861         y        = None,
1862         ):
1863     """
1864     Implémentation informatique de l'algorithme MMQR, basée sur la publication :
1865     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1866     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1867     """
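         # Note: MM ("majorize-minimize") algorithm for quantile regression.
         # Each iteration minimises a smooth surrogate of the check-function
         # loss with weights 1/(epsilon+|residual|), i.e. an iteratively
         # reweighted least-squares step, followed by step halving to keep the
         # surrogate decreasing and the variables within the bounds.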
1868     #
1869     # Recuperation des donnees et informations initiales
1870     # --------------------------------------------------
1871     variables = numpy.ravel( x0 )
1872     mesures   = numpy.ravel( y )
1873     increment = sys.float_info.max
1874     p         = variables.size
1875     n         = mesures.size
1876     quantile  = float(quantile)
1877     #
1878     # Calcul des parametres du MM
1879     # ---------------------------
1880     tn      = float(toler) / n
1881     e0      = -tn / math.log(tn)
1882     epsilon = (e0-tn)/(1+math.log(e0))
1883     #
1884     # Calculs d'initialisation
1885     # ------------------------
1886     residus  = mesures - numpy.ravel( func( variables ) )
1887     poids    = 1./(epsilon+numpy.abs(residus))
1888     veps     = 1. - 2. * quantile - residus * poids
1889     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1890     iteration = 0
1891     #
1892     # Recherche iterative
1893     # -------------------
1894     while (increment > toler) and (iteration < maxfun) :
1895         iteration += 1
1896         #
1897         Derivees  = numpy.array(fprime(variables))
1898         Derivees  = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
1899         DeriveesT = Derivees.transpose()
1900         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1901         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
1902         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1903         #
1904         variables = variables + step
1905         if bounds is not None:
1906             # Attention : boucle infinie à éviter si un intervalle est trop petit
1907             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1908                 step      = step/2.
1909                 variables = variables - step
1910         residus   = mesures - numpy.ravel( func(variables) )
1911         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1912         #
1913         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1914             step      = step/2.
1915             variables = variables - step
1916             residus   = mesures - numpy.ravel( func(variables) )
1917             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1918         #
1919         increment     = lastsurrogate-surrogate
1920         poids         = 1./(epsilon+numpy.abs(residus))
1921         veps          = 1. - 2. * quantile - residus * poids
1922         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
1923     #
1924     # Mesure d'écart
1925     # --------------
1926     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
1927     #
1928     return variables, Ecart, [n,p,iteration,increment,0]
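     # Illustrative sketch (hypothetical data, not part of the library): for a
     # linear model y ~ X b, mmqr can be called with the model and its
     # Jacobian, e.g. for a median (quantile 0.5) regression:
     #   X, y = numpy.random.randn(100, 3), numpy.random.randn(100)
     #   beta, ecart, infos = mmqr(
     #       func     = lambda b: X @ numpy.ravel(b),
     #       fprime   = lambda b: X,
     #       x0       = numpy.zeros(3),
     #       y        = y,
     #       quantile = 0.5,
     #       )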
1929
1930 # ==============================================================================
1931 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
1932     """
1933     3DVAR multi-pas et multi-méthodes
1934     """
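         # Note: multi-step driver. For each observation step, the last stored
         # analysis is propagated by the evolution model M (in "State"
         # estimation), then the elementary assimilation "oneCycle" (one of the
         # 3DVAR variants) is applied to the predicted state and the current
         # observation.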
1935     #
1936     # Initialisation
1937     # --------------
1938     Xn = numpy.ravel(Xb).reshape((-1,1))
1939     #
1940     if selfA._parameters["EstimationOf"] == "State":
1941         M = EM["Direct"].appliedTo
1942         #
1943         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1944             selfA.StoredVariables["Analysis"].store( Xn )
1945             if selfA._toStore("APosterioriCovariance"):
1946                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
1947                 else:                         Pn = B
1948                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1949             if selfA._toStore("ForecastState"):
1950                 selfA.StoredVariables["ForecastState"].store( Xn )
1951     #
1952     if hasattr(Y,"stepnumber"):
1953         duration = Y.stepnumber()
1954     else:
1955         duration = 2
1956     #
1957     # Multi-pas
1958     # ---------
1959     for step in range(duration-1):
1960         if hasattr(Y,"store"):
1961             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
1962         else:
1963             Ynpu = numpy.ravel( Y ).reshape((-1,1))
1964         #
1965         if selfA._parameters["EstimationOf"] == "State": # Forecast
1966             Xn = selfA.StoredVariables["Analysis"][-1]
1967             Xn_predicted = M( Xn )
1968             if selfA._toStore("ForecastState"):
1969                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1970         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
1971             # --- > Par principe, M = Id, Q = 0
1972             Xn_predicted = Xn
1973         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
1974         #
1975         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
1976     #
1977     return 0
1978
1979 # ==============================================================================
1980 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1981     """
1982     3DVAR PSAS
1983     """
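         # Note: PSAS solves the 3DVAR problem in observation (dual) space. The
         # control variable is a vector w of the size of Y, the state being
         # recovered as X = Xb + B H^T w, and the cost function below is
         #   J(w) = 1/2 w^T (H B H^T + R) w - w^T d,   with d = Y - H(Xb),
         # whose gradient is (H B H^T + R) w - d.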
1984     #
1985     # Initialisations
1986     # ---------------
1987     #
1988     # Opérateurs
1989     Hm = HO["Direct"].appliedTo
1990     #
1991     # Utilisation éventuelle d'un vecteur H(Xb) précalculé
1992     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1993         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1994     else:
1995         HXb = Hm( Xb )
1996     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1997     if Y.size != HXb.size:
1998         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
1999     if max(Y.shape) != max(HXb.shape):
2000         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2001     #
2002     if selfA._toStore("JacobianMatrixAtBackground"):
2003         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2004         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2005         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2006     #
2007     Ht = HO["Tangent"].asMatrix(Xb)
2008     BHT = B * Ht.T
2009     HBHTpR = R + Ht * BHT
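         # H being linearised once at Xb, the terms B H^T and H B H^T + R are
         # precomputed and reused throughout the minimisation.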
2010     Innovation = Y - HXb
2011     #
2012     # Point de démarrage de l'optimisation
2013     Xini = numpy.zeros(Xb.shape)
2014     #
2015     # Définition de la fonction-coût
2016     # ------------------------------
2017     def CostFunction(w):
2018         _W = numpy.asmatrix(numpy.ravel( w )).T
2019         if selfA._parameters["StoreInternalVariables"] or \
2020             selfA._toStore("CurrentState") or \
2021             selfA._toStore("CurrentOptimum"):
2022             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2023         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2024             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2025             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2026         if selfA._toStore("InnovationAtCurrentState"):
2027             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2028         #
2029         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2030         Jo  = float( - _W.T * Innovation )
2031         J   = Jb + Jo
2032         #
2033         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2034         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2035         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2036         selfA.StoredVariables["CostFunctionJ" ].store( J )
2037         if selfA._toStore("IndexOfOptimum") or \
2038             selfA._toStore("CurrentOptimum") or \
2039             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2040             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2041             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2042             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2043             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2044         if selfA._toStore("IndexOfOptimum"):
2045             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2046         if selfA._toStore("CurrentOptimum"):
2047             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2048         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2049             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2050         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2051             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2052         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2053             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2054         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2055             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2056         return J
2057     #
2058     def GradientOfCostFunction(w):
2059         _W = numpy.asmatrix(numpy.ravel( w )).T
2060         GradJb  = HBHTpR * _W
2061         GradJo  = - Innovation
2062         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2063         return GradJ
2064     #
2065     # Minimisation de la fonctionnelle
2066     # --------------------------------
2067     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2068     #
2069     if selfA._parameters["Minimizer"] == "LBFGSB":
2070         if "0.19" <= scipy.version.version <= "1.1.0":
2071             import lbfgsbhlt as optimiseur
2072         else:
2073             import scipy.optimize as optimiseur
2074         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2075             func        = CostFunction,
2076             x0          = Xini,
2077             fprime      = GradientOfCostFunction,
2078             args        = (),
2079             bounds      = selfA._parameters["Bounds"],
2080             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2081             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2082             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2083             iprint      = selfA._parameters["optiprint"],
2084             )
2085         nfeval = Informations['funcalls']
2086         rc     = Informations['warnflag']
2087     elif selfA._parameters["Minimizer"] == "TNC":
2088         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2089             func        = CostFunction,
2090             x0          = Xini,
2091             fprime      = GradientOfCostFunction,
2092             args        = (),
2093             bounds      = selfA._parameters["Bounds"],
2094             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2095             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2096             ftol        = selfA._parameters["CostDecrementTolerance"],
2097             messages    = selfA._parameters["optmessages"],
2098             )
2099     elif selfA._parameters["Minimizer"] == "CG":
2100         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2101             f           = CostFunction,
2102             x0          = Xini,
2103             fprime      = GradientOfCostFunction,
2104             args        = (),
2105             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2106             gtol        = selfA._parameters["GradientNormTolerance"],
2107             disp        = selfA._parameters["optdisp"],
2108             full_output = True,
2109             )
2110     elif selfA._parameters["Minimizer"] == "NCG":
2111         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2112             f           = CostFunction,
2113             x0          = Xini,
2114             fprime      = GradientOfCostFunction,
2115             args        = (),
2116             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2117             avextol     = selfA._parameters["CostDecrementTolerance"],
2118             disp        = selfA._parameters["optdisp"],
2119             full_output = True,
2120             )
2121     elif selfA._parameters["Minimizer"] == "BFGS":
2122         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2123             f           = CostFunction,
2124             x0          = Xini,
2125             fprime      = GradientOfCostFunction,
2126             args        = (),
2127             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2128             gtol        = selfA._parameters["GradientNormTolerance"],
2129             disp        = selfA._parameters["optdisp"],
2130             full_output = True,
2131             )
2132     else:
2133         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2134     #
2135     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2136     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2137     #
2138     # Correction pour pallier a un bug de TNC sur le retour du Minimum
2139     # ----------------------------------------------------------------
2140     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2141         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2142         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2143     else:
2144         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2145     #
2146     # Obtention de l'analyse
2147     # ----------------------
2148     Xa = Minimum
2149     #
2150     selfA.StoredVariables["Analysis"].store( Xa )
2151     #
2152     if selfA._toStore("OMA") or \
2153         selfA._toStore("SigmaObs2") or \
2154         selfA._toStore("SimulationQuantiles") or \
2155         selfA._toStore("SimulatedObservationAtOptimum"):
2156         if selfA._toStore("SimulatedObservationAtCurrentState"):
2157             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2158         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2159             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2160         else:
2161             HXa = Hm( Xa )
2162     #
2163     # Calcul de la covariance d'analyse
2164     # ---------------------------------
2165     if selfA._toStore("APosterioriCovariance") or \
2166         selfA._toStore("SimulationQuantiles") or \
2167         selfA._toStore("JacobianMatrixAtOptimum") or \
2168         selfA._toStore("KalmanGainAtOptimum"):
2169         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2170         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2171     if selfA._toStore("APosterioriCovariance") or \
2172         selfA._toStore("SimulationQuantiles") or \
2173         selfA._toStore("KalmanGainAtOptimum"):
2174         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2175         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2176     if selfA._toStore("APosterioriCovariance") or \
2177         selfA._toStore("SimulationQuantiles"):
2178         BI = B.getI()
2179         RI = R.getI()
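        # Note: the a posteriori covariance A is approximated below as the inverse
        # of the (Gauss-Newton) Hessian of the cost function,
        #     A ~ ( B^{-1} + H^T R^{-1} H )^{-1}
        # assembled column by column by applying the Hessian to the unit vectors
        # _ee, with H given by the tangent HtM and H^T by the adjoint HaM.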
2180         HessienneI = []
2181         nb = Xa.size
2182         for i in range(nb):
2183             _ee    = numpy.matrix(numpy.zeros(nb)).T
2184             _ee[i] = 1.
2185             _HtEE  = numpy.dot(HtM,_ee)
2186             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2187             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2188         HessienneI = numpy.matrix( HessienneI )
2189         A = HessienneI.I
2190         if min(A.shape) != max(A.shape):
2191             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2192         if (numpy.diag(A) < 0).any():
2193             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2194         if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
2195             try:
2196                 L = numpy.linalg.cholesky( A )
2197             except numpy.linalg.LinAlgError:
2198                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2199     if selfA._toStore("APosterioriCovariance"):
2200         selfA.StoredVariables["APosterioriCovariance"].store( A )
2201     if selfA._toStore("JacobianMatrixAtOptimum"):
2202         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2203     if selfA._toStore("KalmanGainAtOptimum"):
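        # Two algebraically equivalent expressions of the Kalman gain are used,
        # choosing the one that requires inverting the smaller matrix:
        #     K = B H^T ( R + H B H^T )^{-1}                 (observation space, used when Y.size <= Xb.size)
        #     K = ( B^{-1} + H^T R^{-1} H )^{-1} H^T R^{-1}  (state space, used when Y.size > Xb.size)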
2204         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2205         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2206         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2207     #
2208     # Calculs et/ou stockages supplémentaires
2209     # ---------------------------------------
2210     if selfA._toStore("Innovation") or \
2211         selfA._toStore("SigmaObs2") or \
2212         selfA._toStore("MahalanobisConsistency") or \
2213         selfA._toStore("OMB"):
2214         d  = Y - HXb
2215     if selfA._toStore("Innovation"):
2216         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2217     if selfA._toStore("BMA"):
2218         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2219     if selfA._toStore("OMA"):
2220         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2221     if selfA._toStore("OMB"):
2222         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2223     if selfA._toStore("SigmaObs2"):
2224         TraceR = R.trace(Y.size)
2225         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2226     if selfA._toStore("MahalanobisConsistency"):
2227         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2228     if selfA._toStore("SimulationQuantiles"):
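        # Monte-Carlo estimation of the quantiles of the simulated observations:
        # nech states are drawn from N(Xa, A) and propagated to observation space,
        # either with the tangent linear operator ("Linear": Yr = HXa + HtM dXr) or
        # with the full operator ("NonLinear": Yr = H(Xr)); empirical quantiles are
        # then taken componentwise on the sorted sample.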
2229         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2230         HXa  = numpy.matrix(numpy.ravel( HXa )).T
2231         YfQ  = None
2232         for i in range(nech):
2233             if selfA._parameters["SimulationForQuantiles"] == "Linear":
2234                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2235                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2236                 Yr = HXa + dYr
2237             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2238                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2239                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2240             if YfQ is None:
2241                 YfQ = Yr
2242             else:
2243                 YfQ = numpy.hstack((YfQ,Yr))
2244         YfQ.sort(axis=-1)
2245         YQ = None
2246         for quantile in selfA._parameters["Quantiles"]:
2247             if not (0. <= float(quantile) <= 1.): continue
2248             indice = int(nech * float(quantile) - 1./nech)
2249             if YQ is None: YQ = YfQ[:,indice]
2250             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
2251         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2252     if selfA._toStore("SimulatedObservationAtBackground"):
2253         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2254     if selfA._toStore("SimulatedObservationAtOptimum"):
2255         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2256     #
2257     return 0
2258
2259 # ==============================================================================
2260 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
2261     """
2262     Stochastic EnKF
2263     """
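    # Reference sketch of the perturbed-observations (stochastic) EnKF implemented
    # below, for an ensemble of m members:
    #     forecast:  Xf_i = M(Xa_i, U) + q_i,   q_i ~ N(0, Q)
    #     gain:      K    = Pf H^T ( H Pf H^T + R )^{-1}   (ensemble estimates)
    #     analysis:  Xa_i = Xf_i + K ( Y + r_i - H(Xf_i) ),   r_i ~ N(0, R)
    # The two variants "KalmanFilterFormula05" and "KalmanFilterFormula16" differ
    # only in how the gain K is estimated from the ensemble statistics.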
2264     if selfA._parameters["EstimationOf"] == "Parameters":
2265         selfA._parameters["StoreInternalVariables"] = True
2266     #
2267     # Opérateurs
2268     # ----------
2269     H = HO["Direct"].appliedControledFormTo
2270     #
2271     if selfA._parameters["EstimationOf"] == "State":
2272         M = EM["Direct"].appliedControledFormTo
2273     #
2274     if CM is not None and "Tangent" in CM and U is not None:
2275         Cm = CM["Tangent"].asMatrix(Xb)
2276     else:
2277         Cm = None
2278     #
2279     # Nombre de pas identique au nombre de pas d'observations
2280     # -------------------------------------------------------
2281     if hasattr(Y,"stepnumber"):
2282         duration = Y.stepnumber()
2283         __p = numpy.cumprod(Y.shape())[-1]
2284     else:
2285         duration = 2
2286         __p = numpy.array(Y).size
2287     #
2288     # Précalcul des inversions de B et R
2289     # ----------------------------------
2290     if selfA._parameters["StoreInternalVariables"] \
2291         or selfA._toStore("CostFunctionJ") \
2292         or selfA._toStore("CostFunctionJb") \
2293         or selfA._toStore("CostFunctionJo") \
2294         or selfA._toStore("CurrentOptimum") \
2295         or selfA._toStore("APosterioriCovariance"):
2296         BI = B.getI()
2297         RI = R.getI()
2298     #
2299     # Initialisation
2300     # --------------
2301     __n = Xb.size
2302     __m = selfA._parameters["NumberOfMembers"]
2303     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2304     else:                         Pn = B
2305     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2306     else:                         Rn = R
2307     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2308     else:                         Qn = Q
2309     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2310     #
2311     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2312         selfA.StoredVariables["Analysis"].store( Xb )
2313         if selfA._toStore("APosterioriCovariance"):
2314             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2315             covarianceXa = Pn
2316     #
2317     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # as in the variational algorithms, to locate the optimum of the current run
2318     #
2319     for step in range(duration-1):
2320         if hasattr(Y,"store"):
2321             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2322         else:
2323             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2324         #
2325         if U is not None:
2326             if hasattr(U,"store") and len(U)>1:
2327                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2328             elif hasattr(U,"store") and len(U)==1:
2329                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2330             else:
2331                 Un = numpy.asmatrix(numpy.ravel( U )).T
2332         else:
2333             Un = None
2334         #
2335         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2336             Xn = CovarianceInflation( Xn,
2337                 selfA._parameters["InflationType"],
2338                 selfA._parameters["InflationFactor"],
2339                 )
2340         #
2341         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2342             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2343                 argsAsSerie = True,
2344                 returnSerieAsArrayMatrix = True )
2345             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2346             Xn_predicted = EMX + qi
2347             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2348                 argsAsSerie = True,
2349                 returnSerieAsArrayMatrix = True )
2350             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2351                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2352                 Xn_predicted = Xn_predicted + Cm * Un
2353         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2354             # --- > By principle, M = Id, Q = 0
2355             Xn_predicted = Xn
2356             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2357                 argsAsSerie = True,
2358                 returnSerieAsArrayMatrix = True )
2359         #
2360         # Mean of forecast and observation of forecast
2361         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2362         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2363         #
2364         #--------------------------
2365         if VariantM == "KalmanFilterFormula05":
2366             PfHT, HPfHT = 0., 0.
2367             for i in range(__m):
2368                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2369                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2370                 PfHT  += Exfi * Eyfi.T
2371                 HPfHT += Eyfi * Eyfi.T
2372             PfHT  = (1./(__m-1)) * PfHT
2373             HPfHT = (1./(__m-1)) * HPfHT
2374             Kn     = PfHT * ( R + HPfHT ).I
2375             del PfHT, HPfHT
2376             #
2377             for i in range(__m):
2378                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2379                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2380         #--------------------------
2381         elif VariantM == "KalmanFilterFormula16":
2382             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2383             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2384             #
2385             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2386             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
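            # EaX and EaY are normalized anomaly matrices (the observation
            # perturbations being included in EaY), so the gain below is the
            # ensemble estimate Kn = EaX EaY^T ( EaY EaY^T )^{-1}.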
2387             #
2388             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2389             #
2390             for i in range(__m):
2391                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2392         #--------------------------
2393         else:
2394             raise ValueError("VariantM has to be chosen among the authorized variants: 'KalmanFilterFormula05' or 'KalmanFilterFormula16'.")
2395         #
2396         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2397             Xn = CovarianceInflation( Xn,
2398                 selfA._parameters["InflationType"],
2399                 selfA._parameters["InflationFactor"],
2400                 )
2401         #
2402         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2403         #--------------------------
2404         #
2405         if selfA._parameters["StoreInternalVariables"] \
2406             or selfA._toStore("CostFunctionJ") \
2407             or selfA._toStore("CostFunctionJb") \
2408             or selfA._toStore("CostFunctionJo") \
2409             or selfA._toStore("APosterioriCovariance") \
2410             or selfA._toStore("InnovationAtCurrentAnalysis") \
2411             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2412             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2413             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2414             _Innovation = Ynpu - _HXa
2415         #
2416         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2417         # ---> avec analysis
2418         selfA.StoredVariables["Analysis"].store( Xa )
2419         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2420             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2421         if selfA._toStore("InnovationAtCurrentAnalysis"):
2422             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2423         # ---> avec current state
2424         if selfA._parameters["StoreInternalVariables"] \
2425             or selfA._toStore("CurrentState"):
2426             selfA.StoredVariables["CurrentState"].store( Xn )
2427         if selfA._toStore("ForecastState"):
2428             selfA.StoredVariables["ForecastState"].store( EMX )
2429         if selfA._toStore("BMA"):
2430             selfA.StoredVariables["BMA"].store( EMX - Xa )
2431         if selfA._toStore("InnovationAtCurrentState"):
2432             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2433         if selfA._toStore("SimulatedObservationAtCurrentState") \
2434             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2435             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2436         # ---> autres
2437         if selfA._parameters["StoreInternalVariables"] \
2438             or selfA._toStore("CostFunctionJ") \
2439             or selfA._toStore("CostFunctionJb") \
2440             or selfA._toStore("CostFunctionJo") \
2441             or selfA._toStore("CurrentOptimum") \
2442             or selfA._toStore("APosterioriCovariance"):
2443             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2444             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2445             J   = Jb + Jo
2446             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2447             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2448             selfA.StoredVariables["CostFunctionJ" ].store( J )
2449             #
2450             if selfA._toStore("IndexOfOptimum") \
2451                 or selfA._toStore("CurrentOptimum") \
2452                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2453                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2454                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2455                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2456                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2457             if selfA._toStore("IndexOfOptimum"):
2458                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2459             if selfA._toStore("CurrentOptimum"):
2460                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2461             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2462                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2463             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2464                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2465             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2466                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2467             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2468                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2469         if selfA._toStore("APosterioriCovariance"):
2470             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2471         if selfA._parameters["EstimationOf"] == "Parameters" \
2472             and J < previousJMinimum:
2473             previousJMinimum    = J
2474             XaMin               = Xa
2475             if selfA._toStore("APosterioriCovariance"):
2476                 covarianceXaMin = Pn
2477     #
2478     # Additional final storage of the optimum in parameter estimation
2479     # ----------------------------------------------------------------
2480     if selfA._parameters["EstimationOf"] == "Parameters":
2481         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2482         selfA.StoredVariables["Analysis"].store( XaMin )
2483         if selfA._toStore("APosterioriCovariance"):
2484             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2485         if selfA._toStore("BMA"):
2486             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2487     #
2488     return 0
2489
2490 # ==============================================================================
2491 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2492     """
2493     3DVAR
2494     """
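    # Reference sketch of the problem solved below: the analysis Xa minimizes
    #     J(x)  = 1/2 (x-Xb)^T B^{-1} (x-Xb) + 1/2 (Y-H(x))^T R^{-1} (Y-H(x))
    # with gradient
    #     gradJ(x) = B^{-1} (x-Xb) - H'^T R^{-1} (Y-H(x))
    # evaluated by CostFunction and GradientOfCostFunction, and minimized with
    # the gradient-based optimizer selected by the "Minimizer" parameter.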
2495     #
2496     # Initialisations
2497     # ---------------
2498     #
2499     # Opérateurs
2500     Hm = HO["Direct"].appliedTo
2501     Ha = HO["Adjoint"].appliedInXTo
2502     #
2503     # Possible use of a precomputed H(Xb) vector
2504     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2505         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2506     else:
2507         HXb = Hm( Xb )
2508     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2509     if Y.size != HXb.size:
2510         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2511     if max(Y.shape) != max(HXb.shape):
2512         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2513     #
2514     if selfA._toStore("JacobianMatrixAtBackground"):
2515         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2516         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2517         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2518     #
2519     # Précalcul des inversions de B et R
2520     BI = B.getI()
2521     RI = R.getI()
2522     #
2523     # Point de démarrage de l'optimisation
2524     Xini = selfA._parameters["InitializationPoint"]
2525     #
2526     # Définition de la fonction-coût
2527     # ------------------------------
2528     def CostFunction(x):
2529         _X  = numpy.asmatrix(numpy.ravel( x )).T
2530         if selfA._parameters["StoreInternalVariables"] or \
2531             selfA._toStore("CurrentState") or \
2532             selfA._toStore("CurrentOptimum"):
2533             selfA.StoredVariables["CurrentState"].store( _X )
2534         _HX = Hm( _X )
2535         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2536         _Innovation = Y - _HX
2537         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2538             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2539             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2540         if selfA._toStore("InnovationAtCurrentState"):
2541             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2542         #
2543         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2544         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2545         J   = Jb + Jo
2546         #
2547         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2548         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2549         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2550         selfA.StoredVariables["CostFunctionJ" ].store( J )
2551         if selfA._toStore("IndexOfOptimum") or \
2552             selfA._toStore("CurrentOptimum") or \
2553             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2554             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2555             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2556             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2557             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2558         if selfA._toStore("IndexOfOptimum"):
2559             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2560         if selfA._toStore("CurrentOptimum"):
2561             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2562         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2563             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2564         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2565             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2566         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2567             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2568         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2569             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2570         return J
2571     #
2572     def GradientOfCostFunction(x):
2573         _X      = numpy.asmatrix(numpy.ravel( x )).T
2574         _HX     = Hm( _X )
2575         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
2576         GradJb  = BI * (_X - Xb)
2577         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
2578         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2579         return GradJ
2580     #
2581     # Minimisation de la fonctionnelle
2582     # --------------------------------
2583     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2584     #
2585     if selfA._parameters["Minimizer"] == "LBFGSB":
2586         if "0.19" <= scipy.version.version <= "1.1.0":
2587             import lbfgsbhlt as optimiseur
2588         else:
2589             import scipy.optimize as optimiseur
2590         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2591             func        = CostFunction,
2592             x0          = Xini,
2593             fprime      = GradientOfCostFunction,
2594             args        = (),
2595             bounds      = selfA._parameters["Bounds"],
2596             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2597             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2598             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2599             iprint      = selfA._parameters["optiprint"],
2600             )
2601         nfeval = Informations['funcalls']
2602         rc     = Informations['warnflag']
2603     elif selfA._parameters["Minimizer"] == "TNC":
2604         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2605             func        = CostFunction,
2606             x0          = Xini,
2607             fprime      = GradientOfCostFunction,
2608             args        = (),
2609             bounds      = selfA._parameters["Bounds"],
2610             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2611             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2612             ftol        = selfA._parameters["CostDecrementTolerance"],
2613             messages    = selfA._parameters["optmessages"],
2614             )
2615     elif selfA._parameters["Minimizer"] == "CG":
2616         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2617             f           = CostFunction,
2618             x0          = Xini,
2619             fprime      = GradientOfCostFunction,
2620             args        = (),
2621             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2622             gtol        = selfA._parameters["GradientNormTolerance"],
2623             disp        = selfA._parameters["optdisp"],
2624             full_output = True,
2625             )
2626     elif selfA._parameters["Minimizer"] == "NCG":
2627         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2628             f           = CostFunction,
2629             x0          = Xini,
2630             fprime      = GradientOfCostFunction,
2631             args        = (),
2632             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2633             avextol     = selfA._parameters["CostDecrementTolerance"],
2634             disp        = selfA._parameters["optdisp"],
2635             full_output = True,
2636             )
2637     elif selfA._parameters["Minimizer"] == "BFGS":
2638         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2639             f           = CostFunction,
2640             x0          = Xini,
2641             fprime      = GradientOfCostFunction,
2642             args        = (),
2643             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2644             gtol        = selfA._parameters["GradientNormTolerance"],
2645             disp        = selfA._parameters["optdisp"],
2646             full_output = True,
2647             )
2648     else:
2649         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2650     #
2651     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2652     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2653     #
2654     # Workaround for a TNC bug on the returned Minimum
2655     # --------------------------------------------------
2656     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2657         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2658     #
2659     # Obtention de l'analyse
2660     # ----------------------
2661     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2662     #
2663     selfA.StoredVariables["Analysis"].store( Xa )
2664     #
2665     if selfA._toStore("OMA") or \
2666         selfA._toStore("SigmaObs2") or \
2667         selfA._toStore("SimulationQuantiles") or \
2668         selfA._toStore("SimulatedObservationAtOptimum"):
2669         if selfA._toStore("SimulatedObservationAtCurrentState"):
2670             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2671         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2672             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2673         else:
2674             HXa = Hm( Xa )
2675     #
2676     # Calcul de la covariance d'analyse
2677     # ---------------------------------
2678     if selfA._toStore("APosterioriCovariance") or \
2679         selfA._toStore("SimulationQuantiles") or \
2680         selfA._toStore("JacobianMatrixAtOptimum") or \
2681         selfA._toStore("KalmanGainAtOptimum"):
2682         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2683         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2684     if selfA._toStore("APosterioriCovariance") or \
2685         selfA._toStore("SimulationQuantiles") or \
2686         selfA._toStore("KalmanGainAtOptimum"):
2687         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2688         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2689     if selfA._toStore("APosterioriCovariance") or \
2690         selfA._toStore("SimulationQuantiles"):
2691         HessienneI = []
2692         nb = Xa.size
2693         for i in range(nb):
2694             _ee    = numpy.matrix(numpy.zeros(nb)).T
2695             _ee[i] = 1.
2696             _HtEE  = numpy.dot(HtM,_ee)
2697             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2698             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2699         HessienneI = numpy.matrix( HessienneI )
2700         A = HessienneI.I
2701         if min(A.shape) != max(A.shape):
2702             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2703         if (numpy.diag(A) < 0).any():
2704             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2705         if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
2706             try:
2707                 L = numpy.linalg.cholesky( A )
2708             except numpy.linalg.LinAlgError:
2709                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2710     if selfA._toStore("APosterioriCovariance"):
2711         selfA.StoredVariables["APosterioriCovariance"].store( A )
2712     if selfA._toStore("JacobianMatrixAtOptimum"):
2713         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2714     if selfA._toStore("KalmanGainAtOptimum"):
2715         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2716         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2717         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2718     #
2719     # Calculs et/ou stockages supplémentaires
2720     # ---------------------------------------
2721     if selfA._toStore("Innovation") or \
2722         selfA._toStore("SigmaObs2") or \
2723         selfA._toStore("MahalanobisConsistency") or \
2724         selfA._toStore("OMB"):
2725         d  = Y - HXb
2726     if selfA._toStore("Innovation"):
2727         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2728     if selfA._toStore("BMA"):
2729         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2730     if selfA._toStore("OMA"):
2731         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2732     if selfA._toStore("OMB"):
2733         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2734     if selfA._toStore("SigmaObs2"):
2735         TraceR = R.trace(Y.size)
2736         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2737     if selfA._toStore("MahalanobisConsistency"):
2738         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2739     if selfA._toStore("SimulationQuantiles"):
2740         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2741         HXa  = numpy.matrix(numpy.ravel( HXa )).T
2742         YfQ  = None
2743         for i in range(nech):
2744             if selfA._parameters["SimulationForQuantiles"] == "Linear":
2745                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2746                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2747                 Yr = HXa + dYr
2748             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2749                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2750                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2751             if YfQ is None:
2752                 YfQ = Yr
2753             else:
2754                 YfQ = numpy.hstack((YfQ,Yr))
2755         YfQ.sort(axis=-1)
2756         YQ = None
2757         for quantile in selfA._parameters["Quantiles"]:
2758             if not (0. <= float(quantile) <= 1.): continue
2759             indice = int(nech * float(quantile) - 1./nech)
2760             if YQ is None: YQ = YfQ[:,indice]
2761             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
2762         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2763     if selfA._toStore("SimulatedObservationAtBackground"):
2764         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2765     if selfA._toStore("SimulatedObservationAtOptimum"):
2766         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2767     #
2768     return 0
2769
2770 # ==============================================================================
2771 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2772     """
2773     4DVAR
2774     """
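    # Reference sketch of the problem solved below: the initial state x0 minimizes
    # the cost over the whole assimilation window
    #     J(x0) = 1/2 (x0-Xb)^T B^{-1} (x0-Xb)
    #             + 1/2 sum_k (Y_k - H(x_k))^T R^{-1} (Y_k - H(x_k)),
    # where x_k is obtained by propagating x0 with the evolution model M (and the
    # optional control U). The gradient is obtained by a backward integration of
    # the adjoint models Ma and Ha over the stored forward trajectory.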
2775     #
2776     # Initialisations
2777     # ---------------
2778     #
2779     # Opérateurs
2780     Hm = HO["Direct"].appliedControledFormTo
2781     Mm = EM["Direct"].appliedControledFormTo
2782     #
2783     if CM is not None and "Tangent" in CM and U is not None:
2784         Cm = CM["Tangent"].asMatrix(Xb)
2785     else:
2786         Cm = None
2787     #
2788     def Un(_step):
2789         if U is not None:
2790             if hasattr(U,"store") and 1<=_step<len(U) :
2791                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2792             elif hasattr(U,"store") and len(U)==1:
2793                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2794             else:
2795                 _Un = numpy.asmatrix(numpy.ravel( U )).T
2796         else:
2797             _Un = None
2798         return _Un
2799     def CmUn(_xn,_un):
2800         if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
2801             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2802             _CmUn = _Cm * _un
2803         else:
2804             _CmUn = 0.
2805         return _CmUn
2806     #
2807     # Remark: the observations are used starting from time step number 1,
2808     # and are stored in Yo indexed accordingly. Step 0 is therefore not
2809     # used, since the first assimilation step starts with the observation
2810     # of step 1.
2811     #
2812     # Nombre de pas identique au nombre de pas d'observations
2813     if hasattr(Y,"stepnumber"):
2814         duration = Y.stepnumber()
2815     else:
2816         duration = 2
2817     #
2818     # Précalcul des inversions de B et R
2819     BI = B.getI()
2820     RI = R.getI()
2821     #
2822     # Point de démarrage de l'optimisation
2823     Xini = selfA._parameters["InitializationPoint"]
2824     #
2825     # Définition de la fonction-coût
2826     # ------------------------------
2827     selfA.DirectCalculation = [None,] # Step 0 is not observed
2828     selfA.DirectInnovation  = [None,] # Step 0 is not observed
2829     def CostFunction(x):
2830         _X  = numpy.asmatrix(numpy.ravel( x )).T
2831         if selfA._parameters["StoreInternalVariables"] or \
2832             selfA._toStore("CurrentState") or \
2833             selfA._toStore("CurrentOptimum"):
2834             selfA.StoredVariables["CurrentState"].store( _X )
2835         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2836         selfA.DirectCalculation = [None,]
2837         selfA.DirectInnovation  = [None,]
2838         Jo  = 0.
2839         _Xn = _X
2840         for step in range(0,duration-1):
2841             if hasattr(Y,"store"):
2842                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2843             else:
2844                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2845             _Un = Un(step)
2846             #
2847             # Evolution step
2848             if selfA._parameters["EstimationOf"] == "State":
2849                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2850             elif selfA._parameters["EstimationOf"] == "Parameters":
2851                 pass
2852             #
2853             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2854                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2855                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2856             #
2857             # Difference with the observations
2858             if selfA._parameters["EstimationOf"] == "State":
2859                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2860             elif selfA._parameters["EstimationOf"] == "Parameters":
2861                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2862             #
2863             # Storage of the state
2864             selfA.DirectCalculation.append( _Xn )
2865             selfA.DirectInnovation.append( _YmHMX )
2866             #
2867             # Contribution added to the observation term of the cost function
2868             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2869         J = Jb + Jo
2870         #
2871         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2872         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2873         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2874         selfA.StoredVariables["CostFunctionJ" ].store( J )
2875         if selfA._toStore("IndexOfOptimum") or \
2876             selfA._toStore("CurrentOptimum") or \
2877             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2878             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2879             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2880             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2881         if selfA._toStore("IndexOfOptimum"):
2882             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2883         if selfA._toStore("CurrentOptimum"):
2884             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2885         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2886             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2887         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2888             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2889         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2890             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2891         return J
2892     #
2893     def GradientOfCostFunction(x):
2894         _X      = numpy.asmatrix(numpy.ravel( x )).T
2895         GradJb  = BI * (_X - Xb)
2896         GradJo  = 0.
2897         for step in range(duration-1,0,-1):
2898             # Retrieve the last stored state of the forward evolution
2899             _Xn = selfA.DirectCalculation.pop()
2900             # Retrieve the last stored innovation
2901             _YmHMX = selfA.DirectInnovation.pop()
2902             # Computation of the adjoint operators
2903             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2904             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2905             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2906             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2907             # Gradient computation by the adjoint state
2908             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
2909             GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
2910         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2911         return GradJ
2912     #
2913     # Minimisation de la fonctionnelle
2914     # --------------------------------
2915     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2916     #
2917     if selfA._parameters["Minimizer"] == "LBFGSB":
2918         if "0.19" <= scipy.version.version <= "1.1.0":
2919             import lbfgsbhlt as optimiseur
2920         else:
2921             import scipy.optimize as optimiseur
2922         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2923             func        = CostFunction,
2924             x0          = Xini,
2925             fprime      = GradientOfCostFunction,
2926             args        = (),
2927             bounds      = selfA._parameters["Bounds"],
2928             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2929             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2930             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2931             iprint      = selfA._parameters["optiprint"],
2932             )
2933         nfeval = Informations['funcalls']
2934         rc     = Informations['warnflag']
2935     elif selfA._parameters["Minimizer"] == "TNC":
2936         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2937             func        = CostFunction,
2938             x0          = Xini,
2939             fprime      = GradientOfCostFunction,
2940             args        = (),
2941             bounds      = selfA._parameters["Bounds"],
2942             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2943             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2944             ftol        = selfA._parameters["CostDecrementTolerance"],
2945             messages    = selfA._parameters["optmessages"],
2946             )
2947     elif selfA._parameters["Minimizer"] == "CG":
2948         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2949             f           = CostFunction,
2950             x0          = Xini,
2951             fprime      = GradientOfCostFunction,
2952             args        = (),
2953             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2954             gtol        = selfA._parameters["GradientNormTolerance"],
2955             disp        = selfA._parameters["optdisp"],
2956             full_output = True,
2957             )
2958     elif selfA._parameters["Minimizer"] == "NCG":
2959         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2960             f           = CostFunction,
2961             x0          = Xini,
2962             fprime      = GradientOfCostFunction,
2963             args        = (),
2964             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2965             avextol     = selfA._parameters["CostDecrementTolerance"],
2966             disp        = selfA._parameters["optdisp"],
2967             full_output = True,
2968             )
2969     elif selfA._parameters["Minimizer"] == "BFGS":
2970         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2971             f           = CostFunction,
2972             x0          = Xini,
2973             fprime      = GradientOfCostFunction,
2974             args        = (),
2975             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2976             gtol        = selfA._parameters["GradientNormTolerance"],
2977             disp        = selfA._parameters["optdisp"],
2978             full_output = True,
2979             )
2980     else:
2981         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2982     #
2983     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2984     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2985     #
2986     # Workaround for a TNC bug on the returned Minimum
2987     # --------------------------------------------------
2988     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2989         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2990     #
2991     # Obtention de l'analyse
2992     # ----------------------
2993     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2994     #
2995     selfA.StoredVariables["Analysis"].store( Xa )
2996     #
2997     # Calculs et/ou stockages supplémentaires
2998     # ---------------------------------------
2999     if selfA._toStore("BMA"):
3000         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3001     #
3002     return 0
3003
3004 # ==============================================================================
3005 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3006     """
3007     3DVAR variational analysis with no inversion of B
3008     """
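    # Reference sketch of the formulation used below: the control variable is v,
    # with the change of variable X = Xb + B v, so that the background term reads
    #     Jb(v) = 1/2 v^T B^T v
    # and the gradient used by the optimizer is, as coded below,
    #     gradJ(v) = B^T v - H'^T R^{-1} (Y - H(X)),
    # which avoids any inversion of B during the minimization (only B^T and
    # R^{-1} are needed).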
3009     #
3010     # Initialisations
3011     # ---------------
3012     #
3013     # Opérateurs
3014     Hm = HO["Direct"].appliedTo
3015     Ha = HO["Adjoint"].appliedInXTo
3016     #
    # Precomputation of H(Xb), needed below for the innovation d = Y - HXb and
    # for the simulated observation at background (this mirrors the equivalent
    # precomputation done in std3dvar)
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
    #
3017     # Precomputation of the transpose of B and the inverse of R
3018     BT = B.getT()
3019     RI = R.getI()
3020     #
3021     # Point de démarrage de l'optimisation
3022     Xini = numpy.zeros(Xb.shape)
3023     #
3024     # Définition de la fonction-coût
3025     # ------------------------------
3026     def CostFunction(v):
3027         _V = numpy.asmatrix(numpy.ravel( v )).T
3028         _X = Xb + B * _V
3029         if selfA._parameters["StoreInternalVariables"] or \
3030             selfA._toStore("CurrentState") or \
3031             selfA._toStore("CurrentOptimum"):
3032             selfA.StoredVariables["CurrentState"].store( _X )
3033         _HX = Hm( _X )
3034         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3035         _Innovation = Y - _HX
3036         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3037             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3038             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3039         if selfA._toStore("InnovationAtCurrentState"):
3040             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3041         #
3042         Jb  = float( 0.5 * _V.T * BT * _V )
3043         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3044         J   = Jb + Jo
3045         #
3046         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3047         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3048         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3049         selfA.StoredVariables["CostFunctionJ" ].store( J )
3050         if selfA._toStore("IndexOfOptimum") or \
3051             selfA._toStore("CurrentOptimum") or \
3052             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3053             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3054             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3055             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3056             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3057         if selfA._toStore("IndexOfOptimum"):
3058             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3059         if selfA._toStore("CurrentOptimum"):
3060             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3061         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3062             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3063         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3064             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3065         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3066             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3067         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3068             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3069         return J
3070     #
3071     def GradientOfCostFunction(v):
3072         _V = numpy.asmatrix(numpy.ravel( v )).T
3073         _X = Xb + B * _V
3074         _HX     = Hm( _X )
3075         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3076         GradJb  = BT * _V
3077         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3078         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3079         return GradJ
3080     #
3081     # Minimisation de la fonctionnelle
3082     # --------------------------------
3083     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3084     #
3085     if selfA._parameters["Minimizer"] == "LBFGSB":
3086         if "0.19" <= scipy.version.version <= "1.1.0":
3087             import lbfgsbhlt as optimiseur
3088         else:
3089             import scipy.optimize as optimiseur
3090         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3091             func        = CostFunction,
3092             x0          = Xini,
3093             fprime      = GradientOfCostFunction,
3094             args        = (),
3095             bounds      = selfA._parameters["Bounds"],
3096             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3097             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3098             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3099             iprint      = selfA._parameters["optiprint"],
3100             )
3101         nfeval = Informations['funcalls']
3102         rc     = Informations['warnflag']
3103     elif selfA._parameters["Minimizer"] == "TNC":
3104         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3105             func        = CostFunction,
3106             x0          = Xini,
3107             fprime      = GradientOfCostFunction,
3108             args        = (),
3109             bounds      = selfA._parameters["Bounds"],
3110             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3111             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3112             ftol        = selfA._parameters["CostDecrementTolerance"],
3113             messages    = selfA._parameters["optmessages"],
3114             )
3115     elif selfA._parameters["Minimizer"] == "CG":
3116         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3117             f           = CostFunction,
3118             x0          = Xini,
3119             fprime      = GradientOfCostFunction,
3120             args        = (),
3121             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3122             gtol        = selfA._parameters["GradientNormTolerance"],
3123             disp        = selfA._parameters["optdisp"],
3124             full_output = True,
3125             )
3126     elif selfA._parameters["Minimizer"] == "NCG":
3127         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3128             f           = CostFunction,
3129             x0          = Xini,
3130             fprime      = GradientOfCostFunction,
3131             args        = (),
3132             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3133             avextol     = selfA._parameters["CostDecrementTolerance"],
3134             disp        = selfA._parameters["optdisp"],
3135             full_output = True,
3136             )
3137     elif selfA._parameters["Minimizer"] == "BFGS":
3138         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3139             f           = CostFunction,
3140             x0          = Xini,
3141             fprime      = GradientOfCostFunction,
3142             args        = (),
3143             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3144             gtol        = selfA._parameters["GradientNormTolerance"],
3145             disp        = selfA._parameters["optdisp"],
3146             full_output = True,
3147             )
3148     else:
3149         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3150     #
3151     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3152     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3153     #
3154     # Workaround for a TNC bug on the returned Minimum
3155     # --------------------------------------------------
3156     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3157         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3158         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3159     else:
3160         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3161     #
3162     # Obtention de l'analyse
3163     # ----------------------
3164     Xa = Minimum
3165     #
3166     selfA.StoredVariables["Analysis"].store( Xa )
3167     #
3168     if selfA._toStore("OMA") or \
3169         selfA._toStore("SigmaObs2") or \
3170         selfA._toStore("SimulationQuantiles") or \
3171         selfA._toStore("SimulatedObservationAtOptimum"):
3172         if selfA._toStore("SimulatedObservationAtCurrentState"):
3173             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3174         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3175             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3176         else:
3177             HXa = Hm( Xa )
3178     #
3179     # Calcul de la covariance d'analyse
3180     # ---------------------------------
3181     if selfA._toStore("APosterioriCovariance") or \
3182         selfA._toStore("SimulationQuantiles") or \
3183         selfA._toStore("JacobianMatrixAtOptimum") or \
3184         selfA._toStore("KalmanGainAtOptimum"):
3185         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3186         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3187     if selfA._toStore("APosterioriCovariance") or \
3188         selfA._toStore("SimulationQuantiles") or \
3189         selfA._toStore("KalmanGainAtOptimum"):
3190         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3191         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3192     if selfA._toStore("APosterioriCovariance") or \
3193         selfA._toStore("SimulationQuantiles"):
3194         BI = B.getI()
3195         HessienneI = []
3196         nb = Xa.size
3197         for i in range(nb):
3198             _ee    = numpy.matrix(numpy.zeros(nb)).T
3199             _ee[i] = 1.
3200             _HtEE  = numpy.dot(HtM,_ee)
3201             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3202             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3203         HessienneI = numpy.matrix( HessienneI )
3204         A = HessienneI.I
3205         if min(A.shape) != max(A.shape):
3206             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3207         if (numpy.diag(A) < 0).any():
3208             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3209         if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
3210             try:
3211                 L = numpy.linalg.cholesky( A )
3212             except numpy.linalg.LinAlgError:
3213                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3214     if selfA._toStore("APosterioriCovariance"):
3215         selfA.StoredVariables["APosterioriCovariance"].store( A )
3216     if selfA._toStore("JacobianMatrixAtOptimum"):
3217         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3218     if selfA._toStore("KalmanGainAtOptimum"):
3219         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3220         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3221         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3222     #
3223     # Calculs et/ou stockages supplémentaires
3224     # ---------------------------------------
3225     if selfA._toStore("Innovation") or \
3226         selfA._toStore("SigmaObs2") or \
3227         selfA._toStore("MahalanobisConsistency") or \
3228         selfA._toStore("OMB"):
3229         d  = Y - HXb
3230     if selfA._toStore("Innovation"):
3231         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3232     if selfA._toStore("BMA"):
3233         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3234     if selfA._toStore("OMA"):
3235         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3236     if selfA._toStore("OMB"):
3237         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3238     if selfA._toStore("SigmaObs2"):
3239         TraceR = R.trace(Y.size)
3240         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3241     if selfA._toStore("MahalanobisConsistency"):
3242         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3243     if selfA._toStore("SimulationQuantiles"):
3244         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
3245         HXa  = numpy.matrix(numpy.ravel( HXa )).T
3246         YfQ  = None
3247         for i in range(nech):
3248             if selfA._parameters["SimulationForQuantiles"] == "Linear":
3249                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
3250                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
3251                 Yr = HXa + dYr
3252             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
3253                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
3254                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
3255             if YfQ is None:
3256                 YfQ = Yr
3257             else:
3258                 YfQ = numpy.hstack((YfQ,Yr))
3259         YfQ.sort(axis=-1)
3260         YQ = None
3261         for quantile in selfA._parameters["Quantiles"]:
3262             if not (0. <= float(quantile) <= 1.): continue
3263             indice = int(nech * float(quantile) - 1./nech)
3264             if YQ is None: YQ = YfQ[:,indice]
3265             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
3266         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
3267     if selfA._toStore("SimulatedObservationAtBackground"):
3268         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3269     if selfA._toStore("SimulatedObservationAtOptimum"):
3270         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3271     #
3272     return 0
3273
3274 # ==============================================================================
3275 if __name__ == "__main__":
3276     print('\n AUTODIAGNOSTIC\n')