1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = list(sys.path) ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
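# A minimal, hypothetical sketch of the triplet expected by ExecuteFunction above
# (the dictionary keys mirror those built by FDApproximation.TangentMatrix below;
# the path, module and function names are illustrative only):
#   funcrepr = {
#       "__userFunction__path" : "/some/user/module/directory",
#       "__userFunction__modl" : "UserModule",
#       "__userFunction__name" : "DirectOperator",
#   }
#   HX = ExecuteFunction( (X, None, funcrepr) )   # returns numpy.ravel( H(X) )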
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     This class provides an interface to define approximated operators. When an
55     object is created from a given function "Function", one obtains an object
56     offering the 3 methods "DirectOperator", "TangentOperator" and
57     "AdjointOperator". The finite-difference (FD) approximation is controlled
58     either by the multiplicative increment "increment", 1% by default, or by
59     the fixed increment "dX", which is multiplied by "increment" (hence in %),
60     and centered FD are used if the boolean "centeredDF" is true.
61     """
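    # A minimal usage sketch (hypothetical user function and vectors, not executed here):
    #   FDA = FDApproximation( Function = UserFunction, increment = 0.01, centeredDF = True )
    #   HX  = FDA.DirectOperator( X )           # H(X)
    #   J   = FDA.TangentMatrix( X )            # finite-difference Jacobian at X
    #   dY  = FDA.TangentOperator( (X, dX) )    # J @ dX
    #   Ya  = FDA.AdjointOperator( (X, Y) )     # J.T @ Y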
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Multiprocessing computations : %s (number of processes : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Multifunction computations : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
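            # These five aligned lists implement a bounded cache of previously
            # computed Jacobians, indexed by the (point, increment) pairs and
            # their norms (see TangentMatrix below).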
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Multiprocessing computations : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except Exception:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Multiprocessing computations : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except Exception:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction of duplicate computations : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance for duplicate detection : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
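        # Search l backwards for an entry close to e, the closeness being relative
        # to the stored norm n[i]; return (found, index) so that a previously
        # computed result can be reused instead of being recomputed.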
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Case%s already computed, retrieving duplicate %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Direct computation using the supplied function.
171
172         NB: the extraArgs are present to ensure call compatibility, but they
173         must not be passed here to the user function.
174         """
175         logging.debug("FDA DirectOperator computation (explicit)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Computation of the tangent operator as the Jacobian by finite differences,
188         that is, the gradient of H at X. Directional finite differences are used
189         around the point X. X is a numpy.matrix.
190
191         Centered finite differences (second-order approximation):
192         1/ For each component i of X, the perturbation dX[i] is added to and
193            subtracted from the component X[i] to build X_plus_dXi and X_moins_dXi,
194            and the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi =
195            H( X_moins_dXi ) are computed
196         2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided by
197            the step 2*dXi
198         3/ Each result, component by component, becomes a column of the Jacobian
199
200         Non-centered finite differences (first-order approximation):
201         1/ For each component i of X, the perturbation dX[i] is added to the
202            component X[i] to build X_plus_dXi, and the response
203            HX_plus_dXi = H( X_plus_dXi ) is computed
204         2/ The central value HX = H(X) is computed
205         3/ The differences (HX_plus_dXi-HX) are formed and divided by
206            the step dXi
207         4/ Each result, component by component, becomes a column of the Jacobian
208
209         """
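        # In compact form (sketch), the i-th column of the Jacobian is
        #   ( H(X + dX[i]*e_i) - H(X - dX[i]*e_i) ) / (2*dX[i])   for centered FD,
        #   ( H(X + dX[i]*e_i) - H(X) ) / dX[i]                   otherwise,
        # where e_i is the i-th canonical basis vector.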
210         logging.debug("FDA Start of the Jacobian computation")
211         logging.debug("FDA   Increment of............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Centered approximation..: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Case J already computed, retrieving duplicate %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Jacobian computation (by retrieving duplicate %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Jacobian computation (explicit)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                     #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                     #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_dX) )
377         #
378         logging.debug("FDA End of the Jacobian computation")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Tangent computation using the Jacobian.
386
387         NB: the extraArgs are present to ensure call compatibility, but they
388         must not be passed here to the user function.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect length of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Computation of the matrix form if the second argument is None
402             # --------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Computation of the linearized value of H at X applied to dX
408             # ------------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Adjoint computation using the Jacobian.
418
419         NB: the extraArgs are present to ensure call compatibility, but they
420         must not be passed here to the user function.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect length of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Computation of the matrix form if the second argument is None
434             # --------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Computation of the adjoint value at X applied to Y
440             # ---------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448     "Generation of an ensemble of _nbmembers centered random states"
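    # The returned ensemble is an array of shape (state size, _nbmembers), each
    # column being one member (sketch of the convention used by the callers below).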
449     #
450     _bgcenter = numpy.ravel(_bgcenter)[:,None]
451     if _nbmembers < 1:
452         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
453     #
454     if _bgcovariance is None:
455         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
456     else:
457         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
459     #
460     return BackgroundEnsemble
461
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464     "Generation of an ensemble of _nbmembers centered random states"
465     def __CenteredRandomAnomalies(Zr, N):
466         """
467         Generates a matrix of N random anomalies centered on Zr, following the
468         handwritten notes of MB and consistent with the code of PS with eps = -1
469         """
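        # Sketch of the intent: Zr holds N-1 independent anomaly rows; Q is an
        # N x (N-1) matrix whose columns are orthonormal and orthogonal to the
        # vector of ones (up to the random rotation R), so the N generated
        # anomalies sum to zero and the perturbed ensemble keeps _bgcenter as its mean.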
470         eps = -1
471         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
474         Q = numpy.dot(Q,R)
475         Zr = numpy.dot(Q,Zr)
476         return Zr.T
477     #
478     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
479     if _nbmembers < 1:
480         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
481     if _bgcovariance is None:
482         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
483     else:
484         if _withSVD:
485             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486             _nbctl = _bgcenter.size
487             if _nbmembers > _nbctl:
488                 _Z = numpy.concatenate((numpy.dot(
489                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
491             else:
492                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494             BackgroundEnsemble = _bgcenter + _Zca
495         else:
496             if max(abs(_bgcovariance.flatten())) > 0:
497                 _nbctl = _bgcenter.size
498                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500                 BackgroundEnsemble = _bgcenter + _Zca
501             else:
502                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
503     #
504     return BackgroundEnsemble
505
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
508     "Returns the centered anomalies of a StateSize*NbMembers ensemble"
509     if OptMean is None:
510         __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
511     else:
512         __Em = numpy.ravel(OptMean).reshape((-1,1))
513     #
514     return Normalisation * (numpy.asarray(Ensemble) - __Em)
515
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble ):
518     "Returns the ensemble covariance"
519     __Anomalies = EnsembleOfAnomalies( Ensemble )
520     __n, __m = numpy.asarray(__Anomalies).shape
521     # Empirical estimate
522     __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
523     # Enforce symmetry
524     __Covariance = (__Covariance + __Covariance.T) * 0.5
525     # Enforce positivity
526     __epsilon    = mpr*numpy.trace(__Covariance)
527     __Covariance = __Covariance + __epsilon * numpy.identity(__n)
528     #
529     return __Covariance
530
531 # ==============================================================================
532 def CovarianceInflation(
533         InputCovOrEns,
534         InflationType   = None,
535         InflationFactor = None,
536         BackgroundCov   = None,
537         ):
538     """
539     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
540
541     Synthesis: Hunt 2007, section 2.3.5
542     """
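    # In short, with an inflation factor a (following the code below):
    #   multiplicative on a covariance      :  P  <-  a^2 * P
    #   multiplicative on anomalies         :  Xi <-  Xm + a * (Xi - Xm)
    #   additive on a covariance            :  P  <-  (1-a) * P + a * I
    #   hybrid on the background covariance :  P  <-  (1-a) * P + a * B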
543     if InflationFactor is None:
544         return InputCovOrEns
545     else:
546         InflationFactor = float(InflationFactor)
547     #
548     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
549         if InflationFactor < 1.:
550             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
551         if InflationFactor < 1.+mpr:
552             return InputCovOrEns
553         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
554     #
555     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
556         if InflationFactor < 1.:
557             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
558         if InflationFactor < 1.+mpr:
559             return InputCovOrEns
560         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
561         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
562             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
563     #
564     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
565         if InflationFactor < 0.:
566             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
567         if InflationFactor < mpr:
568             return InputCovOrEns
569         __n, __m = numpy.asarray(InputCovOrEns).shape
570         if __n != __m:
571             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
572         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
573     #
574     elif InflationType == "HybridOnBackgroundCovariance":
575         if InflationFactor < 0.:
576             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
577         if InflationFactor < mpr:
578             return InputCovOrEns
579         __n, __m = numpy.asarray(InputCovOrEns).shape
580         if __n != __m:
581             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
582         if BackgroundCov is None:
583             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
584         if InputCovOrEns.shape != BackgroundCov.shape:
585             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
586         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
587     #
588     elif InflationType == "Relaxation":
589         raise NotImplementedError("InflationType Relaxation")
590     #
591     else:
592         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
593     #
594     return OutputCovOrEns
595
596 # ==============================================================================
597 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
598     """
599     EnKS (Ensemble Kalman Smoother, fixed-lag formulation)
600     """
601     #
602     # Operators
603     H = HO["Direct"].appliedControledFormTo
604     #
605     if selfA._parameters["EstimationOf"] == "State":
606         M = EM["Direct"].appliedControledFormTo
607     #
608     if CM is not None and "Tangent" in CM and U is not None:
609         Cm = CM["Tangent"].asMatrix(Xb)
610     else:
611         Cm = None
612     #
613     # Precomputation of the B and R inversions
614     RIdemi = R.sqrtmI()
615     #
616     # Observation duration and sizes
617     LagL = selfA._parameters["SmootherLagL"]
618     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
619         raise ValueError("Fixed-lag smoother requires a series of observations")
620     if Y.stepnumber() < LagL:
621         raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
622     duration = Y.stepnumber()
623     __p = numpy.cumprod(Y.shape())[-1]
624     __n = Xb.size
625     __m = selfA._parameters["NumberOfMembers"]
626     #
627     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
628     else:                         Pn = B
629     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
630     else:                         Qn = Q
631     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
632         selfA.StoredVariables["Analysis"].store( Xb )
633         if selfA._toStore("APosterioriCovariance"):
634             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
635             covarianceXa = Pn
636     #
637     # Initial direct computation (storage is preferred over recomputation)
638     __seed = numpy.random.get_state()
639     selfB = copy.deepcopy(selfA)
640     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
641     if VariantM == "EnKS16-KalmanFilterFormula":
642         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
643     else:
644         raise ValueError("VariantM has to be chosen in the authorized methods list.")
645     if LagL > 0:
646         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
647     else:
648         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
649     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
650     #
651     for step in range(LagL,duration-1):
652         #
653         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
654         sEL.append(None)
655         #
656         if hasattr(Y,"store"):
657             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
658         else:
659             Ynpu = numpy.ravel( Y ).reshape((__p,1))
660         #
661         if U is not None:
662             if hasattr(U,"store") and len(U)>1:
663                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
664             elif hasattr(U,"store") and len(U)==1:
665                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
666             else:
667                 Un = numpy.asmatrix(numpy.ravel( U )).T
668         else:
669             Un = None
670         #
671         #--------------------------
672         if VariantM == "EnKS16-KalmanFilterFormula":
673             if selfA._parameters["EstimationOf"] == "State": # Forecast
674                 EL = M( [(EL[:,i], Un) for i in range(__m)],
675                     argsAsSerie = True,
676                     returnSerieAsArrayMatrix = True )
677                 EL = EL + numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
678                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
679                     argsAsSerie = True,
680                     returnSerieAsArrayMatrix = True )
681                 if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is counted twice!
682                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
683                     EZ = EZ + Cm * Un
684             elif selfA._parameters["EstimationOf"] == "Parameters":
685                 # --- > By principle, M = Id, Q = 0
686                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
687                     argsAsSerie = True,
688                     returnSerieAsArrayMatrix = True )
689             #
690             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
691             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
692             #
693             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
694             delta = RIdemi @ ( Ynpu - vZm )
695             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
696             vw    = mT @ mS.T @ delta
697             #
698             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
699             mU    = numpy.identity(__m)
700             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
701             #
702             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
703             EL    = vEm + EX @ wTU
704             #
705             sEL[LagL] = EL
706             for irl in range(LagL): # Smoothing of the L previous analyses
707                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
708                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
709                 sEL[irl] = vEm + EX @ wTU
710             #
711             # Keep the order-0 retrospective analysis before rotation
712             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
713             if selfA._toStore("APosterioriCovariance"):
714                 EXn = sEL[0]
715             #
716             for irl in range(LagL):
717                 sEL[irl] = sEL[irl+1]
718             sEL[LagL] = None
719         #--------------------------
720         else:
721             raise ValueError("VariantM has to be chosen in the authorized methods list.")
722         #
723         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
724         # ---> with analysis
725         selfA.StoredVariables["Analysis"].store( Xa )
726         if selfA._toStore("APosterioriCovariance"):
727             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
728     #
729     # Storage of the last analyses, not yet completely updated
730     for irl in range(LagL):
731         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
732         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
733         selfA.StoredVariables["Analysis"].store( Xa )
734     #
735     return 0
736
737 # ==============================================================================
738 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
739     """
740     Ensemble-Transform EnKF
741     """
742     if selfA._parameters["EstimationOf"] == "Parameters":
743         selfA._parameters["StoreInternalVariables"] = True
744     #
745     # Operators
746     # ---------
747     H = HO["Direct"].appliedControledFormTo
748     #
749     if selfA._parameters["EstimationOf"] == "State":
750         M = EM["Direct"].appliedControledFormTo
751     #
752     if CM is not None and "Tangent" in CM and U is not None:
753         Cm = CM["Tangent"].asMatrix(Xb)
754     else:
755         Cm = None
756     #
757     # Number of steps identical to the number of observation steps
758     # -------------------------------------------------------------
759     if hasattr(Y,"stepnumber"):
760         duration = Y.stepnumber()
761         __p = numpy.cumprod(Y.shape())[-1]
762     else:
763         duration = 2
764         __p = numpy.array(Y).size
765     #
766     # Precomputation of the B and R inversions
767     # ----------------------------------------
768     if selfA._parameters["StoreInternalVariables"] \
769         or selfA._toStore("CostFunctionJ") \
770         or selfA._toStore("CostFunctionJb") \
771         or selfA._toStore("CostFunctionJo") \
772         or selfA._toStore("CurrentOptimum") \
773         or selfA._toStore("APosterioriCovariance"):
774         BI = B.getI()
775         RI = R.getI()
776     elif VariantM != "KalmanFilterFormula":
777         RI = R.getI()
778     if VariantM == "KalmanFilterFormula":
779         RIdemi = R.sqrtmI()
780     #
781     # Initialisation
782     # --------------
783     __n = Xb.size
784     __m = selfA._parameters["NumberOfMembers"]
785     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
786     else:                         Pn = B
787     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
788     else:                         Qn = Q
789     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
790     #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
791     #
792     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
793         selfA.StoredVariables["Analysis"].store( Xb )
794         if selfA._toStore("APosterioriCovariance"):
795             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
796             covarianceXa = Pn
797     #
798     previousJMinimum = numpy.finfo(float).max
799     #
800     for step in range(duration-1):
801         if hasattr(Y,"store"):
802             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
803         else:
804             Ynpu = numpy.ravel( Y ).reshape((__p,1))
805         #
806         if U is not None:
807             if hasattr(U,"store") and len(U)>1:
808                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
809             elif hasattr(U,"store") and len(U)==1:
810                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
811             else:
812                 Un = numpy.asmatrix(numpy.ravel( U )).T
813         else:
814             Un = None
815         #
816         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
817             Xn = CovarianceInflation( Xn,
818                 selfA._parameters["InflationType"],
819                 selfA._parameters["InflationFactor"],
820                 )
821         #
822         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
823             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
824                 argsAsSerie = True,
825                 returnSerieAsArrayMatrix = True )
826             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
827             Xn_predicted = EMX + qi
828             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
829                 argsAsSerie = True,
830                 returnSerieAsArrayMatrix = True )
831             if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is counted twice!
832                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
833                 Xn_predicted = Xn_predicted + Cm * Un
834         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
835             # --- > By principle, M = Id, Q = 0
836             Xn_predicted = Xn
837             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
838                 argsAsSerie = True,
839                 returnSerieAsArrayMatrix = True )
840         #
841         # Mean of forecast and observation of forecast
842         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
843         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
844         #
845         # Anomalies
846         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
847         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
848         #
849         #--------------------------
850         if VariantM == "KalmanFilterFormula":
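            # Sketch of the transform update in ensemble space: with
            #   S = R^(-1/2) (HE - Hfm) / sqrt(m-1)  and  d = R^(-1/2) (Ynpu - Hfm),
            # the weights are w = (I + S^T S)^(-1) S^T d, and each analysis member is
            #   Xfm + EaX/sqrt(m-1) @ ( w + sqrt(m-1) * T^(1/2) e_i ),  with T = (I + S^T S)^(-1).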
851             mS    = RIdemi * EaHX / math.sqrt(__m-1)
852             delta = RIdemi * ( Ynpu - Hfm )
853             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
854             vw    = mT @ mS.T @ delta
855             #
856             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
857             mU    = numpy.identity(__m)
858             #
859             EaX   = EaX / math.sqrt(__m-1)
860             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
861         #--------------------------
862         elif VariantM == "Variational":
863             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
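            # Sketch of the variational formulation minimized below, over the weights w:
            #   J(w) = 1/2 (Ynpu - HXfm - EaHX w)^T R^(-1) (Ynpu - HXfm - EaHX w) + (m-1)/2 w^T w
            # The analysis ensemble is then rebuilt from the minimizer vw and from the
            # inverse Hessian Pta of J (EWa being a square root of (m-1)*Pta).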
864             def CostFunction(w):
865                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
866                 _Jo = 0.5 * _A.T @ (RI * _A)
867                 _Jb = 0.5 * (__m-1) * w.T @ w
868                 _J  = _Jo + _Jb
869                 return float(_J)
870             def GradientOfCostFunction(w):
871                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
872                 _GradJo = - EaHX.T @ (RI * _A)
873                 _GradJb = (__m-1) * w.reshape((__m,1))
874                 _GradJ  = _GradJo + _GradJb
875                 return numpy.ravel(_GradJ)
876             vw = scipy.optimize.fmin_cg(
877                 f           = CostFunction,
878                 x0          = numpy.zeros(__m),
879                 fprime      = GradientOfCostFunction,
880                 args        = (),
881                 disp        = False,
882                 )
883             #
884             Hto = EaHX.T @ (RI * EaHX)
885             Htb = (__m-1) * numpy.identity(__m)
886             Hta = Hto + Htb
887             #
888             Pta = numpy.linalg.inv( Hta )
889             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
890             #
891             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
892         #--------------------------
893         elif VariantM == "FiniteSize11": # Boc2011 gauge
894             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
895             def CostFunction(w):
896                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
897                 _Jo = 0.5 * _A.T @ (RI * _A)
898                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
899                 _J  = _Jo + _Jb
900                 return float(_J)
901             def GradientOfCostFunction(w):
902                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
903                 _GradJo = - EaHX.T @ (RI * _A)
904                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
905                 _GradJ  = _GradJo + _GradJb
906                 return numpy.ravel(_GradJ)
907             vw = scipy.optimize.fmin_cg(
908                 f           = CostFunction,
909                 x0          = numpy.zeros(__m),
910                 fprime      = GradientOfCostFunction,
911                 args        = (),
912                 disp        = False,
913                 )
914             #
915             Hto = EaHX.T @ (RI * EaHX)
916             Htb = __m * \
917                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
918                 / (1 + 1/__m + vw.T @ vw)**2
919             Hta = Hto + Htb
920             #
921             Pta = numpy.linalg.inv( Hta )
922             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
923             #
924             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
925         #--------------------------
926         elif VariantM == "FiniteSize15": # Boc2015 gauge
927             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
928             def CostFunction(w):
929                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
930                 _Jo = 0.5 * _A.T * RI * _A
931                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
932                 _J  = _Jo + _Jb
933                 return float(_J)
934             def GradientOfCostFunction(w):
935                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
936                 _GradJo = - EaHX.T @ (RI * _A)
937                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
938                 _GradJ  = _GradJo + _GradJb
939                 return numpy.ravel(_GradJ)
940             vw = scipy.optimize.fmin_cg(
941                 f           = CostFunction,
942                 x0          = numpy.zeros(__m),
943                 fprime      = GradientOfCostFunction,
944                 args        = (),
945                 disp        = False,
946                 )
947             #
948             Hto = EaHX.T @ (RI * EaHX)
949             Htb = (__m+1) * \
950                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
951                 / (1 + 1/__m + vw.T @ vw)**2
952             Hta = Hto + Htb
953             #
954             Pta = numpy.linalg.inv( Hta )
955             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
956             #
957             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
958         #--------------------------
959         elif VariantM == "FiniteSize16": # Boc2016 gauge
960             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
961             def CostFunction(w):
962                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
963                 _Jo = 0.5 * _A.T @ (RI * _A)
964                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
965                 _J  = _Jo + _Jb
966                 return float(_J)
967             def GradientOfCostFunction(w):
968                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
969                 _GradJo = - EaHX.T @ (RI * _A)
970                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
971                 _GradJ  = _GradJo + _GradJb
972                 return numpy.ravel(_GradJ)
973             vw = scipy.optimize.fmin_cg(
974                 f           = CostFunction,
975                 x0          = numpy.zeros(__m),
976                 fprime      = GradientOfCostFunction,
977                 args        = (),
978                 disp        = False,
979                 )
980             #
981             Hto = EaHX.T @ (RI * EaHX)
982             Htb = ((__m+1) / (__m-1)) * \
983                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
984                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
985             Hta = Hto + Htb
986             #
987             Pta = numpy.linalg.inv( Hta )
988             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
989             #
990             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
991         #--------------------------
992         else:
993             raise ValueError("VariantM has to be chosen in the authorized methods list.")
994         #
995         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
996             Xn = CovarianceInflation( Xn,
997                 selfA._parameters["InflationType"],
998                 selfA._parameters["InflationFactor"],
999                 )
1000         #
1001         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1002         #--------------------------
1003         #
1004         if selfA._parameters["StoreInternalVariables"] \
1005             or selfA._toStore("CostFunctionJ") \
1006             or selfA._toStore("CostFunctionJb") \
1007             or selfA._toStore("CostFunctionJo") \
1008             or selfA._toStore("APosterioriCovariance") \
1009             or selfA._toStore("InnovationAtCurrentAnalysis") \
1010             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1011             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1012             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1013             _Innovation = Ynpu - _HXa
1014         #
1015         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1016         # ---> with analysis
1017         selfA.StoredVariables["Analysis"].store( Xa )
1018         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1019             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1020         if selfA._toStore("InnovationAtCurrentAnalysis"):
1021             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1022         # ---> with current state
1023         if selfA._parameters["StoreInternalVariables"] \
1024             or selfA._toStore("CurrentState"):
1025             selfA.StoredVariables["CurrentState"].store( Xn )
1026         if selfA._toStore("ForecastState"):
1027             selfA.StoredVariables["ForecastState"].store( EMX )
1028         if selfA._toStore("BMA"):
1029             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1030         if selfA._toStore("InnovationAtCurrentState"):
1031             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1032         if selfA._toStore("SimulatedObservationAtCurrentState") \
1033             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1034             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1035         # ---> others
1036         if selfA._parameters["StoreInternalVariables"] \
1037             or selfA._toStore("CostFunctionJ") \
1038             or selfA._toStore("CostFunctionJb") \
1039             or selfA._toStore("CostFunctionJo") \
1040             or selfA._toStore("CurrentOptimum") \
1041             or selfA._toStore("APosterioriCovariance"):
1042             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1043             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1044             J   = Jb + Jo
1045             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1046             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1047             selfA.StoredVariables["CostFunctionJ" ].store( J )
1048             #
1049             if selfA._toStore("IndexOfOptimum") \
1050                 or selfA._toStore("CurrentOptimum") \
1051                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1052                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1053                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1054                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1055                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1056             if selfA._toStore("IndexOfOptimum"):
1057                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1058             if selfA._toStore("CurrentOptimum"):
1059                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1060             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1061                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1062             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1063                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1064             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1065                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1066             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1067                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1068         if selfA._toStore("APosterioriCovariance"):
1069             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1070         if selfA._parameters["EstimationOf"] == "Parameters" \
1071             and J < previousJMinimum:
1072             previousJMinimum    = J
1073             XaMin               = Xa
1074             if selfA._toStore("APosterioriCovariance"):
1075                 covarianceXaMin = Pn
1076         # ---> For the smoothers
1077         if selfA._toStore("CurrentEnsembleState"):
1078             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1079     #
1080     # Additional final storage of the optimum in parameter estimation
1081     # ----------------------------------------------------------------
1082     if selfA._parameters["EstimationOf"] == "Parameters":
1083         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1084         selfA.StoredVariables["Analysis"].store( XaMin )
1085         if selfA._toStore("APosterioriCovariance"):
1086             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1087         if selfA._toStore("BMA"):
1088             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1089     #
1090     return 0
1091
1092 # ==============================================================================
1093 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1094     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1095     """
1096     Iterative EnKF
1097     """
1098     if selfA._parameters["EstimationOf"] == "Parameters":
1099         selfA._parameters["StoreInternalVariables"] = True
1100     #
1101     # Operators
1102     # ---------
1103     H = HO["Direct"].appliedControledFormTo
1104     #
1105     if selfA._parameters["EstimationOf"] == "State":
1106         M = EM["Direct"].appliedControledFormTo
1107     #
1108     if CM is not None and "Tangent" in CM and U is not None:
1109         Cm = CM["Tangent"].asMatrix(Xb)
1110     else:
1111         Cm = None
1112     #
1113     # Number of steps identical to the number of observation steps
1114     # -------------------------------------------------------
1115     if hasattr(Y,"stepnumber"):
1116         duration = Y.stepnumber()
1117         __p = numpy.cumprod(Y.shape())[-1]
1118     else:
1119         duration = 2
1120         __p = numpy.array(Y).size
1121     #
1122     # Precomputation of the inverses of B and R
1123     # ----------------------------------
1124     if selfA._parameters["StoreInternalVariables"] \
1125         or selfA._toStore("CostFunctionJ") \
1126         or selfA._toStore("CostFunctionJb") \
1127         or selfA._toStore("CostFunctionJo") \
1128         or selfA._toStore("CurrentOptimum") \
1129         or selfA._toStore("APosterioriCovariance"):
1130         BI = B.getI()
1131     RI = R.getI()
1132     #
1133     # Initialization
1134     # --------------
1135     __n = Xb.size
1136     __m = selfA._parameters["NumberOfMembers"]
1137     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1138     else:                         Pn = B
1139     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1140     else:                         Rn = R
1141     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1142     else:                         Qn = Q
1143     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1144     #
1145     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1146         selfA.StoredVariables["Analysis"].store( Xb )
1147         if selfA._toStore("APosterioriCovariance"):
1148             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1149             covarianceXa = Pn
1150     #
1151     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Offset needed below to locate the optimum of the current run
1152     #
1153     for step in range(duration-1):
1154         if hasattr(Y,"store"):
1155             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1156         else:
1157             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1158         #
1159         if U is not None:
1160             if hasattr(U,"store") and len(U)>1:
1161                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1162             elif hasattr(U,"store") and len(U)==1:
1163                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1164             else:
1165                 Un = numpy.asmatrix(numpy.ravel( U )).T
1166         else:
1167             Un = None
1168         #
1169         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1170             Xn = CovarianceInflation( Xn,
1171                 selfA._parameters["InflationType"],
1172                 selfA._parameters["InflationFactor"],
1173                 )
1174         #
1175         #--------------------------
1176         if VariantM == "IEnKF12":
1177             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1178             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1179             __j = 0
1180             Deltaw = 1
1181             if not BnotT:
1182                 Ta  = numpy.identity(__m)
1183             vw  = numpy.zeros(__m)
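            # Gauss-Newton loop on the weights w: each pass (i) rebuilds the perturbed
            # ensemble E1 around the current estimate, (ii) propagates it through the
            # evolution model when estimating the state and applies the observation
            # operator, then (iii) computes the gradient GradJ, the approximate Hessian
            # mH and the increment Deltaw in weight space.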
1184             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1185                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1186                 #
1187                 if BnotT:
1188                     E1 = vx1 + _epsilon * EaX
1189                 else:
1190                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1191                 #
1192                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1193                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1194                         argsAsSerie = True,
1195                         returnSerieAsArrayMatrix = True )
1196                 elif selfA._parameters["EstimationOf"] == "Parameters":
1197                     # ---> By principle, M = Id
1198                     E2 = Xn
1199                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1200                 vy1 = H((vx2, Un)).reshape((__p,1))
1201                 #
1202                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1203                     argsAsSerie = True,
1204                     returnSerieAsArrayMatrix = True )
1205                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1206                 #
1207                 if BnotT:
1208                     EaY = (HE2 - vy2) / _epsilon
1209                 else:
1210                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1211                 #
1212                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1213                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1214                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1215                 #
1216                 vw = vw + Deltaw
1217                 #
1218                 if not BnotT:
1219                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1220                 #
1221                 __j = __j + 1
1222             #
1223             A2 = EnsembleOfAnomalies( E2 )
1224             #
1225             if BnotT:
1226                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1227                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1228             #
1229             Xn = vx2 + A2
1230         #--------------------------
1231         else:
1232             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1233         #
1234         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1235             Xn = CovarianceInflation( Xn,
1236                 selfA._parameters["InflationType"],
1237                 selfA._parameters["InflationFactor"],
1238                 )
1239         #
1240         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1241         #--------------------------
1242         #
1243         if selfA._parameters["StoreInternalVariables"] \
1244             or selfA._toStore("CostFunctionJ") \
1245             or selfA._toStore("CostFunctionJb") \
1246             or selfA._toStore("CostFunctionJo") \
1247             or selfA._toStore("APosterioriCovariance") \
1248             or selfA._toStore("InnovationAtCurrentAnalysis") \
1249             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1250             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1251             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1252             _Innovation = Ynpu - _HXa
1253         #
1254         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1255         # ---> with the analysis
1256         selfA.StoredVariables["Analysis"].store( Xa )
1257         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1258             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1259         if selfA._toStore("InnovationAtCurrentAnalysis"):
1260             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1261         # ---> with the current state
1262         if selfA._parameters["StoreInternalVariables"] \
1263             or selfA._toStore("CurrentState"):
1264             selfA.StoredVariables["CurrentState"].store( Xn )
1265         if selfA._toStore("ForecastState"):
1266             selfA.StoredVariables["ForecastState"].store( E2 )
1267         if selfA._toStore("BMA"):
1268             selfA.StoredVariables["BMA"].store( E2 - Xa )
1269         if selfA._toStore("InnovationAtCurrentState"):
1270             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1271         if selfA._toStore("SimulatedObservationAtCurrentState") \
1272             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1273             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1274         # ---> others
1275         if selfA._parameters["StoreInternalVariables"] \
1276             or selfA._toStore("CostFunctionJ") \
1277             or selfA._toStore("CostFunctionJb") \
1278             or selfA._toStore("CostFunctionJo") \
1279             or selfA._toStore("CurrentOptimum") \
1280             or selfA._toStore("APosterioriCovariance"):
1281             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1282             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1283             J   = Jb + Jo
1284             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1285             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1286             selfA.StoredVariables["CostFunctionJ" ].store( J )
1287             #
1288             if selfA._toStore("IndexOfOptimum") \
1289                 or selfA._toStore("CurrentOptimum") \
1290                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1291                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1292                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1293                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1294                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1295             if selfA._toStore("IndexOfOptimum"):
1296                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1297             if selfA._toStore("CurrentOptimum"):
1298                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1299             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1300                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1301             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1302                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1303             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1304                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1305             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1306                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1307         if selfA._toStore("APosterioriCovariance"):
1308             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1309         if selfA._parameters["EstimationOf"] == "Parameters" \
1310             and J < previousJMinimum:
1311             previousJMinimum    = J
1312             XaMin               = Xa
1313             if selfA._toStore("APosterioriCovariance"):
1314                 covarianceXaMin = Pn
1315     #
1316     # Additional final storage of the optimum for parameter estimation
1317     # ----------------------------------------------------------------------
1318     if selfA._parameters["EstimationOf"] == "Parameters":
1319         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1320         selfA.StoredVariables["Analysis"].store( XaMin )
1321         if selfA._toStore("APosterioriCovariance"):
1322             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1323         if selfA._toStore("BMA"):
1324             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1325     #
1326     return 0
1327
1328 # ==============================================================================
1329 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1330     """
1331     Incremental 3DVAR
1332     """
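    # Descriptive sketch of the incremental scheme implemented below: the outer loop
    # re-linearizes the observation operator around the current point Xr, and the inner
    # loop minimizes the quadratic cost in the increment dx,
    #     J(dx) = 0.5 * dx' B^{-1} dx + 0.5 * (d - Ht dx)' R^{-1} (d - Ht dx),
    # with d = Y - H(Xb) the innovation computed once at the background.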
1333     #
1334     # Initializations
1335     # ---------------
1336     #
1337     # Non-linear operator for the outer loop
1338     Hm = HO["Direct"].appliedTo
1339     #
1340     # Precomputation of the inverses of B and R
1341     BI = B.getI()
1342     RI = R.getI()
1343     #
1344     # Starting point of the optimization
1345     Xini = selfA._parameters["InitializationPoint"]
1346     #
1347     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1348     Innovation = Y - HXb
1349     #
1350     # Outer Loop
1351     # ----------
1352     iOuter = 0
1353     J      = 1./mpr
1354     DeltaJ = 1./mpr
1355     Xr     = Xini.reshape((-1,1))
1356     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1357         #
1358         # Inner Loop
1359         # ----------
1360         Ht = HO["Tangent"].asMatrix(Xr)
1361         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1362         #
1363         # Definition of the cost function
1364         # ------------------------------
1365         def CostFunction(dx):
1366             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1367             if selfA._parameters["StoreInternalVariables"] or \
1368                 selfA._toStore("CurrentState") or \
1369                 selfA._toStore("CurrentOptimum"):
1370                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1371             _HdX = Ht * _dX
1372             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1373             _dInnovation = Innovation - _HdX
1374             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1375                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1376                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1377             if selfA._toStore("InnovationAtCurrentState"):
1378                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1379             #
1380             Jb  = float( 0.5 * _dX.T * BI * _dX )
1381             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1382             J   = Jb + Jo
1383             #
1384             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1385             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1386             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1387             selfA.StoredVariables["CostFunctionJ" ].store( J )
1388             if selfA._toStore("IndexOfOptimum") or \
1389                 selfA._toStore("CurrentOptimum") or \
1390                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1391                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1392                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1393                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1394                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1395             if selfA._toStore("IndexOfOptimum"):
1396                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1397             if selfA._toStore("CurrentOptimum"):
1398                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1399             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1400                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1401             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1402                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1403             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1404                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1405             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1406                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1407             return J
1408         #
1409         def GradientOfCostFunction(dx):
1410             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1411             _HdX         = Ht * _dX
1412             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1413             _dInnovation = Innovation - _HdX
1414             GradJb       = BI * _dX
1415             GradJo       = - Ht.T @ (RI * _dInnovation)
1416             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1417             return GradJ
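        # The gradient above is the exact gradient of the quadratic inner cost:
        #     grad J(dx) = B^{-1} dx - Ht' R^{-1} (d - Ht dx)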
1418         #
1419         # Minimization of the functional
1420         # --------------------------------
1421         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1422         #
1423         if selfA._parameters["Minimizer"] == "LBFGSB":
1424             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1425             if "0.19" <= scipy.version.version <= "1.1.0":
1426                 import lbfgsbhlt as optimiseur
1427             else:
1428                 import scipy.optimize as optimiseur
1429             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1430                 func        = CostFunction,
1431                 x0          = numpy.zeros(Xini.size),
1432                 fprime      = GradientOfCostFunction,
1433                 args        = (),
1434                 bounds      = selfA._parameters["Bounds"],
1435                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1436                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1437                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1438                 iprint      = selfA._parameters["optiprint"],
1439                 )
1440             nfeval = Informations['funcalls']
1441             rc     = Informations['warnflag']
1442         elif selfA._parameters["Minimizer"] == "TNC":
1443             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1444                 func        = CostFunction,
1445                 x0          = numpy.zeros(Xini.size),
1446                 fprime      = GradientOfCostFunction,
1447                 args        = (),
1448                 bounds      = selfA._parameters["Bounds"],
1449                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1450                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1451                 ftol        = selfA._parameters["CostDecrementTolerance"],
1452                 messages    = selfA._parameters["optmessages"],
1453                 )
1454         elif selfA._parameters["Minimizer"] == "CG":
1455             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1456                 f           = CostFunction,
1457                 x0          = numpy.zeros(Xini.size),
1458                 fprime      = GradientOfCostFunction,
1459                 args        = (),
1460                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1461                 gtol        = selfA._parameters["GradientNormTolerance"],
1462                 disp        = selfA._parameters["optdisp"],
1463                 full_output = True,
1464                 )
1465         elif selfA._parameters["Minimizer"] == "NCG":
1466             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1467                 f           = CostFunction,
1468                 x0          = numpy.zeros(Xini.size),
1469                 fprime      = GradientOfCostFunction,
1470                 args        = (),
1471                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1472                 avextol     = selfA._parameters["CostDecrementTolerance"],
1473                 disp        = selfA._parameters["optdisp"],
1474                 full_output = True,
1475                 )
1476         elif selfA._parameters["Minimizer"] == "BFGS":
1477             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1478                 f           = CostFunction,
1479                 x0          = numpy.zeros(Xini.size),
1480                 fprime      = GradientOfCostFunction,
1481                 args        = (),
1482                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1483                 gtol        = selfA._parameters["GradientNormTolerance"],
1484                 disp        = selfA._parameters["optdisp"],
1485                 full_output = True,
1486                 )
1487         else:
1488             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1489         #
1490         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1491         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1492         #
1493         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1494             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1495             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1496         else:
1497             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1498         #
1499         Xr     = Minimum
1500         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1501         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1502     #
1503     # Obtaining the analysis
1504     # ----------------------
1505     Xa = Xr
1506     #
1507     selfA.StoredVariables["Analysis"].store( Xa )
1508     #
1509     if selfA._toStore("OMA") or \
1510         selfA._toStore("SigmaObs2") or \
1511         selfA._toStore("SimulationQuantiles") or \
1512         selfA._toStore("SimulatedObservationAtOptimum"):
1513         if selfA._toStore("SimulatedObservationAtCurrentState"):
1514             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1515         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1516             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1517         else:
1518             HXa = Hm( Xa )
1519     #
1520     # Computation of the analysis covariance
1521     # ---------------------------------
1522     if selfA._toStore("APosterioriCovariance") or \
1523         selfA._toStore("SimulationQuantiles") or \
1524         selfA._toStore("JacobianMatrixAtOptimum") or \
1525         selfA._toStore("KalmanGainAtOptimum"):
1526         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1527         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1528     if selfA._toStore("APosterioriCovariance") or \
1529         selfA._toStore("SimulationQuantiles") or \
1530         selfA._toStore("KalmanGainAtOptimum"):
1531         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1532         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1533     if selfA._toStore("APosterioriCovariance") or \
1534         selfA._toStore("SimulationQuantiles"):
1535         HessienneI = []
1536         nb = Xa.size
1537         for i in range(nb):
1538             _ee    = numpy.matrix(numpy.zeros(nb)).T
1539             _ee[i] = 1.
1540             _HtEE  = numpy.dot(HtM,_ee)
1541             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1542             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1543         HessienneI = numpy.matrix( HessienneI )
1544         A = HessienneI.I
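        # HessienneI collects (B^{-1} + Ht' R^{-1} Ht) applied to each canonical basis
        # vector, so A is the a posteriori covariance of the linearized problem.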
1545         if min(A.shape) != max(A.shape):
1546             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1547         if (numpy.diag(A) < 0).any():
1548             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1549         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1550             try:
1551                 L = numpy.linalg.cholesky( A )
1552             except:
1553                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1554     if selfA._toStore("APosterioriCovariance"):
1555         selfA.StoredVariables["APosterioriCovariance"].store( A )
1556     if selfA._toStore("JacobianMatrixAtOptimum"):
1557         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1558     if selfA._toStore("KalmanGainAtOptimum"):
1559         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1560         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1561         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1562     #
1563     # Additional computations and/or storage
1564     # ---------------------------------------
1565     if selfA._toStore("Innovation") or \
1566         selfA._toStore("SigmaObs2") or \
1567         selfA._toStore("MahalanobisConsistency") or \
1568         selfA._toStore("OMB"):
1569         d  = Y - HXb
1570     if selfA._toStore("Innovation"):
1571         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1572     if selfA._toStore("BMA"):
1573         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1574     if selfA._toStore("OMA"):
1575         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1576     if selfA._toStore("OMB"):
1577         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1578     if selfA._toStore("SigmaObs2"):
1579         TraceR = R.trace(Y.size)
1580         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1581     if selfA._toStore("MahalanobisConsistency"):
1582         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
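    # The simulated quantiles below are estimated by Monte-Carlo sampling: states are
    # drawn around the analysis Xa with the a posteriori covariance A, mapped to the
    # observation space either through the tangent operator ("Linear") or the full
    # non-linear operator ("NonLinear"), and the empirical quantiles of the sorted
    # simulated observations are stored.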
1583     if selfA._toStore("SimulationQuantiles"):
1584         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1585         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1586         YfQ  = None
1587         for i in range(nech):
1588             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1589                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1590                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1591                 Yr = HXa + dYr
1592             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1593                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1594                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1595             if YfQ is None:
1596                 YfQ = Yr
1597             else:
1598                 YfQ = numpy.hstack((YfQ,Yr))
1599         YfQ.sort(axis=-1)
1600         YQ = None
1601         for quantile in selfA._parameters["Quantiles"]:
1602             if not (0. <= float(quantile) <= 1.): continue
1603             indice = int(nech * float(quantile) - 1./nech)
1604             if YQ is None: YQ = YfQ[:,indice]
1605             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1606         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1607     if selfA._toStore("SimulatedObservationAtBackground"):
1608         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1609     if selfA._toStore("SimulatedObservationAtOptimum"):
1610         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1611     #
1612     return 0
1613
1614 # ==============================================================================
1615 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1616     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1617     """
1618     Maximum Likelihood Ensemble Filter
1619     """
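    # Descriptive note: in the "MLEF13" variant below, each analysis step first forecasts
    # the ensemble (state estimation) or keeps it unchanged (parameter estimation), then
    # iterates in the space of ensemble weights w with the same Gauss-Newton scheme as in
    # the IEnKF above (gradient GradJ, approximate Hessian mH, square-root transform Ta),
    # the observation operator being applied to the full ensemble at each inner iteration.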
1620     if selfA._parameters["EstimationOf"] == "Parameters":
1621         selfA._parameters["StoreInternalVariables"] = True
1622     #
1623     # Operators
1624     # ----------
1625     H = HO["Direct"].appliedControledFormTo
1626     #
1627     if selfA._parameters["EstimationOf"] == "State":
1628         M = EM["Direct"].appliedControledFormTo
1629     #
1630     if CM is not None and "Tangent" in CM and U is not None:
1631         Cm = CM["Tangent"].asMatrix(Xb)
1632     else:
1633         Cm = None
1634     #
1635     # Number of steps identical to the number of observation steps
1636     # -------------------------------------------------------
1637     if hasattr(Y,"stepnumber"):
1638         duration = Y.stepnumber()
1639         __p = numpy.cumprod(Y.shape())[-1]
1640     else:
1641         duration = 2
1642         __p = numpy.array(Y).size
1643     #
1644     # Precomputation of the inverses of B and R
1645     # ----------------------------------
1646     if selfA._parameters["StoreInternalVariables"] \
1647         or selfA._toStore("CostFunctionJ") \
1648         or selfA._toStore("CostFunctionJb") \
1649         or selfA._toStore("CostFunctionJo") \
1650         or selfA._toStore("CurrentOptimum") \
1651         or selfA._toStore("APosterioriCovariance"):
1652         BI = B.getI()
1653     RI = R.getI()
1654     #
1655     # Initialization
1656     # --------------
1657     __n = Xb.size
1658     __m = selfA._parameters["NumberOfMembers"]
1659     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1660     else:                         Pn = B
1661     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1662     else:                         Rn = R
1663     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1664     else:                         Qn = Q
1665     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1666     #
1667     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1668         selfA.StoredVariables["Analysis"].store( Xb )
1669         if selfA._toStore("APosterioriCovariance"):
1670             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1671             covarianceXa = Pn
1672     #
1673     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Offset needed below to locate the optimum of the current run
1674     #
1675     for step in range(duration-1):
1676         if hasattr(Y,"store"):
1677             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1678         else:
1679             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1680         #
1681         if U is not None:
1682             if hasattr(U,"store") and len(U)>1:
1683                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1684             elif hasattr(U,"store") and len(U)==1:
1685                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1686             else:
1687                 Un = numpy.asmatrix(numpy.ravel( U )).T
1688         else:
1689             Un = None
1690         #
1691         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1692             Xn = CovarianceInflation( Xn,
1693                 selfA._parameters["InflationType"],
1694                 selfA._parameters["InflationFactor"],
1695                 )
1696         #
1697         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1698             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1699                 argsAsSerie = True,
1700                 returnSerieAsArrayMatrix = True )
1701             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1702             Xn_predicted = EMX + qi
1703             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1704                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1705                 Xn_predicted = Xn_predicted + Cm * Un
1706         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1707             # ---> By principle, M = Id, Q = 0
1708             Xn_predicted = Xn
1709         #
1710         #--------------------------
1711         if VariantM == "MLEF13":
1712             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1713             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1714             Ua  = numpy.identity(__m)
1715             __j = 0
1716             Deltaw = 1
1717             if not BnotT:
1718                 Ta  = numpy.identity(__m)
1719             vw  = numpy.zeros(__m)
1720             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1721                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1722                 #
1723                 if BnotT:
1724                     E1 = vx1 + _epsilon * EaX
1725                 else:
1726                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1727                 #
1728                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1729                     argsAsSerie = True,
1730                     returnSerieAsArrayMatrix = True )
1731                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1732                 #
1733                 if BnotT:
1734                     EaY = (HE2 - vy2) / _epsilon
1735                 else:
1736                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1737                 #
1738                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1739                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1740                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1741                 #
1742                 vw = vw + Deltaw
1743                 #
1744                 if not BnotT:
1745                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1746                 #
1747                 __j = __j + 1
1748             #
1749             if BnotT:
1750                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1751             #
1752             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1753         #--------------------------
1754         else:
1755             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1756         #
1757         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1758             Xn = CovarianceInflation( Xn,
1759                 selfA._parameters["InflationType"],
1760                 selfA._parameters["InflationFactor"],
1761                 )
1762         #
1763         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1764         #--------------------------
1765         #
1766         if selfA._parameters["StoreInternalVariables"] \
1767             or selfA._toStore("CostFunctionJ") \
1768             or selfA._toStore("CostFunctionJb") \
1769             or selfA._toStore("CostFunctionJo") \
1770             or selfA._toStore("APosterioriCovariance") \
1771             or selfA._toStore("InnovationAtCurrentAnalysis") \
1772             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1773             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1774             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1775             _Innovation = Ynpu - _HXa
1776         #
1777         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1778         # ---> with the analysis
1779         selfA.StoredVariables["Analysis"].store( Xa )
1780         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1781             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1782         if selfA._toStore("InnovationAtCurrentAnalysis"):
1783             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1784         # ---> with the current state
1785         if selfA._parameters["StoreInternalVariables"] \
1786             or selfA._toStore("CurrentState"):
1787             selfA.StoredVariables["CurrentState"].store( Xn )
1788         if selfA._toStore("ForecastState"):
1789             selfA.StoredVariables["ForecastState"].store( EMX )
1790         if selfA._toStore("BMA"):
1791             selfA.StoredVariables["BMA"].store( EMX - Xa )
1792         if selfA._toStore("InnovationAtCurrentState"):
1793             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1794         if selfA._toStore("SimulatedObservationAtCurrentState") \
1795             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1796             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1797         # ---> others
1798         if selfA._parameters["StoreInternalVariables"] \
1799             or selfA._toStore("CostFunctionJ") \
1800             or selfA._toStore("CostFunctionJb") \
1801             or selfA._toStore("CostFunctionJo") \
1802             or selfA._toStore("CurrentOptimum") \
1803             or selfA._toStore("APosterioriCovariance"):
1804             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1805             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1806             J   = Jb + Jo
1807             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1808             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1809             selfA.StoredVariables["CostFunctionJ" ].store( J )
1810             #
1811             if selfA._toStore("IndexOfOptimum") \
1812                 or selfA._toStore("CurrentOptimum") \
1813                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1814                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1815                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1816                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1817                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1818             if selfA._toStore("IndexOfOptimum"):
1819                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1820             if selfA._toStore("CurrentOptimum"):
1821                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1822             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1823                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1824             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1825                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1826             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1827                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1828             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1829                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1830         if selfA._toStore("APosterioriCovariance"):
1831             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1832         if selfA._parameters["EstimationOf"] == "Parameters" \
1833             and J < previousJMinimum:
1834             previousJMinimum    = J
1835             XaMin               = Xa
1836             if selfA._toStore("APosterioriCovariance"):
1837                 covarianceXaMin = Pn
1838     #
1839     # Additional final storage of the optimum for parameter estimation
1840     # ----------------------------------------------------------------------
1841     if selfA._parameters["EstimationOf"] == "Parameters":
1842         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1843         selfA.StoredVariables["Analysis"].store( XaMin )
1844         if selfA._toStore("APosterioriCovariance"):
1845             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1846         if selfA._toStore("BMA"):
1847             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1848     #
1849     return 0
1850
1851 # ==============================================================================
1852 def mmqr(
1853         func     = None,
1854         x0       = None,
1855         fprime   = None,
1856         bounds   = None,
1857         quantile = 0.5,
1858         maxfun   = 15000,
1859         toler    = 1.e-06,
1860         y        = None,
1861         ):
1862     """
1863     Computer implementation of the MMQR algorithm, based on the publication:
1864     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1865     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1866     """
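    # A minimal usage sketch (hypothetical names, for illustration only): given a model
    # "modelFunc" mapping the p parameters to the n measured values and its Jacobian
    # "modelJac", a median regression could be obtained with, e.g.:
    #     xOpt, Ecart, infos = mmqr(func=modelFunc, x0=x0, fprime=modelJac, y=yObs, quantile=0.5)
    # "Ecart" being the value of the quantile (check) loss at the returned point.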
1867     #
1868     # Retrieval of the initial data and information
1869     # --------------------------------------------------
1870     variables = numpy.ravel( x0 )
1871     mesures   = numpy.ravel( y )
1872     increment = sys.float_info[0]
1873     p         = variables.size
1874     n         = mesures.size
1875     quantile  = float(quantile)
1876     #
1877     # Computation of the MM parameters
1878     # ---------------------------
1879     tn      = float(toler) / n
1880     e0      = -tn / math.log(tn)
1881     epsilon = (e0-tn)/(1+math.log(e0))
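    # tn, e0 and epsilon define the smoothing of the non-differentiable quantile loss, in
    # the spirit of the epsilon-perturbation used in the reference cited above.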
1882     #
1883     # Initialization computations
1884     # ------------------------
1885     residus  = mesures - numpy.ravel( func( variables ) )
1886     poids    = 1./(epsilon+numpy.abs(residus))
1887     veps     = 1. - 2. * quantile - residus * poids
1888     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1889     iteration = 0
1890     #
1891     # Iterative search
1892     # -------------------
1893     while (increment > toler) and (iteration < maxfun) :
1894         iteration += 1
1895         #
1896         Derivees  = numpy.array(fprime(variables))
1897         Derivees  = Derivees.reshape(n,p) # Needed to restore the matrix shape if it went through YACS pipes
1898         DeriveesT = Derivees.transpose()
1899         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1900         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
1901         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
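        # Each MM iteration thus solves a weighted linear least-squares problem, the
        # weights 1/(epsilon+|residuals|) building the quadratic surrogate that majorizes
        # the perturbed quantile loss.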
1902         #
1903         variables = variables + step
1904         if bounds is not None:
1905             # Warning: avoid an infinite loop if an interval is too small
1906             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1907                 step      = step/2.
1908                 variables = variables - step
1909         residus   = mesures - numpy.ravel( func(variables) )
1910         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1911         #
1912         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1913             step      = step/2.
1914             variables = variables - step
1915             residus   = mesures - numpy.ravel( func(variables) )
1916             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1917         #
1918         increment     = lastsurrogate-surrogate
1919         poids         = 1./(epsilon+numpy.abs(residus))
1920         veps          = 1. - 2. * quantile - residus * poids
1921         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
1922     #
1923     # Discrepancy measure
1924     # --------------
1925     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
1926     #
1927     return variables, Ecart, [n,p,iteration,increment,0]
1928
1929 # ==============================================================================
1930 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
1931     """
1932     Multi-step and multi-method 3DVAR
1933     """
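    # Descriptive note: at each observation step, the current analysis is propagated by
    # the evolution model (state estimation) or kept unchanged (parameter estimation),
    # and the provided "oneCycle" routine (typically one of the 3DVAR kernels of this
    # module) performs the analysis using this forecast as the background.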
1934     #
1935     # Initialization
1936     # --------------
1937     Xn = numpy.ravel(Xb).reshape((-1,1))
1938     #
1939     if selfA._parameters["EstimationOf"] == "State":
1940         M = EM["Direct"].appliedTo
1941         #
1942         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1943             selfA.StoredVariables["Analysis"].store( Xn )
1944             if selfA._toStore("APosterioriCovariance"):
1945                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
1946                 else:                         Pn = B
1947                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1948             if selfA._toStore("ForecastState"):
1949                 selfA.StoredVariables["ForecastState"].store( Xn )
1950     #
1951     if hasattr(Y,"stepnumber"):
1952         duration = Y.stepnumber()
1953     else:
1954         duration = 2
1955     #
1956     # Multi-step
1957     # ---------
1958     for step in range(duration-1):
1959         if hasattr(Y,"store"):
1960             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
1961         else:
1962             Ynpu = numpy.ravel( Y ).reshape((-1,1))
1963         #
1964         if selfA._parameters["EstimationOf"] == "State": # Forecast
1965             Xn = selfA.StoredVariables["Analysis"][-1]
1966             Xn_predicted = M( Xn )
1967             if selfA._toStore("ForecastState"):
1968                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1969         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
1970             # ---> By principle, M = Id, Q = 0
1971             Xn_predicted = Xn
1972         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
1973         #
1974         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
1975     #
1976     return 0
1977
1978 # ==============================================================================
1979 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1980     """
1981     3DVAR PSAS
1982     """
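    # Descriptive note: PSAS solves the variational problem in observation space. The
    # control variable is a vector w of the size of Y, the state is recovered as
    # Xa = Xb + B Ht' w, and the cost minimized below is
    #     J(w) = 0.5 * w' (Ht B Ht' + R) w - w' d,   with d = Y - H(Xb).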
1983     #
1984     # Initializations
1985     # ---------------
1986     #
1987     # Operators
1988     Hm = HO["Direct"].appliedTo
1989     #
1990     # Possible use of a precomputed H(Xb) vector
1991     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1992         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1993     else:
1994         HXb = Hm( Xb )
1995     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1996     if Y.size != HXb.size:
1997         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1998     if max(Y.shape) != max(HXb.shape):
1999         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2000     #
2001     if selfA._toStore("JacobianMatrixAtBackground"):
2002         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2003         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2004         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2005     #
2006     Ht = HO["Tangent"].asMatrix(Xb)
2007     BHT = B * Ht.T
2008     HBHTpR = R + Ht * BHT
2009     Innovation = Y - HXb
2010     #
2011     # Starting point of the optimization
2012     Xini = numpy.zeros(Y.size) # The PSAS control variable w lives in observation space
2013     #
2014     # Definition of the cost function
2015     # ------------------------------
2016     def CostFunction(w):
2017         _W = numpy.asmatrix(numpy.ravel( w )).T
2018         if selfA._parameters["StoreInternalVariables"] or \
2019             selfA._toStore("CurrentState") or \
2020             selfA._toStore("CurrentOptimum"):
2021             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2022         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2023             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2024             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2025         if selfA._toStore("InnovationAtCurrentState"):
2026             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2027         #
2028         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2029         Jo  = float( - _W.T * Innovation )
2030         J   = Jb + Jo
2031         #
2032         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2033         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2034         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2035         selfA.StoredVariables["CostFunctionJ" ].store( J )
2036         if selfA._toStore("IndexOfOptimum") or \
2037             selfA._toStore("CurrentOptimum") or \
2038             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2039             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2040             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2041             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2042             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2043         if selfA._toStore("IndexOfOptimum"):
2044             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2045         if selfA._toStore("CurrentOptimum"):
2046             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2047         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2048             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2049         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2050             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2051         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2052             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2053         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2054             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2055         return J
2056     #
2057     def GradientOfCostFunction(w):
2058         _W = numpy.asmatrix(numpy.ravel( w )).T
2059         GradJb  = HBHTpR * _W
2060         GradJo  = - Innovation
2061         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2062         return GradJ
2063     #
2064     # Minimization of the functional
2065     # --------------------------------
2066     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2067     #
2068     if selfA._parameters["Minimizer"] == "LBFGSB":
2069         if "0.19" <= scipy.version.version <= "1.1.0":
2070             import lbfgsbhlt as optimiseur
2071         else:
2072             import scipy.optimize as optimiseur
2073         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2074             func        = CostFunction,
2075             x0          = Xini,
2076             fprime      = GradientOfCostFunction,
2077             args        = (),
2078             bounds      = selfA._parameters["Bounds"],
2079             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2080             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2081             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2082             iprint      = selfA._parameters["optiprint"],
2083             )
2084         nfeval = Informations['funcalls']
2085         rc     = Informations['warnflag']
2086     elif selfA._parameters["Minimizer"] == "TNC":
2087         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2088             func        = CostFunction,
2089             x0          = Xini,
2090             fprime      = GradientOfCostFunction,
2091             args        = (),
2092             bounds      = selfA._parameters["Bounds"],
2093             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2094             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2095             ftol        = selfA._parameters["CostDecrementTolerance"],
2096             messages    = selfA._parameters["optmessages"],
2097             )
2098     elif selfA._parameters["Minimizer"] == "CG":
2099         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2100             f           = CostFunction,
2101             x0          = Xini,
2102             fprime      = GradientOfCostFunction,
2103             args        = (),
2104             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2105             gtol        = selfA._parameters["GradientNormTolerance"],
2106             disp        = selfA._parameters["optdisp"],
2107             full_output = True,
2108             )
2109     elif selfA._parameters["Minimizer"] == "NCG":
2110         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2111             f           = CostFunction,
2112             x0          = Xini,
2113             fprime      = GradientOfCostFunction,
2114             args        = (),
2115             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2116             avextol     = selfA._parameters["CostDecrementTolerance"],
2117             disp        = selfA._parameters["optdisp"],
2118             full_output = True,
2119             )
2120     elif selfA._parameters["Minimizer"] == "BFGS":
2121         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2122             f           = CostFunction,
2123             x0          = Xini,
2124             fprime      = GradientOfCostFunction,
2125             args        = (),
2126             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2127             gtol        = selfA._parameters["GradientNormTolerance"],
2128             disp        = selfA._parameters["optdisp"],
2129             full_output = True,
2130             )
2131     else:
2132         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2133     #
2134     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2135     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2136     #
2137     # Correction to work around a TNC bug on the returned Minimum
2138     # ----------------------------------------------------------------
2139     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2140         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2141         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2142     else:
2143         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2144     #
2145     # Obtaining the analysis
2146     # ----------------------
2147     Xa = Minimum
2148     #
2149     selfA.StoredVariables["Analysis"].store( Xa )
2150     #
2151     if selfA._toStore("OMA") or \
2152         selfA._toStore("SigmaObs2") or \
2153         selfA._toStore("SimulationQuantiles") or \
2154         selfA._toStore("SimulatedObservationAtOptimum"):
2155         if selfA._toStore("SimulatedObservationAtCurrentState"):
2156             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2157         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2158             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2159         else:
2160             HXa = Hm( Xa )
2161     #
2162     # Computation of the analysis covariance
2163     # ---------------------------------
2164     if selfA._toStore("APosterioriCovariance") or \
2165         selfA._toStore("SimulationQuantiles") or \
2166         selfA._toStore("JacobianMatrixAtOptimum") or \
2167         selfA._toStore("KalmanGainAtOptimum"):
2168         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2169         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2170     if selfA._toStore("APosterioriCovariance") or \
2171         selfA._toStore("SimulationQuantiles") or \
2172         selfA._toStore("KalmanGainAtOptimum"):
2173         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2174         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2175     if selfA._toStore("APosterioriCovariance") or \
2176         selfA._toStore("SimulationQuantiles"):
2177         BI = B.getI()
2178         RI = R.getI()
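        # The a posteriori covariance A is taken as the inverse of the Hessian of the
        # cost function, A = (B^-1 + H^T R^-1 H)^-1, assembled column by column below
        # from the tangent (HtM) and adjoint (HaM) forms of the observation operator.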
2179         HessienneI = []
2180         nb = Xa.size
2181         for i in range(nb):
2182             _ee    = numpy.matrix(numpy.zeros(nb)).T
2183             _ee[i] = 1.
2184             _HtEE  = numpy.dot(HtM,_ee)
2185             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2186             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2187         HessienneI = numpy.matrix( HessienneI )
2188         A = HessienneI.I
2189         if min(A.shape) != max(A.shape):
2190             raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2191         if (numpy.diag(A) < 0).any():
2192             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2193         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2194             try:
2195                 L = numpy.linalg.cholesky( A )
2196             except numpy.linalg.LinAlgError:
2197                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2198     if selfA._toStore("APosterioriCovariance"):
2199         selfA.StoredVariables["APosterioriCovariance"].store( A )
2200     if selfA._toStore("JacobianMatrixAtOptimum"):
2201         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2202     if selfA._toStore("KalmanGainAtOptimum"):
2203         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2204         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2205         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2206     #
2207     # Additional calculations and/or storage
2208     # ---------------------------------------
2209     if selfA._toStore("Innovation") or \
2210         selfA._toStore("SigmaObs2") or \
2211         selfA._toStore("MahalanobisConsistency") or \
2212         selfA._toStore("OMB"):
2213         d  = Y - HXb
2214     if selfA._toStore("Innovation"):
2215         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2216     if selfA._toStore("BMA"):
2217         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2218     if selfA._toStore("OMA"):
2219         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2220     if selfA._toStore("OMB"):
2221         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2222     if selfA._toStore("SigmaObs2"):
2223         TraceR = R.trace(Y.size)
2224         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2225     if selfA._toStore("MahalanobisConsistency"):
2226         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2227     if selfA._toStore("SimulationQuantiles"):
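        # Monte-Carlo estimation of the quantiles of the simulated observations:
        # states are drawn from N(Xa, A) and mapped to observation space either with
        # the tangent operator HtM ("Linear") or with the full operator Hm ("NonLinear").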
2228         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2229         HXa  = numpy.matrix(numpy.ravel( HXa )).T
2230         YfQ  = None
2231         for i in range(nech):
2232             if selfA._parameters["SimulationForQuantiles"] == "Linear":
2233                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2234                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2235                 Yr = HXa + dYr
2236             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2237                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2238                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2239             if YfQ is None:
2240                 YfQ = Yr
2241             else:
2242                 YfQ = numpy.hstack((YfQ,Yr))
2243         YfQ.sort(axis=-1)
2244         YQ = None
2245         for quantile in selfA._parameters["Quantiles"]:
2246             if not (0. <= float(quantile) <= 1.): continue
2247             indice = int(nech * float(quantile) - 1./nech)
2248             if YQ is None: YQ = YfQ[:,indice]
2249             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
2250         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2251     if selfA._toStore("SimulatedObservationAtBackground"):
2252         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2253     if selfA._toStore("SimulatedObservationAtOptimum"):
2254         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2255     #
2256     return 0
2257
2258 # ==============================================================================
2259 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2260     """
2261     Stochastic EnKF
2262     """
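    # Two analysis variants are implemented below and selected through "VariantM":
    #  - "KalmanFilterFormula05": perturbed-observation update with an explicit
    #    ensemble estimate of the gain K = Pf H^T (H Pf H^T + R)^-1,
    #  - "KalmanFilterFormula16": update written on normalized ensemble anomalies.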
2263     if selfA._parameters["EstimationOf"] == "Parameters":
2264         selfA._parameters["StoreInternalVariables"] = True
2265     #
2266     # Operators
2267     # ----------
2268     H = HO["Direct"].appliedControledFormTo
2269     #
2270     if selfA._parameters["EstimationOf"] == "State":
2271         M = EM["Direct"].appliedControledFormTo
2272     #
2273     if CM is not None and "Tangent" in CM and U is not None:
2274         Cm = CM["Tangent"].asMatrix(Xb)
2275     else:
2276         Cm = None
2277     #
2278     # Number of steps identical to the number of observation steps
2279     # -------------------------------------------------------
2280     if hasattr(Y,"stepnumber"):
2281         duration = Y.stepnumber()
2282         __p = numpy.cumprod(Y.shape())[-1]
2283     else:
2284         duration = 2
2285         __p = numpy.array(Y).size
2286     #
2287     # Precomputation of the inverses of B and R
2288     # ----------------------------------
2289     if selfA._parameters["StoreInternalVariables"] \
2290         or selfA._toStore("CostFunctionJ") \
2291         or selfA._toStore("CostFunctionJb") \
2292         or selfA._toStore("CostFunctionJo") \
2293         or selfA._toStore("CurrentOptimum") \
2294         or selfA._toStore("APosterioriCovariance"):
2295         BI = B.getI()
2296         RI = R.getI()
2297     #
2298     # Initialization
2299     # --------------
2300     __n = Xb.size
2301     __m = selfA._parameters["NumberOfMembers"]
2302     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2303     else:                         Pn = B
2304     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2305     else:                         Rn = R
2306     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2307     else:                         Qn = Q
2308     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2309     #
2310     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2311         selfA.StoredVariables["Analysis"].store( Xb )
2312         if selfA._toStore("APosterioriCovariance"):
2313             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2314             covarianceXa = Pn
2315     #
2316     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # used below when storing the "...AtCurrentOptimum" variables
2317     #
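    # Time loop over the observation steps: each member is forecast (with additive
    # model noise Q in state estimation), observed, then updated by the stochastic analysis.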
2318     for step in range(duration-1):
2319         if hasattr(Y,"store"):
2320             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2321         else:
2322             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2323         #
2324         if U is not None:
2325             if hasattr(U,"store") and len(U)>1:
2326                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2327             elif hasattr(U,"store") and len(U)==1:
2328                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2329             else:
2330                 Un = numpy.asmatrix(numpy.ravel( U )).T
2331         else:
2332             Un = None
2333         #
2334         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2335             Xn = CovarianceInflation( Xn,
2336                 selfA._parameters["InflationType"],
2337                 selfA._parameters["InflationFactor"],
2338                 )
2339         #
2340         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2341             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2342                 argsAsSerie = True,
2343                 returnSerieAsArrayMatrix = True )
2344             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2345             Xn_predicted = EMX + qi
2346             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2347                 argsAsSerie = True,
2348                 returnSerieAsArrayMatrix = True )
2349             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2350                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2351                 Xn_predicted = Xn_predicted + Cm * Un
2352         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2353             # ---> By principle, M = Id, Q = 0
2354             Xn_predicted = Xn
2355             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2356                 argsAsSerie = True,
2357                 returnSerieAsArrayMatrix = True )
2358         #
2359         # Mean of forecast and observation of forecast
2360         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2361         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2362         #
2363         #--------------------------
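        # Perturbed-observation update: the gain Kn = Pf H^T (H Pf H^T + R)^-1 is
        # estimated from the ensemble anomalies, and each member is updated with an
        # observation perturbed by a draw ri ~ N(0, Rn).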
2364         if VariantM == "KalmanFilterFormula05":
2365             PfHT, HPfHT = 0., 0.
2366             for i in range(__m):
2367                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2368                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2369                 PfHT  += Exfi * Eyfi.T
2370                 HPfHT += Eyfi * Eyfi.T
2371             PfHT  = (1./(__m-1)) * PfHT
2372             HPfHT = (1./(__m-1)) * HPfHT
2373             Kn     = PfHT * ( R + HPfHT ).I
2374             del PfHT, HPfHT
2375             #
2376             for i in range(__m):
2377                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2378                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2379         #--------------------------
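        # Anomaly-based form of the same update: the gain Kn = EaX EaY^T (EaY EaY^T)^-1
        # is built from the normalized state and observation anomalies, the observation
        # perturbations being carried by the ensemble EpY drawn with covariance Rn.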
2380         elif VariantM == "KalmanFilterFormula16":
2381             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2382             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2383             #
2384             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2385             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2386             #
2387             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2388             #
2389             for i in range(__m):
2390                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2391         #--------------------------
2392         else:
2393             raise ValueError("VariantM has to be chosen from the list of authorized methods.")
2394         #
2395         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2396             Xn = CovarianceInflation( Xn,
2397                 selfA._parameters["InflationType"],
2398                 selfA._parameters["InflationFactor"],
2399                 )
2400         #
2401         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2402         #--------------------------
2403         #
2404         if selfA._parameters["StoreInternalVariables"] \
2405             or selfA._toStore("CostFunctionJ") \
2406             or selfA._toStore("CostFunctionJb") \
2407             or selfA._toStore("CostFunctionJo") \
2408             or selfA._toStore("APosterioriCovariance") \
2409             or selfA._toStore("InnovationAtCurrentAnalysis") \
2410             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2411             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2412             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2413             _Innovation = Ynpu - _HXa
2414         #
2415         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2416         # ---> with analysis
2417         selfA.StoredVariables["Analysis"].store( Xa )
2418         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2419             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2420         if selfA._toStore("InnovationAtCurrentAnalysis"):
2421             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2422         # ---> with current state
2423         if selfA._parameters["StoreInternalVariables"] \
2424             or selfA._toStore("CurrentState"):
2425             selfA.StoredVariables["CurrentState"].store( Xn )
2426         if selfA._toStore("ForecastState"):
2427             selfA.StoredVariables["ForecastState"].store( EMX )
2428         if selfA._toStore("BMA"):
2429             selfA.StoredVariables["BMA"].store( EMX - Xa )
2430         if selfA._toStore("InnovationAtCurrentState"):
2431             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2432         if selfA._toStore("SimulatedObservationAtCurrentState") \
2433             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2434             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2435         # ---> others
2436         if selfA._parameters["StoreInternalVariables"] \
2437             or selfA._toStore("CostFunctionJ") \
2438             or selfA._toStore("CostFunctionJb") \
2439             or selfA._toStore("CostFunctionJo") \
2440             or selfA._toStore("CurrentOptimum") \
2441             or selfA._toStore("APosterioriCovariance"):
2442             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2443             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2444             J   = Jb + Jo
2445             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2446             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2447             selfA.StoredVariables["CostFunctionJ" ].store( J )
2448             #
2449             if selfA._toStore("IndexOfOptimum") \
2450                 or selfA._toStore("CurrentOptimum") \
2451                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2452                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2453                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2454                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2455                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2456             if selfA._toStore("IndexOfOptimum"):
2457                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2458             if selfA._toStore("CurrentOptimum"):
2459                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2460             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2461                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2462             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2463                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2464             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2465                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2466             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2467                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2468         if selfA._toStore("APosterioriCovariance"):
2469             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2470         if selfA._parameters["EstimationOf"] == "Parameters" \
2471             and J < previousJMinimum:
2472             previousJMinimum    = J
2473             XaMin               = Xa
2474             if selfA._toStore("APosterioriCovariance"):
2475                 covarianceXaMin = Pn
2476     #
2477     # Additional final storage of the optimum in parameter estimation
2478     # ----------------------------------------------------------------------
2479     if selfA._parameters["EstimationOf"] == "Parameters":
2480         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2481         selfA.StoredVariables["Analysis"].store( XaMin )
2482         if selfA._toStore("APosterioriCovariance"):
2483             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2484         if selfA._toStore("BMA"):
2485             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2486     #
2487     return 0
2488
2489 # ==============================================================================
2490 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2491     """
2492     3DVAR
2493     """
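    # Minimization of the standard 3DVAR cost function
    #   J(x) = 1/2 (x-Xb)^T B^-1 (x-Xb) + 1/2 (Y-H(x))^T R^-1 (Y-H(x)),
    # whose background and observation terms are computed below as Jb and Jo.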
2494     #
2495     # Initializations
2496     # ---------------
2497     #
2498     # Operators
2499     Hm = HO["Direct"].appliedTo
2500     Ha = HO["Adjoint"].appliedInXTo
2501     #
2502     # Possible use of a precomputed H(Xb) vector
2503     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2504         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2505     else:
2506         HXb = Hm( Xb )
2507     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2508     if Y.size != HXb.size:
2509         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2510     if max(Y.shape) != max(HXb.shape):
2511         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2512     #
2513     if selfA._toStore("JacobianMatrixAtBackground"):
2514         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2515         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2516         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2517     #
2518     # Precomputation of the inverses of B and R
2519     BI = B.getI()
2520     RI = R.getI()
2521     #
2522     # Starting point of the optimization
2523     Xini = selfA._parameters["InitializationPoint"]
2524     #
2525     # Definition of the cost function
2526     # ------------------------------
2527     def CostFunction(x):
2528         _X  = numpy.asmatrix(numpy.ravel( x )).T
2529         if selfA._parameters["StoreInternalVariables"] or \
2530             selfA._toStore("CurrentState") or \
2531             selfA._toStore("CurrentOptimum"):
2532             selfA.StoredVariables["CurrentState"].store( _X )
2533         _HX = Hm( _X )
2534         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2535         _Innovation = Y - _HX
2536         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2537             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2538             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2539         if selfA._toStore("InnovationAtCurrentState"):
2540             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2541         #
2542         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2543         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2544         J   = Jb + Jo
2545         #
2546         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2547         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2548         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2549         selfA.StoredVariables["CostFunctionJ" ].store( J )
2550         if selfA._toStore("IndexOfOptimum") or \
2551             selfA._toStore("CurrentOptimum") or \
2552             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2553             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2554             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2555             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2556             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2557         if selfA._toStore("IndexOfOptimum"):
2558             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2559         if selfA._toStore("CurrentOptimum"):
2560             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2561         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2562             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2563         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2564             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2565         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2566             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2567         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2568             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2569         return J
2570     #
2571     def GradientOfCostFunction(x):
2572         _X      = numpy.asmatrix(numpy.ravel( x )).T
2573         _HX     = Hm( _X )
2574         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
2575         GradJb  = BI * (_X - Xb)
2576         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
2577         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2578         return GradJ
2579     #
2580     # Minimization of the cost functional
2581     # --------------------------------
2582     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2583     #
2584     if selfA._parameters["Minimizer"] == "LBFGSB":
2585         if "0.19" <= scipy.version.version <= "1.1.0":
2586             import lbfgsbhlt as optimiseur
2587         else:
2588             import scipy.optimize as optimiseur
2589         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2590             func        = CostFunction,
2591             x0          = Xini,
2592             fprime      = GradientOfCostFunction,
2593             args        = (),
2594             bounds      = selfA._parameters["Bounds"],
2595             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2596             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2597             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2598             iprint      = selfA._parameters["optiprint"],
2599             )
2600         nfeval = Informations['funcalls']
2601         rc     = Informations['warnflag']
2602     elif selfA._parameters["Minimizer"] == "TNC":
2603         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2604             func        = CostFunction,
2605             x0          = Xini,
2606             fprime      = GradientOfCostFunction,
2607             args        = (),
2608             bounds      = selfA._parameters["Bounds"],
2609             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2610             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2611             ftol        = selfA._parameters["CostDecrementTolerance"],
2612             messages    = selfA._parameters["optmessages"],
2613             )
2614     elif selfA._parameters["Minimizer"] == "CG":
2615         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2616             f           = CostFunction,
2617             x0          = Xini,
2618             fprime      = GradientOfCostFunction,
2619             args        = (),
2620             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2621             gtol        = selfA._parameters["GradientNormTolerance"],
2622             disp        = selfA._parameters["optdisp"],
2623             full_output = True,
2624             )
2625     elif selfA._parameters["Minimizer"] == "NCG":
2626         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2627             f           = CostFunction,
2628             x0          = Xini,
2629             fprime      = GradientOfCostFunction,
2630             args        = (),
2631             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2632             avextol     = selfA._parameters["CostDecrementTolerance"],
2633             disp        = selfA._parameters["optdisp"],
2634             full_output = True,
2635             )
2636     elif selfA._parameters["Minimizer"] == "BFGS":
2637         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2638             f           = CostFunction,
2639             x0          = Xini,
2640             fprime      = GradientOfCostFunction,
2641             args        = (),
2642             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2643             gtol        = selfA._parameters["GradientNormTolerance"],
2644             disp        = selfA._parameters["optdisp"],
2645             full_output = True,
2646             )
2647     else:
2648         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2649     #
2650     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2651     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2652     #
2653     # Workaround for a TNC bug on the returned Minimum
2654     # ----------------------------------------------------------------
2655     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2656         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2657     #
2658     # Obtaining the analysis
2659     # ----------------------
2660     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2661     #
2662     selfA.StoredVariables["Analysis"].store( Xa )
2663     #
2664     if selfA._toStore("OMA") or \
2665         selfA._toStore("SigmaObs2") or \
2666         selfA._toStore("SimulationQuantiles") or \
2667         selfA._toStore("SimulatedObservationAtOptimum"):
2668         if selfA._toStore("SimulatedObservationAtCurrentState"):
2669             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2670         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2671             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2672         else:
2673             HXa = Hm( Xa )
2674     #
2675     # Computation of the analysis covariance
2676     # ---------------------------------
2677     if selfA._toStore("APosterioriCovariance") or \
2678         selfA._toStore("SimulationQuantiles") or \
2679         selfA._toStore("JacobianMatrixAtOptimum") or \
2680         selfA._toStore("KalmanGainAtOptimum"):
2681         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2682         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2683     if selfA._toStore("APosterioriCovariance") or \
2684         selfA._toStore("SimulationQuantiles") or \
2685         selfA._toStore("KalmanGainAtOptimum"):
2686         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2687         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2688     if selfA._toStore("APosterioriCovariance") or \
2689         selfA._toStore("SimulationQuantiles"):
2690         HessienneI = []
2691         nb = Xa.size
2692         for i in range(nb):
2693             _ee    = numpy.matrix(numpy.zeros(nb)).T
2694             _ee[i] = 1.
2695             _HtEE  = numpy.dot(HtM,_ee)
2696             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2697             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2698         HessienneI = numpy.matrix( HessienneI )
2699         A = HessienneI.I
2700         if min(A.shape) != max(A.shape):
2701             raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2702         if (numpy.diag(A) < 0).any():
2703             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2704         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2705             try:
2706                 L = numpy.linalg.cholesky( A )
2707             except numpy.linalg.LinAlgError:
2708                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2709     if selfA._toStore("APosterioriCovariance"):
2710         selfA.StoredVariables["APosterioriCovariance"].store( A )
2711     if selfA._toStore("JacobianMatrixAtOptimum"):
2712         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2713     if selfA._toStore("KalmanGainAtOptimum"):
2714         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2715         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2716         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2717     #
2718     # Additional calculations and/or storage
2719     # ---------------------------------------
2720     if selfA._toStore("Innovation") or \
2721         selfA._toStore("SigmaObs2") or \
2722         selfA._toStore("MahalanobisConsistency") or \
2723         selfA._toStore("OMB"):
2724         d  = Y - HXb
2725     if selfA._toStore("Innovation"):
2726         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2727     if selfA._toStore("BMA"):
2728         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2729     if selfA._toStore("OMA"):
2730         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2731     if selfA._toStore("OMB"):
2732         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2733     if selfA._toStore("SigmaObs2"):
2734         TraceR = R.trace(Y.size)
2735         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2736     if selfA._toStore("MahalanobisConsistency"):
2737         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2738     if selfA._toStore("SimulationQuantiles"):
2739         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2740         HXa  = numpy.matrix(numpy.ravel( HXa )).T
2741         YfQ  = None
2742         for i in range(nech):
2743             if selfA._parameters["SimulationForQuantiles"] == "Linear":
2744                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2745                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2746                 Yr = HXa + dYr
2747             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2748                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2749                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2750             if YfQ is None:
2751                 YfQ = Yr
2752             else:
2753                 YfQ = numpy.hstack((YfQ,Yr))
2754         YfQ.sort(axis=-1)
2755         YQ = None
2756         for quantile in selfA._parameters["Quantiles"]:
2757             if not (0. <= float(quantile) <= 1.): continue
2758             indice = int(nech * float(quantile) - 1./nech)
2759             if YQ is None: YQ = YfQ[:,indice]
2760             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
2761         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2762     if selfA._toStore("SimulatedObservationAtBackground"):
2763         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2764     if selfA._toStore("SimulatedObservationAtOptimum"):
2765         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2766     #
2767     return 0
2768
2769 # ==============================================================================
2770 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2771     """
2772     4DVAR
2773     """
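    # Minimization of the 4DVAR cost function over the initial state x0,
    #   J(x0) = 1/2 (x0-Xb)^T B^-1 (x0-Xb) + 1/2 sum_k (Yk - Hk(xk))^T R^-1 (Yk - Hk(xk)),
    # with xk obtained by propagating x0 with the evolution model Mm; the gradient is
    # computed by the adjoint backward recursion defined in GradientOfCostFunction.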
2774     #
2775     # Initializations
2776     # ---------------
2777     #
2778     # Operators
2779     Hm = HO["Direct"].appliedControledFormTo
2780     Mm = EM["Direct"].appliedControledFormTo
2781     #
2782     if CM is not None and "Tangent" in CM and U is not None:
2783         Cm = CM["Tangent"].asMatrix(Xb)
2784     else:
2785         Cm = None
2786     #
2787     def Un(_step):
2788         if U is not None:
2789             if hasattr(U,"store") and 1<=_step<len(U) :
2790                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2791             elif hasattr(U,"store") and len(U)==1:
2792                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2793             else:
2794                 _Un = numpy.asmatrix(numpy.ravel( U )).T
2795         else:
2796             _Un = None
2797         return _Un
2798     def CmUn(_xn,_un):
2799         if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
2800             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2801             _CmUn = _Cm * _un
2802         else:
2803             _CmUn = 0.
2804         return _CmUn
2805     #
2806     # Remark: the observations are used starting from time step number 1,
2807     # and are stored in Yo as ordered by these indices. Step 0 is therefore
2808     # not used, since the first stage starts with the observation
2809     # of step 1.
2810     #
2811     # Number of steps identical to the number of observation steps
2812     if hasattr(Y,"stepnumber"):
2813         duration = Y.stepnumber()
2814     else:
2815         duration = 2
2816     #
2817     # Precomputation of the inverses of B and R
2818     BI = B.getI()
2819     RI = R.getI()
2820     #
2821     # Starting point of the optimization
2822     Xini = selfA._parameters["InitializationPoint"]
2823     #
2824     # Definition of the cost function
2825     # ------------------------------
2826     selfA.DirectCalculation = [None,] # Step 0 is not observed
2827     selfA.DirectInnovation  = [None,] # Step 0 is not observed
2828     def CostFunction(x):
2829         _X  = numpy.asmatrix(numpy.ravel( x )).T
2830         if selfA._parameters["StoreInternalVariables"] or \
2831             selfA._toStore("CurrentState") or \
2832             selfA._toStore("CurrentOptimum"):
2833             selfA.StoredVariables["CurrentState"].store( _X )
2834         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2835         selfA.DirectCalculation = [None,]
2836         selfA.DirectInnovation  = [None,]
2837         Jo  = 0.
2838         _Xn = _X
2839         for step in range(0,duration-1):
2840             if hasattr(Y,"store"):
2841                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2842             else:
2843                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2844             _Un = Un(step)
2845             #
2846             # Evolution step
2847             if selfA._parameters["EstimationOf"] == "State":
2848                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2849             elif selfA._parameters["EstimationOf"] == "Parameters":
2850                 pass
2851             #
2852             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2853                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2854                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2855             #
2856             # Difference with the observations
2857             if selfA._parameters["EstimationOf"] == "State":
2858                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2859             elif selfA._parameters["EstimationOf"] == "Parameters":
2860                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2861             #
2862             # Storage of the state
2863             selfA.DirectCalculation.append( _Xn )
2864             selfA.DirectInnovation.append( _YmHMX )
2865             #
2866             # Addition to the observation part of the functional
2867             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2868         J = Jb + Jo
2869         #
2870         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2871         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2872         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2873         selfA.StoredVariables["CostFunctionJ" ].store( J )
2874         if selfA._toStore("IndexOfOptimum") or \
2875             selfA._toStore("CurrentOptimum") or \
2876             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2877             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2878             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2879             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2880         if selfA._toStore("IndexOfOptimum"):
2881             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2882         if selfA._toStore("CurrentOptimum"):
2883             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2884         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2885             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2886         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2887             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2888         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2889             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2890         return J
2891     #
2892     def GradientOfCostFunction(x):
2893         _X      = numpy.asmatrix(numpy.ravel( x )).T
2894         GradJb  = BI * (_X - Xb)
2895         GradJo  = 0.
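        # Backward-in-time adjoint recursion: at each step, GradJo <- Ma( GradJo + Ha( R^-1 d ) ),
        # with Ha and Ma the adjoint operators and d the stored innovation; the sign of
        # the observation term is applied once at the end, when combining with GradJb.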
2896         for step in range(duration-1,0,-1):
2897             # Retrieve the last stored evolution state
2898             _Xn = selfA.DirectCalculation.pop()
2899             # Retrieve the last stored innovation
2900             _YmHMX = selfA.DirectInnovation.pop()
2901             # Computation of the adjoints
2902             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2903             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2904             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2905             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2906             # Computation of the gradient by the adjoint state
2907             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
2908             GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
2909         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2910         return GradJ
2911     #
2912     # Minimization of the cost functional
2913     # --------------------------------
2914     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2915     #
2916     if selfA._parameters["Minimizer"] == "LBFGSB":
2917         if "0.19" <= scipy.version.version <= "1.1.0":
2918             import lbfgsbhlt as optimiseur
2919         else:
2920             import scipy.optimize as optimiseur
2921         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2922             func        = CostFunction,
2923             x0          = Xini,
2924             fprime      = GradientOfCostFunction,
2925             args        = (),
2926             bounds      = selfA._parameters["Bounds"],
2927             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2928             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2929             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2930             iprint      = selfA._parameters["optiprint"],
2931             )
2932         nfeval = Informations['funcalls']
2933         rc     = Informations['warnflag']
2934     elif selfA._parameters["Minimizer"] == "TNC":
2935         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2936             func        = CostFunction,
2937             x0          = Xini,
2938             fprime      = GradientOfCostFunction,
2939             args        = (),
2940             bounds      = selfA._parameters["Bounds"],
2941             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2942             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2943             ftol        = selfA._parameters["CostDecrementTolerance"],
2944             messages    = selfA._parameters["optmessages"],
2945             )
2946     elif selfA._parameters["Minimizer"] == "CG":
2947         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2948             f           = CostFunction,
2949             x0          = Xini,
2950             fprime      = GradientOfCostFunction,
2951             args        = (),
2952             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2953             gtol        = selfA._parameters["GradientNormTolerance"],
2954             disp        = selfA._parameters["optdisp"],
2955             full_output = True,
2956             )
2957     elif selfA._parameters["Minimizer"] == "NCG":
2958         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2959             f           = CostFunction,
2960             x0          = Xini,
2961             fprime      = GradientOfCostFunction,
2962             args        = (),
2963             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2964             avextol     = selfA._parameters["CostDecrementTolerance"],
2965             disp        = selfA._parameters["optdisp"],
2966             full_output = True,
2967             )
2968     elif selfA._parameters["Minimizer"] == "BFGS":
2969         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2970             f           = CostFunction,
2971             x0          = Xini,
2972             fprime      = GradientOfCostFunction,
2973             args        = (),
2974             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2975             gtol        = selfA._parameters["GradientNormTolerance"],
2976             disp        = selfA._parameters["optdisp"],
2977             full_output = True,
2978             )
2979     else:
2980         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2981     #
2982     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2983     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2984     #
2985     # Workaround for a TNC bug on the returned Minimum
2986     # ----------------------------------------------------------------
2987     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2988         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2989     #
2990     # Obtaining the analysis
2991     # ----------------------
2992     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2993     #
2994     selfA.StoredVariables["Analysis"].store( Xa )
2995     #
2996     # Additional calculations and/or storage
2997     # ---------------------------------------
2998     if selfA._toStore("BMA"):
2999         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3000     #
3001     return 0
3002
3003 # ==============================================================================
3004 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3005     """
3006     3DVAR variational analysis with no inversion of B
3007     """
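    # Same 3DVAR problem as in std3dvar, but written on the control variable v defined
    # by x = Xb + B v, so that only the transpose of B is needed: for a symmetric B,
    #   Jb = 1/2 v^T B^T v  equals  1/2 (x-Xb)^T B^-1 (x-Xb).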
3008     #
3009     # Initializations
3010     # ---------------
3011     #
3012     # Operators
3013     Hm = HO["Direct"].appliedTo
3014     Ha = HO["Adjoint"].appliedInXTo
    #
    # Simulation of the background HXb (needed by the innovation and background storages below)
    HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
3015     #
3016     # Precomputation of the transpose of B and of the inverse of R
3017     BT = B.getT()
3018     RI = R.getI()
3019     #
3020     # Starting point of the optimization
3021     Xini = numpy.zeros(Xb.shape)
3022     #
3023     # Definition of the cost function
3024     # ------------------------------
3025     def CostFunction(v):
3026         _V = numpy.asmatrix(numpy.ravel( v )).T
3027         _X = Xb + B * _V
3028         if selfA._parameters["StoreInternalVariables"] or \
3029             selfA._toStore("CurrentState") or \
3030             selfA._toStore("CurrentOptimum"):
3031             selfA.StoredVariables["CurrentState"].store( _X )
3032         _HX = Hm( _X )
3033         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3034         _Innovation = Y - _HX
3035         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3036             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3037             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3038         if selfA._toStore("InnovationAtCurrentState"):
3039             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3040         #
3041         Jb  = float( 0.5 * _V.T * BT * _V )
3042         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3043         J   = Jb + Jo
3044         #
3045         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3046         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3047         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3048         selfA.StoredVariables["CostFunctionJ" ].store( J )
3049         if selfA._toStore("IndexOfOptimum") or \
3050             selfA._toStore("CurrentOptimum") or \
3051             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3052             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3053             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3054             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3055             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3056         if selfA._toStore("IndexOfOptimum"):
3057             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3058         if selfA._toStore("CurrentOptimum"):
3059             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3060         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3061             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3062         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3063             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3064         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3065             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3066         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3067             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3068         return J
3069     #
3070     def GradientOfCostFunction(v):
3071         _V = numpy.asmatrix(numpy.ravel( v )).T
3072         _X = Xb + B * _V
3073         _HX     = Hm( _X )
3074         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3075         GradJb  = BT * _V
3076         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3077         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3078         return GradJ
3079     #
3080     # Minimization of the cost functional
3081     # --------------------------------
3082     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3083     #
3084     if selfA._parameters["Minimizer"] == "LBFGSB":
3085         if "0.19" <= scipy.version.version <= "1.1.0":
3086             import lbfgsbhlt as optimiseur
3087         else:
3088             import scipy.optimize as optimiseur
3089         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3090             func        = CostFunction,
3091             x0          = Xini,
3092             fprime      = GradientOfCostFunction,
3093             args        = (),
3094             bounds      = selfA._parameters["Bounds"],
3095             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3096             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3097             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3098             iprint      = selfA._parameters["optiprint"],
3099             )
3100         nfeval = Informations['funcalls']
3101         rc     = Informations['warnflag']
3102     elif selfA._parameters["Minimizer"] == "TNC":
3103         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3104             func        = CostFunction,
3105             x0          = Xini,
3106             fprime      = GradientOfCostFunction,
3107             args        = (),
3108             bounds      = selfA._parameters["Bounds"],
3109             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3110             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3111             ftol        = selfA._parameters["CostDecrementTolerance"],
3112             messages    = selfA._parameters["optmessages"],
3113             )
3114     elif selfA._parameters["Minimizer"] == "CG":
3115         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3116             f           = CostFunction,
3117             x0          = Xini,
3118             fprime      = GradientOfCostFunction,
3119             args        = (),
3120             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3121             gtol        = selfA._parameters["GradientNormTolerance"],
3122             disp        = selfA._parameters["optdisp"],
3123             full_output = True,
3124             )
3125     elif selfA._parameters["Minimizer"] == "NCG":
3126         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3127             f           = CostFunction,
3128             x0          = Xini,
3129             fprime      = GradientOfCostFunction,
3130             args        = (),
3131             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3132             avextol     = selfA._parameters["CostDecrementTolerance"],
3133             disp        = selfA._parameters["optdisp"],
3134             full_output = True,
3135             )
3136     elif selfA._parameters["Minimizer"] == "BFGS":
3137         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3138             f           = CostFunction,
3139             x0          = Xini,
3140             fprime      = GradientOfCostFunction,
3141             args        = (),
3142             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3143             gtol        = selfA._parameters["GradientNormTolerance"],
3144             disp        = selfA._parameters["optdisp"],
3145             full_output = True,
3146             )
3147     else:
3148         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3149     #
3150     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3151     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3152     #
3153     # Workaround for a TNC bug on the returned Minimum
3154     # ----------------------------------------------------------------
3155     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3156         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3157         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3158     else:
3159         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3160     #
3161     # Obtaining the analysis
3162     # ----------------------
3163     Xa = Minimum
3164     #
3165     selfA.StoredVariables["Analysis"].store( Xa )
3166     #
3167     if selfA._toStore("OMA") or \
3168         selfA._toStore("SigmaObs2") or \
3169         selfA._toStore("SimulationQuantiles") or \
3170         selfA._toStore("SimulatedObservationAtOptimum"):
3171         if selfA._toStore("SimulatedObservationAtCurrentState"):
3172             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3173         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3174             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3175         else:
3176             HXa = Hm( Xa )
3177     #
3178     # Computation of the analysis covariance
3179     # ---------------------------------
3180     if selfA._toStore("APosterioriCovariance") or \
3181         selfA._toStore("SimulationQuantiles") or \
3182         selfA._toStore("JacobianMatrixAtOptimum") or \
3183         selfA._toStore("KalmanGainAtOptimum"):
3184         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3185         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3186     if selfA._toStore("APosterioriCovariance") or \
3187         selfA._toStore("SimulationQuantiles") or \
3188         selfA._toStore("KalmanGainAtOptimum"):
3189         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3190         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3191     if selfA._toStore("APosterioriCovariance") or \
3192         selfA._toStore("SimulationQuantiles"):
3193         BI = B.getI()
3194         HessienneI = []
3195         nb = Xa.size
3196         for i in range(nb):
3197             _ee    = numpy.matrix(numpy.zeros(nb)).T
3198             _ee[i] = 1.
3199             _HtEE  = numpy.dot(HtM,_ee)
3200             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3201             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3202         HessienneI = numpy.matrix( HessienneI )
3203         A = HessienneI.I
3204         if min(A.shape) != max(A.shape):
3205             raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3206         if (numpy.diag(A) < 0).any():
3207             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3208         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
3209             try:
3210                 L = numpy.linalg.cholesky( A )
3211             except numpy.linalg.LinAlgError:
3212                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3213     if selfA._toStore("APosterioriCovariance"):
3214         selfA.StoredVariables["APosterioriCovariance"].store( A )
3215     if selfA._toStore("JacobianMatrixAtOptimum"):
3216         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3217     if selfA._toStore("KalmanGainAtOptimum"):
3218         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3219         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3220         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3221     #
3222     # Additional calculations and/or storage
3223     # ---------------------------------------
3224     if selfA._toStore("Innovation") or \
3225         selfA._toStore("SigmaObs2") or \
3226         selfA._toStore("MahalanobisConsistency") or \
3227         selfA._toStore("OMB"):
3228         d  = Y - HXb
3229     if selfA._toStore("Innovation"):
3230         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3231     if selfA._toStore("BMA"):
3232         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3233     if selfA._toStore("OMA"):
3234         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3235     if selfA._toStore("OMB"):
3236         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3237     if selfA._toStore("SigmaObs2"):
3238         TraceR = R.trace(Y.size)
3239         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3240     if selfA._toStore("MahalanobisConsistency"):
3241         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3242     if selfA._toStore("SimulationQuantiles"):
3243         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
3244         HXa  = numpy.matrix(numpy.ravel( HXa )).T
3245         YfQ  = None
3246         for i in range(nech):
3247             if selfA._parameters["SimulationForQuantiles"] == "Linear":
3248                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
3249                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
3250                 Yr = HXa + dYr
3251             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
3252                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
3253                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
3254             if YfQ is None:
3255                 YfQ = Yr
3256             else:
3257                 YfQ = numpy.hstack((YfQ,Yr))
3258         YfQ.sort(axis=-1)
3259         YQ = None
3260         for quantile in selfA._parameters["Quantiles"]:
3261             if not (0. <= float(quantile) <= 1.): continue
3262             indice = int(nech * float(quantile) - 1./nech)
3263             if YQ is None: YQ = YfQ[:,indice]
3264             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
3265         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
3266     if selfA._toStore("SimulatedObservationAtBackground"):
3267         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3268     if selfA._toStore("SimulatedObservationAtOptimum"):
3269         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3270     #
3271     return 0
3272
3273 # ==============================================================================
3274 if __name__ == "__main__":
3275     print('\n AUTODIAGNOSTIC\n')