modules/adao.git: src/daComposant/daCore/NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
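# Illustrative sketch (not part of the original code): ExecuteFunction is meant to be
# called in a worker process with a triplet (X, xArgs, funcrepr), where funcrepr tells
# the worker where to re-import the user function. The path, module and function names
# below are hypothetical placeholders, not values defined elsewhere in this file.
#
#   funcrepr = {
#       "__userFunction__path" : "/path/to/user/scripts",   # hypothetical directory
#       "__userFunction__modl" : "my_model",                # hypothetical module my_model.py
#       "__userFunction__name" : "H",                       # hypothetical function in that module
#   }
#   HX = ExecuteFunction( (numpy.arange(3.), None, funcrepr) )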
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     This class is an interface used to define approximated operators. When an
55     object is created by providing a function "Function", one obtains an object
56     offering the 3 methods "DirectOperator", "TangentOperator" and
57     "AdjointOperator". The FD approximation is controlled by the multiplicative
58     increment "increment", equal to 1% by default, or by the fixed increment
59     "dX" which is then multiplied by "increment" (hence in %), and centered FD
60     are used if the boolean "centeredDF" is true.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Direct computation using the provided function.
171
172         NB: the extraArgs are present to ensure call compatibility, but they
173         must not be passed here to the user function.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         Computation of the tangent operator as the Jacobian by finite differences,
187         i.e. the gradient of H at X. Directional finite differences are used
188         around the point X. X is a numpy.matrix.
189
190         Centered finite differences (2nd order approximation):
191         1/ For each component i of X, the perturbation dX[i] is added to and
192            subtracted from the component X[i] to build X_plus_dXi and X_moins_dXi,
193            and the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi =
194            H( X_moins_dXi ) are computed
195         2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
196            by the step 2*dXi
197         3/ Each result, component by component, becomes a column of the Jacobian
198
199         Non-centered finite differences (1st order approximation):
200         1/ For each component i of X, the perturbation dX[i] is added to the
201            component X[i] to build X_plus_dXi, and the response
202            HX_plus_dXi = H( X_plus_dXi ) is computed
203         2/ The central value HX = H(X) is computed
204         3/ The differences (HX_plus_dXi-HX) are computed and divided
205            by the step dXi
206         4/ Each result, component by component, becomes a column of the Jacobian
208
209         """
210         logging.debug("FDA Début du calcul de la Jacobienne")
211         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Calcul Jacobienne (explicite)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                     #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                     #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
377         #
378         logging.debug("FDA Fin du calcul de la Jacobienne")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Computation of the tangent using the Jacobian.
386
387         NB: the extraArgs are present to ensure call compatibility, but they
388         must not be passed here to the user function.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect length of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Computation of the matrix form if the second argument is None
402             # -------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Computation of the linearized value of H at X applied to dX
408             # ------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Computation of the adjoint using the Jacobian.
418
419         NB: the extraArgs are present to ensure call compatibility, but they
420         must not be passed here to the user function.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect length of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Computation of the matrix form if the second argument is None
434             # -------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Computation of the adjoint value at X applied to Y
440             # --------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
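# Minimal usage sketch for FDApproximation (illustrative only; the operator H and the
# numerical values are assumptions, not reference results from this module):
#
#   def H(x):
#       x = numpy.ravel(x)
#       return numpy.array([x[0]**2, x[0]*x[1]])
#   FDA = FDApproximation( Function = H, increment = 0.01, centeredDF = True )
#   J   = FDA.TangentMatrix( numpy.array([1., 2.]) )                             # 2x2 approximate Jacobian
#   JdX = FDA.TangentOperator( (numpy.array([1., 2.]), numpy.array([1., 0.])) )  # J applied to dX
#   JTy = FDA.AdjointOperator( (numpy.array([1., 2.]), numpy.array([1., 0.])) )  # J^T applied to Y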
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448     "Generation of an ensemble of _nbmembers centered random states"
449     #
450     _bgcenter = numpy.ravel(_bgcenter)[:,None]
451     if _nbmembers < 1:
452         raise ValueError("Number of members has to be at least 1 (given number: %s)."%(str(_nbmembers),))
453     #
454     if _bgcovariance is None:
455         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
456     else:
457         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
459     #
460     return BackgroundEnsemble
461
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464     "Generation of an ensemble of _nbmembers centered random states"
465     def __CenteredRandomAnomalies(Zr, N):
466         """
467         Generates a matrix of N random anomalies centered on Zr, following the
468         handwritten notes of MB and consistent with the code of PS with eps = -1
469         """
470         eps = -1
471         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
474         Q = numpy.dot(Q,R)
475         Zr = numpy.dot(Q,Zr)
476         return Zr.T
477     #
478     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
479     if _nbmembers < 1:
480         raise ValueError("Number of members has to be at least 1 (given number: %s)."%(str(_nbmembers),))
481     if _bgcovariance is None:
482         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
483     else:
484         if _withSVD:
485             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486             _nbctl = _bgcenter.size
487             if _nbmembers > _nbctl:
488                 _Z = numpy.concatenate((numpy.dot(
489                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
491             else:
492                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494             BackgroundEnsemble = _bgcenter + _Zca
495         else:
496             if max(abs(_bgcovariance.flatten())) > 0:
497                 _nbctl = _bgcenter.size
498                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500                 BackgroundEnsemble = _bgcenter + _Zca
501             else:
502                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
503     #
504     return BackgroundEnsemble
505
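# Minimal sketch (illustrative values and shapes, not taken from this module): drawing an
# ensemble of 4 members around a 2-component background with a diagonal covariance; the
# result has one column per member and its anomalies are centered by construction.
#
#   Xb = numpy.array([1., 2.])
#   B  = numpy.diag([0.1, 0.2])
#   Xn = EnsembleOfBackgroundPerturbations( Xb, B, 4 )   # array of shape (2, 4)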
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
508     "Returns the centered anomalies from an ensemble of size StateSize*NbMembers"
509     if OptMean is None:
510         __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
511     else:
512         __Em = numpy.ravel(OptMean).reshape((-1,1))
513     #
514     return Normalisation * (numpy.asarray(Ensemble) - __Em)
515
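# Minimal sketch (illustrative values): the anomalies are the deviations of each member
# from the ensemble mean, optionally scaled, e.g. by 1/sqrt(m-1) as done in the ensemble
# filters further below.
#
#   E  = numpy.array([[1., 2., 3.],
#                     [4., 5., 6.]])                     # 2 state components, 3 members
#   A  = EnsembleOfAnomalies( E )                        # [[-1., 0., 1.], [-1., 0., 1.]]
#   As = EnsembleOfAnomalies( E, Normalisation = 1./math.sqrt(3-1) )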
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble ):
518     "Returns the ensemble covariance"
519     __Anomalies = EnsembleOfAnomalies( Ensemble )
520     __n, __m = numpy.asarray(__Anomalies).shape
521     # Empirical estimation
522     __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
523     # Enforce symmetry
524     __Covariance = (__Covariance + __Covariance.T) * 0.5
525     # Enforce positivity
526     __epsilon    = mpr*numpy.trace(__Covariance)
527     __Covariance = __Covariance + __epsilon * numpy.identity(__n)
528     #
529     return __Covariance
530
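# Minimal sketch (illustrative, continuing the small ensemble E above): the empirical
# covariance is A @ A.T / (m-1), symmetrized and slightly regularized, so here
#
#   P = EnsembleErrorCovariance( E )                     # approximately [[1., 1.], [1., 1.]]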
531 # ==============================================================================
532 def CovarianceInflation(
533         InputCovOrEns,
534         InflationType   = None,
535         InflationFactor = None,
536         BackgroundCov   = None,
537         ):
538     """
539     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
540
541     Synthesis: Hunt 2007, section 2.3.5
542     """
543     if InflationFactor is None:
544         return InputCovOrEns
545     else:
546         InflationFactor = float(InflationFactor)
547     #
548     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
549         if InflationFactor < 1.:
550             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
551         if InflationFactor < 1.+mpr:
552             return InputCovOrEns
553         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
554     #
555     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
556         if InflationFactor < 1.:
557             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
558         if InflationFactor < 1.+mpr:
559             return InputCovOrEns
560         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
561         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
562             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
563     #
564     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
565         if InflationFactor < 0.:
566             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
567         if InflationFactor < mpr:
568             return InputCovOrEns
569         __n, __m = numpy.asarray(InputCovOrEns).shape
570         if __n != __m:
571             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
572         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
573     #
574     elif InflationType == "HybridOnBackgroundCovariance":
575         if InflationFactor < 0.:
576             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
577         if InflationFactor < mpr:
578             return InputCovOrEns
579         __n, __m = numpy.asarray(InputCovOrEns).shape
580         if __n != __m:
581             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
582         if BackgroundCov is None:
583             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
584         if InputCovOrEns.shape != BackgroundCov.shape:
585             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
586         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
587     #
588     elif InflationType == "Relaxation":
589         raise NotImplementedError("InflationType Relaxation")
590     #
591     else:
592         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
593     #
594     return OutputCovOrEns
595
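# Minimal sketch (illustrative): multiplicative inflation of ensemble anomalies keeps the
# ensemble mean and stretches the spread around it by the given factor.
#
#   E  = numpy.array([[1., 2., 3.]])                     # 1 component, 3 members
#   Ei = CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 1.5 )
#   # the mean stays 2., the anomalies (-1., 0., 1.) become (-1.5, 0., 1.5)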
596 # ==============================================================================
597 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
598     """
599     EnKS
600     """
601     #
602     # Operators
603     H = HO["Direct"].appliedControledFormTo
604     #
605     if selfA._parameters["EstimationOf"] == "State":
606         M = EM["Direct"].appliedControledFormTo
607     #
608     if CM is not None and "Tangent" in CM and U is not None:
609         Cm = CM["Tangent"].asMatrix(Xb)
610     else:
611         Cm = None
612     #
613     # Precompute the inversions of B and R
614     RIdemi = R.sqrtmI()
615     #
616     # Observation duration and sizes
617     LagL = selfA._parameters["SmootherLagL"]
618     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
619         raise ValueError("Fixed-lag smoother requires a series of observations")
620     if Y.stepnumber() < LagL:
621         raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
622     duration = Y.stepnumber()
623     __p = numpy.cumprod(Y.shape())[-1]
624     __n = Xb.size
625     __m = selfA._parameters["NumberOfMembers"]
626     #
627     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
628     else:                         Pn = B
629     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
630     else:                         Qn = Q
631     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
632         selfA.StoredVariables["Analysis"].store( Xb )
633         if selfA._toStore("APosterioriCovariance"):
634             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
635             covarianceXa = Pn
636     #
637     # Initial direct computation (storing is preferred over recomputing)
638     __seed = numpy.random.get_state()
639     selfB = copy.deepcopy(selfA)
640     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
641     if VariantM == "EnKS16-KalmanFilterFormula":
642         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
643     else:
644         raise ValueError("VariantM has to be chosen in the authorized methods list.")
645     if LagL > 0:
646         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
647     else:
648         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
649     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
650     #
651     for step in range(LagL,duration-1):
652         #
653         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
654         sEL.append(None)
655         #
656         if hasattr(Y,"store"):
657             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
658         else:
659             Ynpu = numpy.ravel( Y ).reshape((__p,1))
660         #
661         if U is not None:
662             if hasattr(U,"store") and len(U)>1:
663                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
664             elif hasattr(U,"store") and len(U)==1:
665                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
666             else:
667                 Un = numpy.asmatrix(numpy.ravel( U )).T
668         else:
669             Un = None
670         #
671         #--------------------------
672         if VariantM == "EnKS16-KalmanFilterFormula":
673             if selfA._parameters["EstimationOf"] == "State": # Forecast
674                 EL = M( [(EL[:,i], Un) for i in range(__m)],
675                     argsAsSerie = True,
676                     returnSerieAsArrayMatrix = True )
677                 EL = EL + numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
678                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
679                     argsAsSerie = True,
680                     returnSerieAsArrayMatrix = True )
681                 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
682                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
683                     EZ = EZ + Cm * Un
684             elif selfA._parameters["EstimationOf"] == "Parameters":
685                 # ---> By principle, M = Id, Q = 0
686                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
687                     argsAsSerie = True,
688                     returnSerieAsArrayMatrix = True )
689             #
690             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
691             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
692             #
693             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
694             delta = RIdemi @ ( Ynpu - vZm )
695             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
696             vw    = mT @ mS.T @ delta
697             #
698             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
699             mU    = numpy.identity(__m)
700             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
701             #
702             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
703             EL    = vEm + EX @ wTU
704             #
705             sEL[LagL] = EL
706             for irl in range(LagL): # Smoothing of the L previous analyses
707                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
708                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
709                 sEL[irl] = vEm + EX @ wTU
710             #
711             # Keep the order-0 retrospective analysis before rotation
712             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
713             if selfA._toStore("APosterioriCovariance"):
714                 EXn = sEL[0]
715             #
716             for irl in range(LagL):
717                 sEL[irl] = sEL[irl+1]
718             sEL[LagL] = None
719         #--------------------------
720         else:
721             raise ValueError("VariantM has to be chosen in the authorized methods list.")
722         #
723         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
724         # ---> with analysis
725         selfA.StoredVariables["Analysis"].store( Xa )
726         if selfA._toStore("APosterioriCovariance"):
727             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
728     #
729     # Store the last analyses, which are not yet fully updated
730     for irl in range(LagL):
731         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
732         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
733         selfA.StoredVariables["Analysis"].store( Xa )
734     #
735     return 0
736
737 # ==============================================================================
738 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
739     """
740     Ensemble-Transform EnKF
741     """
742     if selfA._parameters["EstimationOf"] == "Parameters":
743         selfA._parameters["StoreInternalVariables"] = True
744     #
745     # Operators
746     # ----------
747     H = HO["Direct"].appliedControledFormTo
748     #
749     if selfA._parameters["EstimationOf"] == "State":
750         M = EM["Direct"].appliedControledFormTo
751     #
752     if CM is not None and "Tangent" in CM and U is not None:
753         Cm = CM["Tangent"].asMatrix(Xb)
754     else:
755         Cm = None
756     #
757     # Number of steps identical to the number of observation steps
758     # -------------------------------------------------------
759     if hasattr(Y,"stepnumber"):
760         duration = Y.stepnumber()
761         __p = numpy.cumprod(Y.shape())[-1]
762     else:
763         duration = 2
764         __p = numpy.array(Y).size
765     #
766     # Precompute the inversions of B and R
767     # ----------------------------------
768     if selfA._parameters["StoreInternalVariables"] \
769         or selfA._toStore("CostFunctionJ") \
770         or selfA._toStore("CostFunctionJb") \
771         or selfA._toStore("CostFunctionJo") \
772         or selfA._toStore("CurrentOptimum") \
773         or selfA._toStore("APosterioriCovariance"):
774         BI = B.getI()
775         RI = R.getI()
776     elif VariantM != "KalmanFilterFormula":
777         RI = R.getI()
778     if VariantM == "KalmanFilterFormula":
779         RIdemi = R.sqrtmI()
780     #
781     # Initialization
782     # --------------
783     __n = Xb.size
784     __m = selfA._parameters["NumberOfMembers"]
785     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
786     else:                         Pn = B
787     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
788     else:                         Qn = Q
789     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
790     #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
791     #
792     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
793         selfA.StoredVariables["Analysis"].store( Xb )
794         if selfA._toStore("APosterioriCovariance"):
795             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
796             covarianceXa = Pn
797     #
798     previousJMinimum = numpy.finfo(float).max
799     #
800     for step in range(duration-1):
801         if hasattr(Y,"store"):
802             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
803         else:
804             Ynpu = numpy.ravel( Y ).reshape((__p,1))
805         #
806         if U is not None:
807             if hasattr(U,"store") and len(U)>1:
808                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
809             elif hasattr(U,"store") and len(U)==1:
810                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
811             else:
812                 Un = numpy.asmatrix(numpy.ravel( U )).T
813         else:
814             Un = None
815         #
816         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
817             Xn = CovarianceInflation( Xn,
818                 selfA._parameters["InflationType"],
819                 selfA._parameters["InflationFactor"],
820                 )
821         #
822         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
823             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
824                 argsAsSerie = True,
825                 returnSerieAsArrayMatrix = True )
826             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
827             Xn_predicted = EMX + qi
828             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
829                 argsAsSerie = True,
830                 returnSerieAsArrayMatrix = True )
831             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
832                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
833                 Xn_predicted = Xn_predicted + Cm * Un
834         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
835             # ---> By principle, M = Id, Q = 0
836             Xn_predicted = Xn
837             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
838                 argsAsSerie = True,
839                 returnSerieAsArrayMatrix = True )
840         #
841         # Mean of forecast and observation of forecast
842         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
843         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
844         #
845         # Anomalies
846         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
847         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
848         #
849         #--------------------------
850         if VariantM == "KalmanFilterFormula":
851             mS    = RIdemi * EaHX / math.sqrt(__m-1)
852             delta = RIdemi * ( Ynpu - Hfm )
853             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
854             vw    = mT @ mS.T @ delta
855             #
856             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
857             mU    = numpy.identity(__m)
858             #
859             EaX   = EaX / math.sqrt(__m-1)
860             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
861         #--------------------------
862         elif VariantM == "Variational":
863             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
864             def CostFunction(w):
865                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
866                 _Jo = 0.5 * _A.T @ (RI * _A)
867                 _Jb = 0.5 * (__m-1) * w.T @ w
868                 _J  = _Jo + _Jb
869                 return float(_J)
870             def GradientOfCostFunction(w):
871                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
872                 _GradJo = - EaHX.T @ (RI * _A)
873                 _GradJb = (__m-1) * w.reshape((__m,1))
874                 _GradJ  = _GradJo + _GradJb
875                 return numpy.ravel(_GradJ)
876             vw = scipy.optimize.fmin_cg(
877                 f           = CostFunction,
878                 x0          = numpy.zeros(__m),
879                 fprime      = GradientOfCostFunction,
880                 args        = (),
881                 disp        = False,
882                 )
883             #
884             Hto = EaHX.T @ (RI * EaHX)
885             Htb = (__m-1) * numpy.identity(__m)
886             Hta = Hto + Htb
887             #
888             Pta = numpy.linalg.inv( Hta )
889             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
890             #
891             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
892         #--------------------------
893         elif VariantM == "FiniteSize11": # Jauge Boc2011
894             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
895             def CostFunction(w):
896                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
897                 _Jo = 0.5 * _A.T @ (RI * _A)
898                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
899                 _J  = _Jo + _Jb
900                 return float(_J)
901             def GradientOfCostFunction(w):
902                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
903                 _GradJo = - EaHX.T @ (RI * _A)
904                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
905                 _GradJ  = _GradJo + _GradJb
906                 return numpy.ravel(_GradJ)
907             vw = scipy.optimize.fmin_cg(
908                 f           = CostFunction,
909                 x0          = numpy.zeros(__m),
910                 fprime      = GradientOfCostFunction,
911                 args        = (),
912                 disp        = False,
913                 )
914             #
915             Hto = EaHX.T @ (RI * EaHX)
916             Htb = __m * \
917                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
918                 / (1 + 1/__m + vw.T @ vw)**2
919             Hta = Hto + Htb
920             #
921             Pta = numpy.linalg.inv( Hta )
922             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
923             #
924             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
925         #--------------------------
926         elif VariantM == "FiniteSize15": # Jauge Boc2015
927             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
928             def CostFunction(w):
929                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
930                 _Jo = 0.5 * _A.T @ (RI * _A)
931                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
932                 _J  = _Jo + _Jb
933                 return float(_J)
934             def GradientOfCostFunction(w):
935                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
936                 _GradJo = - EaHX.T @ (RI * _A)
937                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
938                 _GradJ  = _GradJo + _GradJb
939                 return numpy.ravel(_GradJ)
940             vw = scipy.optimize.fmin_cg(
941                 f           = CostFunction,
942                 x0          = numpy.zeros(__m),
943                 fprime      = GradientOfCostFunction,
944                 args        = (),
945                 disp        = False,
946                 )
947             #
948             Hto = EaHX.T @ (RI * EaHX)
949             Htb = (__m+1) * \
950                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
951                 / (1 + 1/__m + vw.T @ vw)**2
952             Hta = Hto + Htb
953             #
954             Pta = numpy.linalg.inv( Hta )
955             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
956             #
957             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
958         #--------------------------
959         elif VariantM == "FiniteSize16": # Jauge Boc2016
960             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
961             def CostFunction(w):
962                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
963                 _Jo = 0.5 * _A.T @ (RI * _A)
964                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
965                 _J  = _Jo + _Jb
966                 return float(_J)
967             def GradientOfCostFunction(w):
968                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
969                 _GradJo = - EaHX.T @ (RI * _A)
970                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
971                 _GradJ  = _GradJo + _GradJb
972                 return numpy.ravel(_GradJ)
973             vw = scipy.optimize.fmin_cg(
974                 f           = CostFunction,
975                 x0          = numpy.zeros(__m),
976                 fprime      = GradientOfCostFunction,
977                 args        = (),
978                 disp        = False,
979                 )
980             #
981             Hto = EaHX.T @ (RI * EaHX)
982             Htb = ((__m+1) / (__m-1)) * \
983                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
984                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
985             Hta = Hto + Htb
986             #
987             Pta = numpy.linalg.inv( Hta )
988             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
989             #
990             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
991         #--------------------------
992         else:
993             raise ValueError("VariantM has to be chosen in the authorized methods list.")
994         #
995         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
996             Xn = CovarianceInflation( Xn,
997                 selfA._parameters["InflationType"],
998                 selfA._parameters["InflationFactor"],
999                 )
1000         #
1001         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1002         #--------------------------
1003         #
1004         if selfA._parameters["StoreInternalVariables"] \
1005             or selfA._toStore("CostFunctionJ") \
1006             or selfA._toStore("CostFunctionJb") \
1007             or selfA._toStore("CostFunctionJo") \
1008             or selfA._toStore("APosterioriCovariance") \
1009             or selfA._toStore("InnovationAtCurrentAnalysis") \
1010             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1011             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1012             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1013             _Innovation = Ynpu - _HXa
1014         #
1015         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1016         # ---> with analysis
1017         selfA.StoredVariables["Analysis"].store( Xa )
1018         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1019             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1020         if selfA._toStore("InnovationAtCurrentAnalysis"):
1021             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1022         # ---> with current state
1023         if selfA._parameters["StoreInternalVariables"] \
1024             or selfA._toStore("CurrentState"):
1025             selfA.StoredVariables["CurrentState"].store( Xn )
1026         if selfA._toStore("ForecastState"):
1027             selfA.StoredVariables["ForecastState"].store( EMX )
1028         if selfA._toStore("BMA"):
1029             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1030         if selfA._toStore("InnovationAtCurrentState"):
1031             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1032         if selfA._toStore("SimulatedObservationAtCurrentState") \
1033             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1034             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1035         # ---> others
1036         if selfA._parameters["StoreInternalVariables"] \
1037             or selfA._toStore("CostFunctionJ") \
1038             or selfA._toStore("CostFunctionJb") \
1039             or selfA._toStore("CostFunctionJo") \
1040             or selfA._toStore("CurrentOptimum") \
1041             or selfA._toStore("APosterioriCovariance"):
1042             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1043             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1044             J   = Jb + Jo
1045             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1046             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1047             selfA.StoredVariables["CostFunctionJ" ].store( J )
1048             #
1049             if selfA._toStore("IndexOfOptimum") \
1050                 or selfA._toStore("CurrentOptimum") \
1051                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1052                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1053                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1054                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1055                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1056             if selfA._toStore("IndexOfOptimum"):
1057                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1058             if selfA._toStore("CurrentOptimum"):
1059                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1060             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1061                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1062             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1063                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1064             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1065                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1066             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1067                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1068         if selfA._toStore("APosterioriCovariance"):
1069             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1070         if selfA._parameters["EstimationOf"] == "Parameters" \
1071             and J < previousJMinimum:
1072             previousJMinimum    = J
1073             XaMin               = Xa
1074             if selfA._toStore("APosterioriCovariance"):
1075                 covarianceXaMin = Pn
1076         # ---> For the smoothers
1077         if selfA._toStore("CurrentEnsembleState"):
1078             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1079     #
1080     # Final additional storage of the optimum for parameter estimation
1081     # ----------------------------------------------------------------------
1082     if selfA._parameters["EstimationOf"] == "Parameters":
1083         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1084         selfA.StoredVariables["Analysis"].store( XaMin )
1085         if selfA._toStore("APosterioriCovariance"):
1086             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1087         if selfA._toStore("BMA"):
1088             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1089     #
1090     return 0
1091
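# Note on the "KalmanFilterFormula" analysis update used in etkf and enks above (a plain
# restatement of the code, in the spirit of Hunt 2007): with S = R^{-1/2} Ya / sqrt(m-1)
# the scaled anomalies of the simulated observations, and d = R^{-1/2} (y - H(xf)) the
# scaled innovation,
#   T  = (I_m + S^T S)^{-1}
#   wa = T S^T d
#   Xa = xf + (Xf / sqrt(m-1)) @ ( wa + sqrt(m-1) * T^{1/2} ),  Xf being the forecast anomalies,
# which is what the variables mS, delta, mT, vw and Tdemi implement.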
1092 # ==============================================================================
1093 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1094     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1095     """
1096     Iterative EnKF
1097     """
1098     if selfA._parameters["EstimationOf"] == "Parameters":
1099         selfA._parameters["StoreInternalVariables"] = True
1100     #
1101     # Operators
1102     # ----------
1103     H = HO["Direct"].appliedControledFormTo
1104     #
1105     if selfA._parameters["EstimationOf"] == "State":
1106         M = EM["Direct"].appliedControledFormTo
1107     #
1108     if CM is not None and "Tangent" in CM and U is not None:
1109         Cm = CM["Tangent"].asMatrix(Xb)
1110     else:
1111         Cm = None
1112     #
1113     # Number of steps equal to the number of observation steps
1114     # ---------------------------------------------------------
1115     if hasattr(Y,"stepnumber"):
1116         duration = Y.stepnumber()
1117         __p = numpy.cumprod(Y.shape())[-1]
1118     else:
1119         duration = 2
1120         __p = numpy.array(Y).size
1121     #
1122     # Precompute the inversions of B and R
1123     # ------------------------------------
1124     if selfA._parameters["StoreInternalVariables"] \
1125         or selfA._toStore("CostFunctionJ") \
1126         or selfA._toStore("CostFunctionJb") \
1127         or selfA._toStore("CostFunctionJo") \
1128         or selfA._toStore("CurrentOptimum") \
1129         or selfA._toStore("APosterioriCovariance"):
1130         BI = B.getI()
1131     RI = R.getI()
1132     #
1133     # Initialization
1134     # --------------
1135     __n = Xb.size
1136     __m = selfA._parameters["NumberOfMembers"]
1137     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1138     else:                         Pn = B
1139     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1140     else:                         Rn = R
1141     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1142     else:                         Qn = Q
1143     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1144     #
1145     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1146         selfA.StoredVariables["Analysis"].store( Xb )
1147         if selfA._toStore("APosterioriCovariance"):
1148             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1149             covarianceXa = Pn
1150     #
1151     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Fix: required by the IndexOfOptimum/CurrentOptimum storage below
1152     #
1153     for step in range(duration-1):
1154         if hasattr(Y,"store"):
1155             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1156         else:
1157             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1158         #
1159         if U is not None:
1160             if hasattr(U,"store") and len(U)>1:
1161                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1162             elif hasattr(U,"store") and len(U)==1:
1163                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1164             else:
1165                 Un = numpy.asmatrix(numpy.ravel( U )).T
1166         else:
1167             Un = None
1168         #
1169         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1170             Xn = CovarianceInflation( Xn,
1171                 selfA._parameters["InflationType"],
1172                 selfA._parameters["InflationFactor"],
1173                 )
1174         #
1175         #--------------------------
1176         if VariantM == "IEnKF12":
1177             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1178             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1179             __j = 0
1180             Deltaw = 1
1181             if not BnotT:
1182                 Ta  = numpy.identity(__m)
1183             vw  = numpy.zeros(__m)
1184             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1185                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1186                 #
1187                 if BnotT:
1188                     E1 = vx1 + _epsilon * EaX
1189                 else:
1190                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1191                 #
1192                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1193                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1194                         argsAsSerie = True,
1195                         returnSerieAsArrayMatrix = True )
1196                 elif selfA._parameters["EstimationOf"] == "Parameters":
1197                     # --- > By principle, M = Id
1198                     E2 = Xn
1199                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1200                 vy1 = H((vx2, Un)).reshape((__p,1))
1201                 #
1202                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1203                     argsAsSerie = True,
1204                     returnSerieAsArrayMatrix = True )
1205                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1206                 #
1207                 if BnotT:
1208                     EaY = (HE2 - vy2) / _epsilon
1209                 else:
1210                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1211                 #
1212                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1213                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1214                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1215                 #
1216                 vw = vw + Deltaw
1217                 #
1218                 if not BnotT:
1219                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1220                 #
1221                 __j = __j + 1
1222             #
1223             A2 = EnsembleOfAnomalies( E2 )
1224             #
1225             if BnotT:
1226                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1227                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1228             #
1229             Xn = vx2 + A2
1230         #--------------------------
1231         else:
1232             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1233         #
1234         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1235             Xn = CovarianceInflation( Xn,
1236                 selfA._parameters["InflationType"],
1237                 selfA._parameters["InflationFactor"],
1238                 )
1239         #
1240         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1241         #--------------------------
1242         #
1243         if selfA._parameters["StoreInternalVariables"] \
1244             or selfA._toStore("CostFunctionJ") \
1245             or selfA._toStore("CostFunctionJb") \
1246             or selfA._toStore("CostFunctionJo") \
1247             or selfA._toStore("APosterioriCovariance") \
1248             or selfA._toStore("InnovationAtCurrentAnalysis") \
1249             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1250             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1251             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1252             _Innovation = Ynpu - _HXa
1253         #
1254         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1255         # ---> with analysis
1256         selfA.StoredVariables["Analysis"].store( Xa )
1257         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1258             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1259         if selfA._toStore("InnovationAtCurrentAnalysis"):
1260             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1261         # ---> with current state
1262         if selfA._parameters["StoreInternalVariables"] \
1263             or selfA._toStore("CurrentState"):
1264             selfA.StoredVariables["CurrentState"].store( Xn )
1265         if selfA._toStore("ForecastState"):
1266             selfA.StoredVariables["ForecastState"].store( E2 )
1267         if selfA._toStore("BMA"):
1268             selfA.StoredVariables["BMA"].store( E2 - Xa )
1269         if selfA._toStore("InnovationAtCurrentState"):
1270             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1271         if selfA._toStore("SimulatedObservationAtCurrentState") \
1272             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1273             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1274         # ---> others
1275         if selfA._parameters["StoreInternalVariables"] \
1276             or selfA._toStore("CostFunctionJ") \
1277             or selfA._toStore("CostFunctionJb") \
1278             or selfA._toStore("CostFunctionJo") \
1279             or selfA._toStore("CurrentOptimum") \
1280             or selfA._toStore("APosterioriCovariance"):
1281             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1282             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1283             J   = Jb + Jo
1284             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1285             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1286             selfA.StoredVariables["CostFunctionJ" ].store( J )
1287             #
1288             if selfA._toStore("IndexOfOptimum") \
1289                 or selfA._toStore("CurrentOptimum") \
1290                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1291                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1292                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1293                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1294                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1295             if selfA._toStore("IndexOfOptimum"):
1296                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1297             if selfA._toStore("CurrentOptimum"):
1298                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1299             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1300                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1301             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1302                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1303             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1304                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1305             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1306                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1307         if selfA._toStore("APosterioriCovariance"):
1308             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1309         if selfA._parameters["EstimationOf"] == "Parameters" \
1310             and J < previousJMinimum:
1311             previousJMinimum    = J
1312             XaMin               = Xa
1313             if selfA._toStore("APosterioriCovariance"):
1314                 covarianceXaMin = Pn
1315     #
1316     # Additional final storage of the optimum in parameter estimation
1317     # ----------------------------------------------------------------
1318     if selfA._parameters["EstimationOf"] == "Parameters":
1319         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1320         selfA.StoredVariables["Analysis"].store( XaMin )
1321         if selfA._toStore("APosterioriCovariance"):
1322             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1323         if selfA._toStore("BMA"):
1324             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1325     #
1326     return 0
1327
1328 # ==============================================================================
1329 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1330     """
1331     Incremental 3DVAR
1332     """
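    # Schematically: each outer iteration re-linearizes the observation operator
    # (tangent Ht) around the current iterate Xr, and the inner minimization solves
    # the quadratic problem in the increment dx relative to the background,
    #     J(dx) = 1/2 dx' B^{-1} dx + 1/2 (d - Ht dx)' R^{-1} (d - Ht dx),
    # with d = Y - Hm(Xb) the innovation; the retained optimum gives the new
    # iterate Xr = Xb + dx and, at convergence, the analysis Xa = Xr.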
1333     #
1334     # Initializations
1335     # ---------------
1336     #
1337     # Non-linear operator for the outer loop
1338     Hm = HO["Direct"].appliedTo
1339     #
1340     # Precompute the inversions of B and R
1341     BI = B.getI()
1342     RI = R.getI()
1343     #
1344     # Starting point of the optimization
1345     Xini = selfA._parameters["InitializationPoint"]
1346     #
1347     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1348     Innovation = Y - HXb
1349     #
1350     # Outer Loop
1351     # ----------
1352     iOuter = 0
1353     J      = 1./mpr
1354     DeltaJ = 1./mpr
1355     Xr     = Xini.reshape((-1,1))
1356     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1357         #
1358         # Inner Loop
1359         # ----------
1360         Ht = HO["Tangent"].asMatrix(Xr)
1361         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1362         #
1363         # Definition of the cost function
1364         # -------------------------------
1365         def CostFunction(dx):
1366             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1367             if selfA._parameters["StoreInternalVariables"] or \
1368                 selfA._toStore("CurrentState") or \
1369                 selfA._toStore("CurrentOptimum"):
1370                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1371             _HdX = Ht * _dX
1372             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1373             _dInnovation = Innovation - _HdX
1374             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1375                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1376                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1377             if selfA._toStore("InnovationAtCurrentState"):
1378                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1379             #
1380             Jb  = float( 0.5 * _dX.T * BI * _dX )
1381             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1382             J   = Jb + Jo
1383             #
1384             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1385             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1386             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1387             selfA.StoredVariables["CostFunctionJ" ].store( J )
1388             if selfA._toStore("IndexOfOptimum") or \
1389                 selfA._toStore("CurrentOptimum") or \
1390                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1391                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1392                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1393                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1394                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1395             if selfA._toStore("IndexOfOptimum"):
1396                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1397             if selfA._toStore("CurrentOptimum"):
1398                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1399             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1400                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1401             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1402                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1403             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1404                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1405             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1406                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1407             return J
1408         #
1409         def GradientOfCostFunction(dx):
1410             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1411             _HdX         = Ht * _dX
1412             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1413             _dInnovation = Innovation - _HdX
1414             GradJb       = BI * _dX
1415             GradJo       = - Ht.T @ (RI * _dInnovation)
1416             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1417             return GradJ
1418         #
1419         # Minimization of the functional
1420         # ------------------------------
1421         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1422         #
1423         if selfA._parameters["Minimizer"] == "LBFGSB":
1424             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1425             if "0.19" <= scipy.version.version <= "1.1.0":
1426                 import lbfgsbhlt as optimiseur
1427             else:
1428                 import scipy.optimize as optimiseur
1429             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1430                 func        = CostFunction,
1431                 x0          = numpy.zeros(Xini.size),
1432                 fprime      = GradientOfCostFunction,
1433                 args        = (),
1434                 bounds      = selfA._parameters["Bounds"],
1435                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1436                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1437                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1438                 iprint      = selfA._parameters["optiprint"],
1439                 )
1440             nfeval = Informations['funcalls']
1441             rc     = Informations['warnflag']
1442         elif selfA._parameters["Minimizer"] == "TNC":
1443             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1444                 func        = CostFunction,
1445                 x0          = numpy.zeros(Xini.size),
1446                 fprime      = GradientOfCostFunction,
1447                 args        = (),
1448                 bounds      = selfA._parameters["Bounds"],
1449                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1450                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1451                 ftol        = selfA._parameters["CostDecrementTolerance"],
1452                 messages    = selfA._parameters["optmessages"],
1453                 )
1454         elif selfA._parameters["Minimizer"] == "CG":
1455             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1456                 f           = CostFunction,
1457                 x0          = numpy.zeros(Xini.size),
1458                 fprime      = GradientOfCostFunction,
1459                 args        = (),
1460                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1461                 gtol        = selfA._parameters["GradientNormTolerance"],
1462                 disp        = selfA._parameters["optdisp"],
1463                 full_output = True,
1464                 )
1465         elif selfA._parameters["Minimizer"] == "NCG":
1466             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1467                 f           = CostFunction,
1468                 x0          = numpy.zeros(Xini.size),
1469                 fprime      = GradientOfCostFunction,
1470                 args        = (),
1471                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1472                 avextol     = selfA._parameters["CostDecrementTolerance"],
1473                 disp        = selfA._parameters["optdisp"],
1474                 full_output = True,
1475                 )
1476         elif selfA._parameters["Minimizer"] == "BFGS":
1477             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1478                 f           = CostFunction,
1479                 x0          = numpy.zeros(Xini.size),
1480                 fprime      = GradientOfCostFunction,
1481                 args        = (),
1482                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1483                 gtol        = selfA._parameters["GradientNormTolerance"],
1484                 disp        = selfA._parameters["optdisp"],
1485                 full_output = True,
1486                 )
1487         else:
1488             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1489         #
1490         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1491         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1492         #
1493         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1494             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1495             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1496         else:
1497             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1498         #
1499         Xr     = Minimum
1500         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
        J      = selfA.StoredVariables["CostFunctionJ" ][-1] # Fix: update the outer-loop reference value of J so that DeltaJ measures the actual decrement
1501         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1502     #
1503     # Retrieval of the analysis
1504     # -------------------------
1505     Xa = Xr
1506     #
1507     selfA.StoredVariables["Analysis"].store( Xa )
1508     #
1509     if selfA._toStore("OMA") or \
1510         selfA._toStore("SigmaObs2") or \
1511         selfA._toStore("SimulationQuantiles") or \
1512         selfA._toStore("SimulatedObservationAtOptimum"):
1513         if selfA._toStore("SimulatedObservationAtCurrentState"):
1514             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1515         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1516             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1517         else:
1518             HXa = Hm( Xa )
1519     #
1520     # Computation of the analysis covariance
1521     # --------------------------------------
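    # The analysis error covariance is approximated by the inverse of the
    # cost-function Hessian, assembled column by column below with the tangent and
    # adjoint operators:  A ~ ( B^{-1} + H' R^{-1} H )^{-1}.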
1522     if selfA._toStore("APosterioriCovariance") or \
1523         selfA._toStore("SimulationQuantiles") or \
1524         selfA._toStore("JacobianMatrixAtOptimum") or \
1525         selfA._toStore("KalmanGainAtOptimum"):
1526         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1527         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1528     if selfA._toStore("APosterioriCovariance") or \
1529         selfA._toStore("SimulationQuantiles") or \
1530         selfA._toStore("KalmanGainAtOptimum"):
1531         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1532         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1533     if selfA._toStore("APosterioriCovariance") or \
1534         selfA._toStore("SimulationQuantiles"):
1535         HessienneI = []
1536         nb = Xa.size
1537         for i in range(nb):
1538             _ee    = numpy.matrix(numpy.zeros(nb)).T
1539             _ee[i] = 1.
1540             _HtEE  = numpy.dot(HtM,_ee)
1541             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1542             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1543         HessienneI = numpy.matrix( HessienneI )
1544         A = HessienneI.I
1545         if min(A.shape) != max(A.shape):
1546             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1547         if (numpy.diag(A) < 0).any():
1548             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1549         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1550             try:
1551                 L = numpy.linalg.cholesky( A )
1552             except:
1553                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1554     if selfA._toStore("APosterioriCovariance"):
1555         selfA.StoredVariables["APosterioriCovariance"].store( A )
1556     if selfA._toStore("JacobianMatrixAtOptimum"):
1557         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1558     if selfA._toStore("KalmanGainAtOptimum"):
1559         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1560         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1561         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1562     #
1563     # Additional computations and/or storage
1564     # ---------------------------------------
1565     if selfA._toStore("Innovation") or \
1566         selfA._toStore("SigmaObs2") or \
1567         selfA._toStore("MahalanobisConsistency") or \
1568         selfA._toStore("OMB"):
1569         d  = Y - HXb
1570     if selfA._toStore("Innovation"):
1571         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1572     if selfA._toStore("BMA"):
1573         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1574     if selfA._toStore("OMA"):
1575         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1576     if selfA._toStore("OMB"):
1577         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1578     if selfA._toStore("SigmaObs2"):
1579         TraceR = R.trace(Y.size)
1580         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1581     if selfA._toStore("MahalanobisConsistency"):
1582         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1583     if selfA._toStore("SimulationQuantiles"):
1584         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1585         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1586         EXr  = None
1587         YfQ  = None
1588         for i in range(nech):
1589             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1590                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1591                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1592                 Yr = HXa + dYr
1593                 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
1594             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1595                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1596                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1597             if YfQ is None:
1598                 YfQ = Yr
1599                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
1600             else:
1601                 YfQ = numpy.hstack((YfQ,Yr))
1602                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
1603         YfQ.sort(axis=-1)
1604         YQ = None
1605         for quantile in selfA._parameters["Quantiles"]:
1606             if not (0. <= float(quantile) <= 1.): continue
1607             indice = int(nech * float(quantile) - 1./nech)
1608             if YQ is None: YQ = YfQ[:,indice]
1609             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1610         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1611         if selfA._toStore("SampledStateForQuantiles"):
1612             selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
1613     if selfA._toStore("SimulatedObservationAtBackground"):
1614         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1615     if selfA._toStore("SimulatedObservationAtOptimum"):
1616         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1617     #
1618     return 0
1619
1620 # ==============================================================================
1621 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1622     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1623     """
1624     Maximum Likelihood Ensemble Filter
1625     """
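    # Schematically, the forecast ensemble being fixed, the inner loop below
    # performs Gauss-Newton iterations in ensemble space on
    #     J(w) = 1/2 w'w + 1/2 (y - H(xf + EaX w))' R^{-1} (y - H(xf + EaX w)),
    # the observation anomalies EaY being re-evaluated non-linearly at each
    # iteration; the analysed ensemble is then rebuilt from the final transform
    # as Xn = vx1 + sqrt(m-1) * EaX @ Ta @ Ua.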
1626     if selfA._parameters["EstimationOf"] == "Parameters":
1627         selfA._parameters["StoreInternalVariables"] = True
1628     #
1629     # Operators
1630     # ---------
1631     H = HO["Direct"].appliedControledFormTo
1632     #
1633     if selfA._parameters["EstimationOf"] == "State":
1634         M = EM["Direct"].appliedControledFormTo
1635     #
1636     if CM is not None and "Tangent" in CM and U is not None:
1637         Cm = CM["Tangent"].asMatrix(Xb)
1638     else:
1639         Cm = None
1640     #
1641     # Number of steps equal to the number of observation steps
1642     # ---------------------------------------------------------
1643     if hasattr(Y,"stepnumber"):
1644         duration = Y.stepnumber()
1645         __p = numpy.cumprod(Y.shape())[-1]
1646     else:
1647         duration = 2
1648         __p = numpy.array(Y).size
1649     #
1650     # Precompute the inversions of B and R
1651     # ------------------------------------
1652     if selfA._parameters["StoreInternalVariables"] \
1653         or selfA._toStore("CostFunctionJ") \
1654         or selfA._toStore("CostFunctionJb") \
1655         or selfA._toStore("CostFunctionJo") \
1656         or selfA._toStore("CurrentOptimum") \
1657         or selfA._toStore("APosterioriCovariance"):
1658         BI = B.getI()
1659     RI = R.getI()
1660     #
1661     # Initialization
1662     # --------------
1663     __n = Xb.size
1664     __m = selfA._parameters["NumberOfMembers"]
1665     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1666     else:                         Pn = B
1667     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1668     else:                         Rn = R
1669     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1670     else:                         Qn = Q
1671     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1672     #
1673     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1674         selfA.StoredVariables["Analysis"].store( Xb )
1675         if selfA._toStore("APosterioriCovariance"):
1676             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1677             covarianceXa = Pn
1678     #
1679     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Fix: required by the IndexOfOptimum/CurrentOptimum storage below
1680     #
1681     for step in range(duration-1):
1682         if hasattr(Y,"store"):
1683             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1684         else:
1685             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1686         #
1687         if U is not None:
1688             if hasattr(U,"store") and len(U)>1:
1689                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1690             elif hasattr(U,"store") and len(U)==1:
1691                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1692             else:
1693                 Un = numpy.asmatrix(numpy.ravel( U )).T
1694         else:
1695             Un = None
1696         #
1697         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1698             Xn = CovarianceInflation( Xn,
1699                 selfA._parameters["InflationType"],
1700                 selfA._parameters["InflationFactor"],
1701                 )
1702         #
1703         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1704             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1705                 argsAsSerie = True,
1706                 returnSerieAsArrayMatrix = True )
1707             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1708             Xn_predicted = EMX + qi
1709             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1710                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1711                 Xn_predicted = Xn_predicted + Cm * Un
1712         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1713             # --- > By principle, M = Id, Q = 0
1714             Xn_predicted = Xn
1715         #
1716         #--------------------------
1717         if VariantM == "MLEF13":
1718             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1719             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1720             Ua  = numpy.identity(__m)
1721             __j = 0
1722             Deltaw = 1
1723             if not BnotT:
1724                 Ta  = numpy.identity(__m)
1725             vw  = numpy.zeros(__m)
1726             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1727                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1728                 #
1729                 if BnotT:
1730                     E1 = vx1 + _epsilon * EaX
1731                 else:
1732                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1733                 #
1734                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1735                     argsAsSerie = True,
1736                     returnSerieAsArrayMatrix = True )
1737                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1738                 #
1739                 if BnotT:
1740                     EaY = (HE2 - vy2) / _epsilon
1741                 else:
1742                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1743                 #
1744                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1745                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1746                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1747                 #
1748                 vw = vw + Deltaw
1749                 #
1750                 if not BnotT:
1751                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1752                 #
1753                 __j = __j + 1
1754             #
1755             if BnotT:
1756                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1757             #
1758             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1759         #--------------------------
1760         else:
1761             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1762         #
1763         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1764             Xn = CovarianceInflation( Xn,
1765                 selfA._parameters["InflationType"],
1766                 selfA._parameters["InflationFactor"],
1767                 )
1768         #
1769         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1770         #--------------------------
1771         #
1772         if selfA._parameters["StoreInternalVariables"] \
1773             or selfA._toStore("CostFunctionJ") \
1774             or selfA._toStore("CostFunctionJb") \
1775             or selfA._toStore("CostFunctionJo") \
1776             or selfA._toStore("APosterioriCovariance") \
1777             or selfA._toStore("InnovationAtCurrentAnalysis") \
1778             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1779             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1780             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1781             _Innovation = Ynpu - _HXa
1782         #
1783         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1784         # ---> with analysis
1785         selfA.StoredVariables["Analysis"].store( Xa )
1786         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1787             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1788         if selfA._toStore("InnovationAtCurrentAnalysis"):
1789             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1790         # ---> with current state
1791         if selfA._parameters["StoreInternalVariables"] \
1792             or selfA._toStore("CurrentState"):
1793             selfA.StoredVariables["CurrentState"].store( Xn )
1794         if selfA._toStore("ForecastState"):
1795             selfA.StoredVariables["ForecastState"].store( EMX )
1796         if selfA._toStore("BMA"):
1797             selfA.StoredVariables["BMA"].store( EMX - Xa )
1798         if selfA._toStore("InnovationAtCurrentState"):
1799             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1800         if selfA._toStore("SimulatedObservationAtCurrentState") \
1801             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1802             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1803         # ---> others
1804         if selfA._parameters["StoreInternalVariables"] \
1805             or selfA._toStore("CostFunctionJ") \
1806             or selfA._toStore("CostFunctionJb") \
1807             or selfA._toStore("CostFunctionJo") \
1808             or selfA._toStore("CurrentOptimum") \
1809             or selfA._toStore("APosterioriCovariance"):
1810             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1811             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1812             J   = Jb + Jo
1813             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1814             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1815             selfA.StoredVariables["CostFunctionJ" ].store( J )
1816             #
1817             if selfA._toStore("IndexOfOptimum") \
1818                 or selfA._toStore("CurrentOptimum") \
1819                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1820                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1821                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1822                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1823                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1824             if selfA._toStore("IndexOfOptimum"):
1825                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1826             if selfA._toStore("CurrentOptimum"):
1827                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1828             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1829                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1830             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1831                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1832             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1833                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1834             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1835                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1836         if selfA._toStore("APosterioriCovariance"):
1837             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1838         if selfA._parameters["EstimationOf"] == "Parameters" \
1839             and J < previousJMinimum:
1840             previousJMinimum    = J
1841             XaMin               = Xa
1842             if selfA._toStore("APosterioriCovariance"):
1843                 covarianceXaMin = Pn
1844     #
1845     # Additional final storage of the optimum in parameter estimation
1846     # ----------------------------------------------------------------
1847     if selfA._parameters["EstimationOf"] == "Parameters":
1848         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1849         selfA.StoredVariables["Analysis"].store( XaMin )
1850         if selfA._toStore("APosterioriCovariance"):
1851             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1852         if selfA._toStore("BMA"):
1853             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1854     #
1855     return 0
1856
1857 # ==============================================================================
1858 def mmqr(
1859         func     = None,
1860         x0       = None,
1861         fprime   = None,
1862         bounds   = None,
1863         quantile = 0.5,
1864         maxfun   = 15000,
1865         toler    = 1.e-06,
1866         y        = None,
1867         ):
1868     """
1869     Computer implementation of the MMQR algorithm, based on the publication:
1870     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1871     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1872     """
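    # Schematically: the non-differentiable quantile (check) loss
    #     rho_q(r) = r * (q - 1_{r<0})
    # summed over the residuals r_i is majorized, at the current iterate, by the
    # smooth quadratic surrogate sum_i r_i^2/(epsilon+|r_i|) + (4q-2) sum_i r_i
    # (up to constants), computed below as "surrogate"; repeatedly minimizing this
    # surrogate is the MM principle of the reference above.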
1873     #
1874     # Retrieval of the initial data and information
1875     # ----------------------------------------------
1876     variables = numpy.ravel( x0 )
1877     mesures   = numpy.ravel( y )
1878     increment = sys.float_info[0]
1879     p         = variables.size
1880     n         = mesures.size
1881     quantile  = float(quantile)
1882     #
1883     # Computation of the MM parameters
1884     # --------------------------------
1885     tn      = float(toler) / n
1886     e0      = -tn / math.log(tn)
1887     epsilon = (e0-tn)/(1+math.log(e0))
1888     #
1889     # Initialization computations
1890     # ---------------------------
1891     residus  = mesures - numpy.ravel( func( variables ) )
1892     poids    = 1./(epsilon+numpy.abs(residus))
1893     veps     = 1. - 2. * quantile - residus * poids
1894     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1895     iteration = 0
1896     #
1897     # Iterative search
1898     # ----------------
1899     while (increment > toler) and (iteration < maxfun) :
1900         iteration += 1
1901         #
1902         Derivees  = numpy.array(fprime(variables))
1903         Derivees  = Derivees.reshape(n,p) # Needed to restore the matrix shape if it goes through YACS pipes
1904         DeriveesT = Derivees.transpose()
1905         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1906         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
1907         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1908         #
1909         variables = variables + step
1910         if bounds is not None:
1911             # Warning: avoid an infinite loop if an interval is too small
1912             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1913                 step      = step/2.
1914                 variables = variables - step
1915         residus   = mesures - numpy.ravel( func(variables) )
1916         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1917         #
1918         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1919             step      = step/2.
1920             variables = variables - step
1921             residus   = mesures - numpy.ravel( func(variables) )
1922             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1923         #
1924         increment     = lastsurrogate-surrogate
1925         poids         = 1./(epsilon+numpy.abs(residus))
1926         veps          = 1. - 2. * quantile - residus * poids
1927         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
1928     #
1929     # Discrepancy measure
1930     # -------------------
1931     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
1932     #
1933     return variables, Ecart, [n,p,iteration,increment,0]
1934
1935 # ==============================================================================
1936 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
1937     """
1938     Multi-step and multi-method 3DVAR
1939     """
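    # Schematically: for each observation step, the previous analysis is propagated
    # by the evolution model M to give the forecast Xn_predicted (identity in
    # parameter estimation), then the single-step scheme "oneCycle" (one of the
    # 3DVAR variants of this module) is applied to that forecast and to the
    # current observation Ynpu.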
1940     #
1941     # Initialization
1942     # --------------
1943     Xn = numpy.ravel(Xb).reshape((-1,1))
1944     #
1945     if selfA._parameters["EstimationOf"] == "State":
1946         M = EM["Direct"].appliedTo
1947         #
1948         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1949             selfA.StoredVariables["Analysis"].store( Xn )
1950             if selfA._toStore("APosterioriCovariance"):
1951                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
1952                 else:                         Pn = B
1953                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1954             if selfA._toStore("ForecastState"):
1955                 selfA.StoredVariables["ForecastState"].store( Xn )
1956     #
1957     if hasattr(Y,"stepnumber"):
1958         duration = Y.stepnumber()
1959     else:
1960         duration = 2
1961     #
1962     # Multi-step
1963     # ----------
1964     for step in range(duration-1):
1965         if hasattr(Y,"store"):
1966             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
1967         else:
1968             Ynpu = numpy.ravel( Y ).reshape((-1,1))
1969         #
1970         if selfA._parameters["EstimationOf"] == "State": # Forecast
1971             Xn = selfA.StoredVariables["Analysis"][-1]
1972             Xn_predicted = M( Xn )
1973             if selfA._toStore("ForecastState"):
1974                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1975         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
1976             # --- > By principle, M = Id, Q = 0
1977             Xn_predicted = Xn
1978         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
1979         #
1980         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
1981     #
1982     return 0
1983
1984 # ==============================================================================
1985 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1986     """
1987     3DVAR PSAS
1988     """
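    # Schematically, PSAS works in observation (dual) space: the minimization below
    # is over a vector w of the size of Y, with
    #     J(w) = 1/2 w' (H B H' + R) w - w' d,    d = Y - H(Xb),
    # and the physical-space analysis is recovered as Xa = Xb + B H' w (the
    # "Xb + BHT * _W" state stored in the cost function below).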
1989     #
1990     # Initializations
1991     # ---------------
1992     #
1993     # Operators
1994     Hm = HO["Direct"].appliedTo
1995     #
1996     # Possible use of a precomputed H(Xb) vector
1997     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1998         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1999     else:
2000         HXb = Hm( Xb )
2001     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2002     if Y.size != HXb.size:
2003         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2004     if max(Y.shape) != max(HXb.shape):
2005         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2006     #
2007     if selfA._toStore("JacobianMatrixAtBackground"):
2008         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2009         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2010         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2011     #
2012     Ht = HO["Tangent"].asMatrix(Xb)
2013     BHT = B * Ht.T
2014     HBHTpR = R + Ht * BHT
2015     Innovation = Y - HXb
2016     #
2017     # Starting point of the optimization
2018     Xini = numpy.zeros(Xb.shape)
2019     #
2020     # Definition of the cost function
2021     # -------------------------------
2022     def CostFunction(w):
2023         _W = numpy.asmatrix(numpy.ravel( w )).T
2024         if selfA._parameters["StoreInternalVariables"] or \
2025             selfA._toStore("CurrentState") or \
2026             selfA._toStore("CurrentOptimum"):
2027             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2028         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2029             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2030             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2031         if selfA._toStore("InnovationAtCurrentState"):
2032             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2033         #
2034         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2035         Jo  = float( - _W.T * Innovation )
2036         J   = Jb + Jo
2037         #
2038         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2039         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2040         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2041         selfA.StoredVariables["CostFunctionJ" ].store( J )
2042         if selfA._toStore("IndexOfOptimum") or \
2043             selfA._toStore("CurrentOptimum") or \
2044             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2045             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2046             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2047             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2048             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2049         if selfA._toStore("IndexOfOptimum"):
2050             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2051         if selfA._toStore("CurrentOptimum"):
2052             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2053         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2054             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2055         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2056             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2057         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2058             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2059         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2060             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2061         return J
2062     #
2063     def GradientOfCostFunction(w):
2064         _W = numpy.asmatrix(numpy.ravel( w )).T
2065         GradJb  = HBHTpR * _W
2066         GradJo  = - Innovation
2067         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2068         return GradJ
2069     #
2070     # Minimization of the functional
2071     # ------------------------------
2072     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2073     #
2074     if selfA._parameters["Minimizer"] == "LBFGSB":
2075         if "0.19" <= scipy.version.version <= "1.1.0":
2076             import lbfgsbhlt as optimiseur
2077         else:
2078             import scipy.optimize as optimiseur
2079         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2080             func        = CostFunction,
2081             x0          = Xini,
2082             fprime      = GradientOfCostFunction,
2083             args        = (),
2084             bounds      = selfA._parameters["Bounds"],
2085             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2086             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2087             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2088             iprint      = selfA._parameters["optiprint"],
2089             )
2090         nfeval = Informations['funcalls']
2091         rc     = Informations['warnflag']
2092     elif selfA._parameters["Minimizer"] == "TNC":
2093         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2094             func        = CostFunction,
2095             x0          = Xini,
2096             fprime      = GradientOfCostFunction,
2097             args        = (),
2098             bounds      = selfA._parameters["Bounds"],
2099             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2100             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2101             ftol        = selfA._parameters["CostDecrementTolerance"],
2102             messages    = selfA._parameters["optmessages"],
2103             )
2104     elif selfA._parameters["Minimizer"] == "CG":
2105         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2106             f           = CostFunction,
2107             x0          = Xini,
2108             fprime      = GradientOfCostFunction,
2109             args        = (),
2110             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2111             gtol        = selfA._parameters["GradientNormTolerance"],
2112             disp        = selfA._parameters["optdisp"],
2113             full_output = True,
2114             )
2115     elif selfA._parameters["Minimizer"] == "NCG":
2116         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2117             f           = CostFunction,
2118             x0          = Xini,
2119             fprime      = GradientOfCostFunction,
2120             args        = (),
2121             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2122             avextol     = selfA._parameters["CostDecrementTolerance"],
2123             disp        = selfA._parameters["optdisp"],
2124             full_output = True,
2125             )
2126     elif selfA._parameters["Minimizer"] == "BFGS":
2127         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2128             f           = CostFunction,
2129             x0          = Xini,
2130             fprime      = GradientOfCostFunction,
2131             args        = (),
2132             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2133             gtol        = selfA._parameters["GradientNormTolerance"],
2134             disp        = selfA._parameters["optdisp"],
2135             full_output = True,
2136             )
2137     else:
2138         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2139     #
2140     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2141     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2142     #
2143     # Correction to work around a TNC bug on the returned Minimum
2144     # ------------------------------------------------------------
2145     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2146         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2147         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2148     else:
2149         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2150     #
2151     # Retrieval of the analysis
2152     # -------------------------
2153     Xa = Minimum
2154     #
2155     selfA.StoredVariables["Analysis"].store( Xa )
2156     #
2157     if selfA._toStore("OMA") or \
2158         selfA._toStore("SigmaObs2") or \
2159         selfA._toStore("SimulationQuantiles") or \
2160         selfA._toStore("SimulatedObservationAtOptimum"):
2161         if selfA._toStore("SimulatedObservationAtCurrentState"):
2162             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2163         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2164             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2165         else:
2166             HXa = Hm( Xa )
2167     #
2168     # Computation of the analysis covariance
2169     # --------------------------------------
2170     if selfA._toStore("APosterioriCovariance") or \
2171         selfA._toStore("SimulationQuantiles") or \
2172         selfA._toStore("JacobianMatrixAtOptimum") or \
2173         selfA._toStore("KalmanGainAtOptimum"):
2174         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2175         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2176     if selfA._toStore("APosterioriCovariance") or \
2177         selfA._toStore("SimulationQuantiles") or \
2178         selfA._toStore("KalmanGainAtOptimum"):
2179         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2180         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2181     if selfA._toStore("APosterioriCovariance") or \
2182         selfA._toStore("SimulationQuantiles"):
2183         BI = B.getI()
2184         RI = R.getI()
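        # The loop below assembles, column by column against the unit vectors _ee,
        # the Gauss-Newton approximation of the Hessian of J at the optimum,
        #     HessienneI ~ B^{-1} + H'^T R^{-1} H'
        # and the a posteriori covariance A is obtained as its inverse.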
2185         HessienneI = []
2186         nb = Xa.size
2187         for i in range(nb):
2188             _ee    = numpy.matrix(numpy.zeros(nb)).T
2189             _ee[i] = 1.
2190             _HtEE  = numpy.dot(HtM,_ee)
2191             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2192             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2193         HessienneI = numpy.matrix( HessienneI )
2194         A = HessienneI.I
2195         if min(A.shape) != max(A.shape):
2196             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2197         if (numpy.diag(A) < 0).any():
2198             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2199         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2200             try:
2201                 L = numpy.linalg.cholesky( A )
2202             except:
2203                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2204     if selfA._toStore("APosterioriCovariance"):
2205         selfA.StoredVariables["APosterioriCovariance"].store( A )
2206     if selfA._toStore("JacobianMatrixAtOptimum"):
2207         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2208     if selfA._toStore("KalmanGainAtOptimum"):
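        # Two algebraically equivalent expressions of the optimal gain,
        #     K = B H'^T (R + H' B H'^T)^{-1} = (B^{-1} + H'^T R^{-1} H')^{-1} H'^T R^{-1}
        # the first one being cheaper when there are no more observations than
        # state variables, the second one in the opposite case.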
2209         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2210         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2211         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2212     #
2213     # Additional calculations and/or storage
2214     # ---------------------------------------
2215     if selfA._toStore("Innovation") or \
2216         selfA._toStore("SigmaObs2") or \
2217         selfA._toStore("MahalanobisConsistency") or \
2218         selfA._toStore("OMB"):
2219         d  = Y - HXb
2220     if selfA._toStore("Innovation"):
2221         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2222     if selfA._toStore("BMA"):
2223         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2224     if selfA._toStore("OMA"):
2225         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2226     if selfA._toStore("OMB"):
2227         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2228     if selfA._toStore("SigmaObs2"):
2229         TraceR = R.trace(Y.size)
2230         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2231     if selfA._toStore("MahalanobisConsistency"):
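        # 2*J(xa)/p, with p the number of observations: this ratio is expected to
        # be close to 1 when the covariances B and R are statistically consistent
        # with the data (standard Mahalanobis / chi-square consistency check).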
2232         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2233     if selfA._toStore("SimulationQuantiles"):
2234         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2235         HXa  = numpy.matrix(numpy.ravel( HXa )).T
2236         EXr  = None
2237         YfQ  = None
2238         for i in range(nech):
2239             if selfA._parameters["SimulationForQuantiles"] == "Linear":
2240                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2241                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2242                 Yr = HXa + dYr
2243                 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
2244             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2245                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2246                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2247             if YfQ is None:
2248                 YfQ = Yr
2249                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
2250             else:
2251                 YfQ = numpy.hstack((YfQ,Yr))
2252                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
2253         YfQ.sort(axis=-1)
2254         YQ = None
2255         for quantile in selfA._parameters["Quantiles"]:
2256             if not (0. <= float(quantile) <= 1.): continue
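            # Per-component empirical quantile: for each observation component,
            # the value of rank int(nech*quantile - 1/nech) is taken among the
            # nech simulated values sorted above.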
2257             indice = int(nech * float(quantile) - 1./nech)
2258             if YQ is None: YQ = YfQ[:,indice]
2259             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
2260         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2261         if selfA._toStore("SampledStateForQuantiles"):
2262             selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
2263     if selfA._toStore("SimulatedObservationAtBackground"):
2264         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2265     if selfA._toStore("SimulatedObservationAtOptimum"):
2266         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2267     #
2268     return 0
2269
2270 # ==============================================================================
2271 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2272     """
2273     Stochastic EnKF
2274     """
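    # The stochastic ("perturbed observations") EnKF analysis below updates every
    # member of the forecast ensemble with a gain built from ensemble covariances:
    #     K    = Pf H'^T (H' Pf H'^T + R)^{-1}
    #     xa_i = xf_i + K (y + eps_i - H(xf_i)),   eps_i ~ N(0, R)
    # A minimal NumPy sketch of one such update (illustration only, with
    # hypothetical small dense arrays Xf (n x m), HXf (p x m), y (p,), R (p x p)):
    #     Am  = Xf  - Xf.mean(axis=1, keepdims=True)
    #     Ao  = HXf - HXf.mean(axis=1, keepdims=True)
    #     K   = (Am @ Ao.T) @ numpy.linalg.inv(Ao @ Ao.T + (m - 1) * R)
    #     Eps = numpy.random.multivariate_normal(numpy.zeros(p), R, size=m).T
    #     Xa  = Xf + K @ (y.reshape((p, 1)) + Eps - HXf)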
2275     if selfA._parameters["EstimationOf"] == "Parameters":
2276         selfA._parameters["StoreInternalVariables"] = True
2277     #
2278     # Operators
2279     H = HO["Direct"].appliedControledFormTo
2280     #
2281     if selfA._parameters["EstimationOf"] == "State":
2282         M = EM["Direct"].appliedControledFormTo
2283     #
2284     if CM is not None and "Tangent" in CM and U is not None:
2285         Cm = CM["Tangent"].asMatrix(Xb)
2286     else:
2287         Cm = None
2288     #
2289     # Observation duration and sizes
2290     if hasattr(Y,"stepnumber"):
2291         duration = Y.stepnumber()
2292         __p = numpy.cumprod(Y.shape())[-1]
2293     else:
2294         duration = 2
2295         __p = numpy.array(Y).size
2296     #
2297     # Precomputation of the B and R inversions
2298     if selfA._parameters["StoreInternalVariables"] \
2299         or selfA._toStore("CostFunctionJ") \
2300         or selfA._toStore("CostFunctionJb") \
2301         or selfA._toStore("CostFunctionJo") \
2302         or selfA._toStore("CurrentOptimum") \
2303         or selfA._toStore("APosterioriCovariance"):
2304         BI = B.getI()
2305         RI = R.getI()
2306     #
2307     __n = Xb.size
2308     __m = selfA._parameters["NumberOfMembers"]
2309     #
2310     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2311     else:                         Pn = B
2312     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2313     else:                         Rn = R
2314     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2315     else:                         Qn = Q
2316     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2317     #
2318     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2319         selfA.StoredVariables["Analysis"].store( Xb )
2320         if selfA._toStore("APosterioriCovariance"):
2321             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2322             covarianceXa = Pn
2323     #
2324     previousJMinimum = numpy.finfo(float).max
2325     nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Required by the optimum storages in the loop below
2326     for step in range(duration-1):
2327         if hasattr(Y,"store"):
2328             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2329         else:
2330             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2331         #
2332         if U is not None:
2333             if hasattr(U,"store") and len(U)>1:
2334                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2335             elif hasattr(U,"store") and len(U)==1:
2336                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2337             else:
2338                 Un = numpy.asmatrix(numpy.ravel( U )).T
2339         else:
2340             Un = None
2341         #
2342         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2343             Xn = CovarianceInflation( Xn,
2344                 selfA._parameters["InflationType"],
2345                 selfA._parameters["InflationFactor"],
2346                 )
2347         #
2348         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2349             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2350                 argsAsSerie = True,
2351                 returnSerieAsArrayMatrix = True )
2352             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2353             Xn_predicted = EMX + qi
2354             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2355                 argsAsSerie = True,
2356                 returnSerieAsArrayMatrix = True )
2357             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2358                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2359                 Xn_predicted = Xn_predicted + Cm * Un
2360         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2361             # ---> By principle, M = Id, Q = 0
2362             Xn_predicted = Xn
2363             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2364                 argsAsSerie = True,
2365                 returnSerieAsArrayMatrix = True )
2366         #
2367         # Mean of forecast and observation of forecast
2368         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2369         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2370         #
2371         #--------------------------
2372         if VariantM == "KalmanFilterFormula05":
2373             PfHT, HPfHT = 0., 0.
2374             for i in range(__m):
2375                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2376                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2377                 PfHT  += Exfi * Eyfi.T
2378                 HPfHT += Eyfi * Eyfi.T
2379             PfHT  = (1./(__m-1)) * PfHT
2380             HPfHT = (1./(__m-1)) * HPfHT
2381             Kn     = PfHT * ( R + HPfHT ).I
2382             del PfHT, HPfHT
2383             #
2384             for i in range(__m):
2385                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2386                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2387         #--------------------------
2388         elif VariantM == "KalmanFilterFormula16":
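            # Variant in which an ensemble of perturbed observations EpY is drawn
            # from Rn, and the gain is built from centered anomalies normalized by
            # sqrt(m-1), the observation anomalies including the drawn perturbations:
            #     K = EaX EaY^T (EaY EaY^T)^{-1}
            # each member being then updated towards its own perturbed observation.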
2389             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2390             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2391             #
2392             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2393             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2394             #
2395             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2396             #
2397             for i in range(__m):
2398                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2399         #--------------------------
2400         else:
2401             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2402         #
2403         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2404             Xn = CovarianceInflation( Xn,
2405                 selfA._parameters["InflationType"],
2406                 selfA._parameters["InflationFactor"],
2407                 )
2408         #
2409         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2410         #--------------------------
2411         #
2412         if selfA._parameters["StoreInternalVariables"] \
2413             or selfA._toStore("CostFunctionJ") \
2414             or selfA._toStore("CostFunctionJb") \
2415             or selfA._toStore("CostFunctionJo") \
2416             or selfA._toStore("APosterioriCovariance") \
2417             or selfA._toStore("InnovationAtCurrentAnalysis") \
2418             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2419             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2420             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2421             _Innovation = Ynpu - _HXa
2422         #
2423         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2424         # ---> with the analysis
2425         selfA.StoredVariables["Analysis"].store( Xa )
2426         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2427             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2428         if selfA._toStore("InnovationAtCurrentAnalysis"):
2429             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2430         # ---> with the current state
2431         if selfA._parameters["StoreInternalVariables"] \
2432             or selfA._toStore("CurrentState"):
2433             selfA.StoredVariables["CurrentState"].store( Xn )
2434         if selfA._toStore("ForecastState"):
2435             selfA.StoredVariables["ForecastState"].store( EMX )
2436         if selfA._toStore("BMA"):
2437             selfA.StoredVariables["BMA"].store( EMX - Xa )
2438         if selfA._toStore("InnovationAtCurrentState"):
2439             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2440         if selfA._toStore("SimulatedObservationAtCurrentState") \
2441             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2442             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2443         # ---> others
2444         if selfA._parameters["StoreInternalVariables"] \
2445             or selfA._toStore("CostFunctionJ") \
2446             or selfA._toStore("CostFunctionJb") \
2447             or selfA._toStore("CostFunctionJo") \
2448             or selfA._toStore("CurrentOptimum") \
2449             or selfA._toStore("APosterioriCovariance"):
2450             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2451             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2452             J   = Jb + Jo
2453             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2454             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2455             selfA.StoredVariables["CostFunctionJ" ].store( J )
2456             #
2457             if selfA._toStore("IndexOfOptimum") \
2458                 or selfA._toStore("CurrentOptimum") \
2459                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2460                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2461                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2462                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2463                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2464             if selfA._toStore("IndexOfOptimum"):
2465                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2466             if selfA._toStore("CurrentOptimum"):
2467                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2468             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2469                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2470             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2471                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2472             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2473                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2474             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2475                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2476         if selfA._toStore("APosterioriCovariance"):
2477             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2478         if selfA._parameters["EstimationOf"] == "Parameters" \
2479             and J < previousJMinimum:
2480             previousJMinimum    = J
2481             XaMin               = Xa
2482             if selfA._toStore("APosterioriCovariance"):
2483                 covarianceXaMin = Pn
2484     #
2485     # Additional final storage of the optimum in parameter estimation
2486     # ----------------------------------------------------------------------
2487     if selfA._parameters["EstimationOf"] == "Parameters":
2488         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2489         selfA.StoredVariables["Analysis"].store( XaMin )
2490         if selfA._toStore("APosterioriCovariance"):
2491             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2492         if selfA._toStore("BMA"):
2493             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2494     #
2495     return 0
2496
2497 # ==============================================================================
2498 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2499     """
2500     3DVAR
2501     """
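    # The quantity minimized below is the classical 3DVAR cost function
    #     J(x)  = 1/2 (x - xb)^T B^{-1} (x - xb) + 1/2 (y - H(x))^T R^{-1} (y - H(x))
    # whose gradient
    #     dJ/dx = B^{-1} (x - xb) - H'^T R^{-1} (y - H(x))
    # is implemented by CostFunction and GradientOfCostFunction.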
2502     #
2503     # Initializations
2504     # ---------------
2505     #
2506     # Operators
2507     Hm = HO["Direct"].appliedTo
2508     Ha = HO["Adjoint"].appliedInXTo
2509     #
2510     # Possible use of a precomputed H(Xb) vector
2511     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2512         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2513     else:
2514         HXb = Hm( Xb )
2515     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2516     if Y.size != HXb.size:
2517         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2518     if max(Y.shape) != max(HXb.shape):
2519         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2520     #
2521     if selfA._toStore("JacobianMatrixAtBackground"):
2522         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2523         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2524         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2525     #
2526     # Precomputation of the B and R inversions
2527     BI = B.getI()
2528     RI = R.getI()
2529     #
2530     # Starting point of the optimization
2531     Xini = selfA._parameters["InitializationPoint"]
2532     #
2533     # Definition of the cost function
2534     # ------------------------------
2535     def CostFunction(x):
2536         _X  = numpy.asmatrix(numpy.ravel( x )).T
2537         if selfA._parameters["StoreInternalVariables"] or \
2538             selfA._toStore("CurrentState") or \
2539             selfA._toStore("CurrentOptimum"):
2540             selfA.StoredVariables["CurrentState"].store( _X )
2541         _HX = Hm( _X )
2542         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2543         _Innovation = Y - _HX
2544         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2545             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2546             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2547         if selfA._toStore("InnovationAtCurrentState"):
2548             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2549         #
2550         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2551         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2552         J   = Jb + Jo
2553         #
2554         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2555         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2556         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2557         selfA.StoredVariables["CostFunctionJ" ].store( J )
2558         if selfA._toStore("IndexOfOptimum") or \
2559             selfA._toStore("CurrentOptimum") or \
2560             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2561             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2562             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2563             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2564             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2565         if selfA._toStore("IndexOfOptimum"):
2566             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2567         if selfA._toStore("CurrentOptimum"):
2568             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2569         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2570             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2571         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2572             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2573         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2574             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2575         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2576             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2577         return J
2578     #
2579     def GradientOfCostFunction(x):
2580         _X      = numpy.asmatrix(numpy.ravel( x )).T
2581         _HX     = Hm( _X )
2582         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
2583         GradJb  = BI * (_X - Xb)
2584         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
2585         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2586         return GradJ
2587     #
2588     # Minimization of the functional
2589     # --------------------------------
2590     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2591     #
2592     if selfA._parameters["Minimizer"] == "LBFGSB":
2593         if "0.19" <= scipy.version.version <= "1.1.0":
2594             import lbfgsbhlt as optimiseur
2595         else:
2596             import scipy.optimize as optimiseur
2597         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2598             func        = CostFunction,
2599             x0          = Xini,
2600             fprime      = GradientOfCostFunction,
2601             args        = (),
2602             bounds      = selfA._parameters["Bounds"],
2603             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2604             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2605             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2606             iprint      = selfA._parameters["optiprint"],
2607             )
2608         nfeval = Informations['funcalls']
2609         rc     = Informations['warnflag']
2610     elif selfA._parameters["Minimizer"] == "TNC":
2611         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2612             func        = CostFunction,
2613             x0          = Xini,
2614             fprime      = GradientOfCostFunction,
2615             args        = (),
2616             bounds      = selfA._parameters["Bounds"],
2617             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2618             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2619             ftol        = selfA._parameters["CostDecrementTolerance"],
2620             messages    = selfA._parameters["optmessages"],
2621             )
2622     elif selfA._parameters["Minimizer"] == "CG":
2623         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2624             f           = CostFunction,
2625             x0          = Xini,
2626             fprime      = GradientOfCostFunction,
2627             args        = (),
2628             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2629             gtol        = selfA._parameters["GradientNormTolerance"],
2630             disp        = selfA._parameters["optdisp"],
2631             full_output = True,
2632             )
2633     elif selfA._parameters["Minimizer"] == "NCG":
2634         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2635             f           = CostFunction,
2636             x0          = Xini,
2637             fprime      = GradientOfCostFunction,
2638             args        = (),
2639             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2640             avextol     = selfA._parameters["CostDecrementTolerance"],
2641             disp        = selfA._parameters["optdisp"],
2642             full_output = True,
2643             )
2644     elif selfA._parameters["Minimizer"] == "BFGS":
2645         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2646             f           = CostFunction,
2647             x0          = Xini,
2648             fprime      = GradientOfCostFunction,
2649             args        = (),
2650             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2651             gtol        = selfA._parameters["GradientNormTolerance"],
2652             disp        = selfA._parameters["optdisp"],
2653             full_output = True,
2654             )
2655     else:
2656         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2657     #
2658     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2659     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2660     #
2661     # Correction to work around a TNC bug on the returned Minimum
2662     # ----------------------------------------------------------------
2663     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2664         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2665     #
2666     # Obtaining the analysis
2667     # ----------------------
2668     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2669     #
2670     selfA.StoredVariables["Analysis"].store( Xa )
2671     #
2672     if selfA._toStore("OMA") or \
2673         selfA._toStore("SigmaObs2") or \
2674         selfA._toStore("SimulationQuantiles") or \
2675         selfA._toStore("SimulatedObservationAtOptimum"):
2676         if selfA._toStore("SimulatedObservationAtCurrentState"):
2677             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2678         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2679             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2680         else:
2681             HXa = Hm( Xa )
2682     #
2683     # Computation of the analysis error covariance
2684     # ---------------------------------
2685     if selfA._toStore("APosterioriCovariance") or \
2686         selfA._toStore("SimulationQuantiles") or \
2687         selfA._toStore("JacobianMatrixAtOptimum") or \
2688         selfA._toStore("KalmanGainAtOptimum"):
2689         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2690         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2691     if selfA._toStore("APosterioriCovariance") or \
2692         selfA._toStore("SimulationQuantiles") or \
2693         selfA._toStore("KalmanGainAtOptimum"):
2694         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2695         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2696     if selfA._toStore("APosterioriCovariance") or \
2697         selfA._toStore("SimulationQuantiles"):
2698         HessienneI = []
2699         nb = Xa.size
2700         for i in range(nb):
2701             _ee    = numpy.matrix(numpy.zeros(nb)).T
2702             _ee[i] = 1.
2703             _HtEE  = numpy.dot(HtM,_ee)
2704             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2705             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2706         HessienneI = numpy.matrix( HessienneI )
2707         A = HessienneI.I
2708         if min(A.shape) != max(A.shape):
2709             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2710         if (numpy.diag(A) < 0).any():
2711             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2712         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2713             try:
2714                 L = numpy.linalg.cholesky( A )
2715             except:
2716                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2717     if selfA._toStore("APosterioriCovariance"):
2718         selfA.StoredVariables["APosterioriCovariance"].store( A )
2719     if selfA._toStore("JacobianMatrixAtOptimum"):
2720         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2721     if selfA._toStore("KalmanGainAtOptimum"):
2722         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2723         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2724         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2725     #
2726     # Additional calculations and/or storage
2727     # ---------------------------------------
2728     if selfA._toStore("Innovation") or \
2729         selfA._toStore("SigmaObs2") or \
2730         selfA._toStore("MahalanobisConsistency") or \
2731         selfA._toStore("OMB"):
2732         d  = Y - HXb
2733     if selfA._toStore("Innovation"):
2734         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2735     if selfA._toStore("BMA"):
2736         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2737     if selfA._toStore("OMA"):
2738         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2739     if selfA._toStore("OMB"):
2740         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2741     if selfA._toStore("SigmaObs2"):
2742         TraceR = R.trace(Y.size)
2743         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2744     if selfA._toStore("MahalanobisConsistency"):
2745         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2746     if selfA._toStore("SimulationQuantiles"):
2747         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2748         HXa  = numpy.matrix(numpy.ravel( HXa )).T
2749         EXr  = None
2750         YfQ  = None
2751         for i in range(nech):
2752             if selfA._parameters["SimulationForQuantiles"] == "Linear":
2753                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2754                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2755                 Yr = HXa + dYr
2756                 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
2757             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2758                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2759                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2760             if YfQ is None:
2761                 YfQ = Yr
2762                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
2763             else:
2764                 YfQ = numpy.hstack((YfQ,Yr))
2765                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
2766         YfQ.sort(axis=-1)
2767         YQ = None
2768         for quantile in selfA._parameters["Quantiles"]:
2769             if not (0. <= float(quantile) <= 1.): continue
2770             indice = int(nech * float(quantile) - 1./nech)
2771             if YQ is None: YQ = YfQ[:,indice]
2772             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
2773         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2774         if selfA._toStore("SampledStateForQuantiles"):
2775             selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
2776     if selfA._toStore("SimulatedObservationAtBackground"):
2777         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2778     if selfA._toStore("SimulatedObservationAtOptimum"):
2779         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2780     #
2781     return 0
2782
2783 # ==============================================================================
2784 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2785     """
2786     4DVAR
2787     """
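    # The quantity minimized below is the strong-constraint 4DVAR cost function
    #     J(x0) = 1/2 (x0 - xb)^T B^{-1} (x0 - xb)
    #             + 1/2 sum_k (y_k - H(x_k))^T R^{-1} (y_k - H(x_k))
    # with x_{k+1} = M(x_k, u_k); its gradient with respect to x0 is computed by
    # the backward adjoint sweep implemented in GradientOfCostFunction.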
2788     #
2789     # Initializations
2790     # ---------------
2791     #
2792     # Operators
2793     Hm = HO["Direct"].appliedControledFormTo
2794     Mm = EM["Direct"].appliedControledFormTo
2795     #
2796     if CM is not None and "Tangent" in CM and U is not None:
2797         Cm = CM["Tangent"].asMatrix(Xb)
2798     else:
2799         Cm = None
2800     #
2801     def Un(_step):
2802         if U is not None:
2803             if hasattr(U,"store") and 1<=_step<len(U) :
2804                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2805             elif hasattr(U,"store") and len(U)==1:
2806                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2807             else:
2808                 _Un = numpy.asmatrix(numpy.ravel( U )).T
2809         else:
2810             _Un = None
2811         return _Un
2812     def CmUn(_xn,_un):
2813         if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
2814             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2815             _CmUn = _Cm * _un
2816         else:
2817             _CmUn = 0.
2818         return _CmUn
2819     #
2820     # Remark: the observations are used starting from time step number 1,
2821     # and they are stored in Yo as indexed by these step numbers. Hence
2822     # step 0 is not used, since the first stage starts with the observation
2823     # of step 1.
2824     #
2825     # Number of steps identical to the number of observation steps
2826     if hasattr(Y,"stepnumber"):
2827         duration = Y.stepnumber()
2828     else:
2829         duration = 2
2830     #
2831     # Precomputation of the B and R inversions
2832     BI = B.getI()
2833     RI = R.getI()
2834     #
2835     # Starting point of the optimization
2836     Xini = selfA._parameters["InitializationPoint"]
2837     #
2838     # Definition of the cost function
2839     # ------------------------------
2840     selfA.DirectCalculation = [None,] # Step 0 is not observed
2841     selfA.DirectInnovation  = [None,] # Step 0 is not observed
2842     def CostFunction(x):
2843         _X  = numpy.asmatrix(numpy.ravel( x )).T
2844         if selfA._parameters["StoreInternalVariables"] or \
2845             selfA._toStore("CurrentState") or \
2846             selfA._toStore("CurrentOptimum"):
2847             selfA.StoredVariables["CurrentState"].store( _X )
2848         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2849         selfA.DirectCalculation = [None,]
2850         selfA.DirectInnovation  = [None,]
2851         Jo  = 0.
2852         _Xn = _X
2853         for step in range(0,duration-1):
2854             if hasattr(Y,"store"):
2855                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2856             else:
2857                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2858             _Un = Un(step)
2859             #
2860             # Evolution step
2861             if selfA._parameters["EstimationOf"] == "State":
2862                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2863             elif selfA._parameters["EstimationOf"] == "Parameters":
2864                 pass
2865             #
2866             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2867                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2868                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2869             #
2870             # Difference with the observations
2871             if selfA._parameters["EstimationOf"] == "State":
2872                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2873             elif selfA._parameters["EstimationOf"] == "Parameters":
2874                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2875             #
2876             # Storage of the state
2877             selfA.DirectCalculation.append( _Xn )
2878             selfA.DirectInnovation.append( _YmHMX )
2879             #
2880             # Addition to the observation term of the functional
2881             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2882         J = Jb + Jo
2883         #
2884         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2885         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2886         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2887         selfA.StoredVariables["CostFunctionJ" ].store( J )
2888         if selfA._toStore("IndexOfOptimum") or \
2889             selfA._toStore("CurrentOptimum") or \
2890             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2891             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2892             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2893             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2894         if selfA._toStore("IndexOfOptimum"):
2895             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2896         if selfA._toStore("CurrentOptimum"):
2897             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2898         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2899             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2900         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2901             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2902         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2903             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2904         return J
2905     #
2906     def GradientOfCostFunction(x):
2907         _X      = numpy.asmatrix(numpy.ravel( x )).T
2908         GradJb  = BI * (_X - Xb)
2909         GradJo  = 0.
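        # Backward (adjoint) sweep over the stored trajectory: the recursion
        #     g <- Ma ( g + Ha R^{-1} d_k )
        # (Ha, Ma being the adjoint operator matrices) accumulates the observation
        # terms, and the complete gradient is GradJb - g.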
2910         for step in range(duration-1,0,-1):
2911             # Retrieval of the last stored evolution state
2912             _Xn = selfA.DirectCalculation.pop()
2913             # Retrieval of the last stored innovation
2914             _YmHMX = selfA.DirectInnovation.pop()
2915             # Computation of the adjoints
2916             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2917             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2918             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2919             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2920             # Gradient computation by adjoint state
2921             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
2922             GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
2923         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2924         return GradJ
2925     #
2926     # Minimization of the functional
2927     # --------------------------------
2928     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2929     #
2930     if selfA._parameters["Minimizer"] == "LBFGSB":
2931         if "0.19" <= scipy.version.version <= "1.1.0":
2932             import lbfgsbhlt as optimiseur
2933         else:
2934             import scipy.optimize as optimiseur
2935         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2936             func        = CostFunction,
2937             x0          = Xini,
2938             fprime      = GradientOfCostFunction,
2939             args        = (),
2940             bounds      = selfA._parameters["Bounds"],
2941             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2942             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2943             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2944             iprint      = selfA._parameters["optiprint"],
2945             )
2946         nfeval = Informations['funcalls']
2947         rc     = Informations['warnflag']
2948     elif selfA._parameters["Minimizer"] == "TNC":
2949         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2950             func        = CostFunction,
2951             x0          = Xini,
2952             fprime      = GradientOfCostFunction,
2953             args        = (),
2954             bounds      = selfA._parameters["Bounds"],
2955             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2956             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2957             ftol        = selfA._parameters["CostDecrementTolerance"],
2958             messages    = selfA._parameters["optmessages"],
2959             )
2960     elif selfA._parameters["Minimizer"] == "CG":
2961         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2962             f           = CostFunction,
2963             x0          = Xini,
2964             fprime      = GradientOfCostFunction,
2965             args        = (),
2966             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2967             gtol        = selfA._parameters["GradientNormTolerance"],
2968             disp        = selfA._parameters["optdisp"],
2969             full_output = True,
2970             )
2971     elif selfA._parameters["Minimizer"] == "NCG":
2972         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2973             f           = CostFunction,
2974             x0          = Xini,
2975             fprime      = GradientOfCostFunction,
2976             args        = (),
2977             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2978             avextol     = selfA._parameters["CostDecrementTolerance"],
2979             disp        = selfA._parameters["optdisp"],
2980             full_output = True,
2981             )
2982     elif selfA._parameters["Minimizer"] == "BFGS":
2983         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2984             f           = CostFunction,
2985             x0          = Xini,
2986             fprime      = GradientOfCostFunction,
2987             args        = (),
2988             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2989             gtol        = selfA._parameters["GradientNormTolerance"],
2990             disp        = selfA._parameters["optdisp"],
2991             full_output = True,
2992             )
2993     else:
2994         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2995     #
2996     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2997     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2998     #
2999     # Correction to work around a TNC bug on the returned Minimum
3000     # ----------------------------------------------------------------
3001     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3002         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3003     #
3004     # Obtaining the analysis
3005     # ----------------------
3006     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3007     #
3008     selfA.StoredVariables["Analysis"].store( Xa )
3009     #
3010     # Additional calculations and/or storage
3011     # ---------------------------------------
3012     if selfA._toStore("BMA"):
3013         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3014     #
3015     return 0
3016
3017 # ==============================================================================
3018 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3019     """
3020     3DVAR variational analysis with no inversion of B
3021     """
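    # Same 3DVAR cost function as in std3dvar, but minimized with respect to the
    # control increment v through the change of variables x = xb + B v, so that
    # the background term reads Jb(v) = 1/2 v^T B^T v and B never needs to be
    # inverted inside the cost function; the minimization starts from v = 0,
    # which corresponds to x = Xb.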
3022     #
3023     # Initializations
3024     # ---------------
3025     #
3026     # Operators
3027     Hm = HO["Direct"].appliedTo
3028     Ha = HO["Adjoint"].appliedInXTo
3029     #
3030     # Precomputation of the transpose of B and of the R inversion
3031     BT = B.getT()
3032     RI = R.getI()
3033     #
3034     # Starting point of the optimization
3035     Xini = numpy.zeros(Xb.shape)
3036     #
3037     # Definition of the cost function
3038     # ------------------------------
3039     def CostFunction(v):
3040         _V = numpy.asmatrix(numpy.ravel( v )).T
3041         _X = Xb + B * _V
3042         if selfA._parameters["StoreInternalVariables"] or \
3043             selfA._toStore("CurrentState") or \
3044             selfA._toStore("CurrentOptimum"):
3045             selfA.StoredVariables["CurrentState"].store( _X )
3046         _HX = Hm( _X )
3047         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3048         _Innovation = Y - _HX
3049         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3050             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3051             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3052         if selfA._toStore("InnovationAtCurrentState"):
3053             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3054         #
3055         Jb  = float( 0.5 * _V.T * BT * _V )
3056         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3057         J   = Jb + Jo
3058         #
3059         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3060         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3061         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3062         selfA.StoredVariables["CostFunctionJ" ].store( J )
3063         if selfA._toStore("IndexOfOptimum") or \
3064             selfA._toStore("CurrentOptimum") or \
3065             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3066             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3067             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3068             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3069             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3070         if selfA._toStore("IndexOfOptimum"):
3071             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3072         if selfA._toStore("CurrentOptimum"):
3073             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3074         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3075             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3076         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3077             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3078         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3079             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3080         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3081             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3082         return J
3083     #
3084     def GradientOfCostFunction(v):
3085         _V = numpy.asmatrix(numpy.ravel( v )).T
3086         _X = Xb + B * _V
3087         _HX     = Hm( _X )
3088         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3089         GradJb  = BT * _V
3090         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3091         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3092         return GradJ
3093     #
3094     # Minimization of the functional
3095     # --------------------------------
3096     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3097     #
3098     if selfA._parameters["Minimizer"] == "LBFGSB":
3099         if "0.19" <= scipy.version.version <= "1.1.0":
3100             import lbfgsbhlt as optimiseur
3101         else:
3102             import scipy.optimize as optimiseur
3103         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3104             func        = CostFunction,
3105             x0          = Xini,
3106             fprime      = GradientOfCostFunction,
3107             args        = (),
3108             bounds      = selfA._parameters["Bounds"],
3109             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3110             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3111             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3112             iprint      = selfA._parameters["optiprint"],
3113             )
3114         nfeval = Informations['funcalls']
3115         rc     = Informations['warnflag']
3116     elif selfA._parameters["Minimizer"] == "TNC":
3117         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3118             func        = CostFunction,
3119             x0          = Xini,
3120             fprime      = GradientOfCostFunction,
3121             args        = (),
3122             bounds      = selfA._parameters["Bounds"],
3123             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3124             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3125             ftol        = selfA._parameters["CostDecrementTolerance"],
3126             messages    = selfA._parameters["optmessages"],
3127             )
3128     elif selfA._parameters["Minimizer"] == "CG":
3129         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3130             f           = CostFunction,
3131             x0          = Xini,
3132             fprime      = GradientOfCostFunction,
3133             args        = (),
3134             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3135             gtol        = selfA._parameters["GradientNormTolerance"],
3136             disp        = selfA._parameters["optdisp"],
3137             full_output = True,
3138             )
3139     elif selfA._parameters["Minimizer"] == "NCG":
3140         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3141             f           = CostFunction,
3142             x0          = Xini,
3143             fprime      = GradientOfCostFunction,
3144             args        = (),
3145             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3146             avextol     = selfA._parameters["CostDecrementTolerance"],
3147             disp        = selfA._parameters["optdisp"],
3148             full_output = True,
3149             )
3150     elif selfA._parameters["Minimizer"] == "BFGS":
3151         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3152             f           = CostFunction,
3153             x0          = Xini,
3154             fprime      = GradientOfCostFunction,
3155             args        = (),
3156             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3157             gtol        = selfA._parameters["GradientNormTolerance"],
3158             disp        = selfA._parameters["optdisp"],
3159             full_output = True,
3160             )
3161     else:
3162         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3163     #
3164     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3165     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3166     #
3167     # Correction to work around a TNC bug on the returned Minimum
3168     # ----------------------------------------------------------------
3169     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3170         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3171         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3172     else:
3173         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3174     #
3175     # Obtaining the analysis
3176     # ----------------------
3177     Xa = Minimum
3178     #
3179     selfA.StoredVariables["Analysis"].store( Xa )
3180     #
3181     if selfA._toStore("OMA") or \
3182         selfA._toStore("SigmaObs2") or \
3183         selfA._toStore("SimulationQuantiles") or \
3184         selfA._toStore("SimulatedObservationAtOptimum"):
3185         if selfA._toStore("SimulatedObservationAtCurrentState"):
3186             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3187         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3188             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3189         else:
3190             HXa = Hm( Xa )
3191     #
3192     # Computation of the analysis error covariance
3193     # ---------------------------------
3194     if selfA._toStore("APosterioriCovariance") or \
3195         selfA._toStore("SimulationQuantiles") or \
3196         selfA._toStore("JacobianMatrixAtOptimum") or \
3197         selfA._toStore("KalmanGainAtOptimum"):
3198         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3199         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3200     if selfA._toStore("APosterioriCovariance") or \
3201         selfA._toStore("SimulationQuantiles") or \
3202         selfA._toStore("KalmanGainAtOptimum"):
3203         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3204         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3205     if selfA._toStore("APosterioriCovariance") or \
3206         selfA._toStore("SimulationQuantiles"):
3207         BI = B.getI()
3208         HessienneI = []
3209         nb = Xa.size
3210         for i in range(nb):
3211             _ee    = numpy.matrix(numpy.zeros(nb)).T
3212             _ee[i] = 1.
3213             _HtEE  = numpy.dot(HtM,_ee)
3214             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3215             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3216         HessienneI = numpy.matrix( HessienneI )
3217         A = HessienneI.I
3218         if min(A.shape) != max(A.shape):
3219             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3220         if (numpy.diag(A) < 0).any():
3221             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3222         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
3223             try:
3224                 L = numpy.linalg.cholesky( A )
3225             except:
3226                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
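        # Two equivalent expressions of the gain, chosen according to the relative sizes
        # of the observation and state spaces: K = B H^T (R + H B H^T)^-1 when there are
        # no more observations than state variables, K = (B^-1 + H^T R^-1 H)^-1 H^T R^-1
        # otherwise.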
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storages
    # ---------------------------------------
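    # d = Y - HXb is the innovation (observation minus simulated background); the BMA,
    # OMA and OMB variables store the background-minus-analysis, observation-minus-analysis
    # and observation-minus-background departures respectively.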
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d  = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
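    # SigmaObs2 stores the indicator d^T (Y - HXa) / Tr(R), an a posteriori estimate of
    # the scaling of the observation error variances; MahalanobisConsistency stores
    # 2*J(Xa)/p, with p the number of observations, which is expected to be close to 1
    # when the error statistics are consistent with the data.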
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
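    # The quantiles of the simulated observations are estimated by Monte Carlo: nech
    # states are drawn from N(Xa, A) and propagated either through the tangent linear
    # operator around Xa ("Linear") or through the full observation operator
    # ("NonLinear"); the empirical quantiles of the sorted simulations are then stored.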
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        EXr  = None
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr = HXa + dYr
                if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
                if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
            else:
                YfQ = numpy.hstack((YfQ,Yr))
                if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
        if selfA._toStore("SampledStateForQuantiles"):
            selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0

# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')