modules/adao.git: src/daComposant/daCore/NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     This class provides an interface to define approximated operators. When an
55     object is created by providing a function "Function", one obtains an object
56     having the 3 methods "DirectOperator", "TangentOperator" and "AdjointOperator".
57     The finite-difference (FD) approximation is controlled by the multiplicative
58     increment "increment" (1% by default), or by the fixed increment "dX" which
59     is then multiplied by "increment" (hence in %), and centered FD are used if
60     the boolean "centeredDF" is true.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Direct computation using the provided function.
171
172         NB : the extraArgs are present to ensure call compatibility, but they
173         must not be passed here to the user function.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Computation of the tangent operator as the Jacobian by finite differences,
188         that is, the gradient of H at X. Directional finite differences are used
189         around the point X. X is a numpy.matrix.
190
191         Centered finite differences (2nd order approximation):
192         1/ For each component i of X, the perturbation dX[i] is added to and
193            subtracted from the component X[i], to build X_plus_dXi and X_moins_dXi,
194            and the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi =
195            H( X_moins_dXi ) are computed
196         2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided by
197            the step 2*dXi
198         3/ Each result, component by component, becomes a column of the Jacobian
199
200         Non-centered finite differences (1st order approximation):
201         1/ For each component i of X, the perturbation dX[i] is added to the
202            component X[i] to build X_plus_dXi, and the response
203            HX_plus_dXi = H( X_plus_dXi ) is computed
204         2/ The central value HX = H(X) is computed
205         3/ The differences (HX_plus_dXi-HX) are computed and divided by
206            the step dXi
207         4/ Each result, component by component, becomes a column of the Jacobian
208
209         """
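        # Editor's note (illustrative, using the notations of the docstring above):
        # written compactly, the i-th column of the Jacobian approximation is
        #     centered     : J[:,i] ~ ( H(X + dXi*e_i) - H(X - dXi*e_i) ) / (2*dXi)
        #     non-centered : J[:,i] ~ ( H(X + dXi*e_i) - H(X) ) / dXi
        # where e_i is the i-th canonical basis vector and dXi = dX[i].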
210         logging.debug("FDA Début du calcul de la Jacobienne")
211         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Calcul Jacobienne (explicite)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                     #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                     #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
377         #
378         logging.debug("FDA Fin du calcul de la Jacobienne")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Tangent computation using the Jacobian.
386
387         NB : the extraArgs are present to ensure call compatibility, but they
388         must not be passed here to the user function.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect length of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Computation of the matrix form if the second argument is None
402             # --------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Computation of the linearised value of H at X applied to dX
408             # ------------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Adjoint computation using the Jacobian.
418
419         NB : the extraArgs are present to ensure call compatibility, but they
420         must not be passed here to the user function.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect length of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Computation of the matrix form if the second argument is None
434             # --------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Computation of the adjoint value at X applied to Y
440             # ---------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
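# ------------------------------------------------------------------------------
# Editor's note: the function below is an illustrative sketch, not part of the
# ADAO API. It shows a minimal, hypothetical use of FDApproximation on a simple
# user operator, assuming only the imports at the top of this module.
def _editor_example_FDApproximation():
    "Illustrative sketch (hypothetical): finite-difference Jacobian of a small quadratic operator"
    def QuadraticOperator( x ):
        x = numpy.ravel( x )
        return numpy.array([ x[0]**2, x[0]*x[1] ])
    #
    FDA = FDApproximation( Function = QuadraticOperator, centeredDF = True )
    X0  = numpy.array([1., 2.])
    J   = FDA.TangentMatrix( X0 )                                # expected close to [[2., 0.], [2., 1.]]
    HtX = FDA.TangentOperator( (X0, numpy.array([1., 0.])) )     # J applied to the direction dX
    HaY = FDA.AdjointOperator( (X0, numpy.array([1., 0.])) )     # J.T applied to the direction Y
    return J, HtX, HaY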
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448     "Generation of an ensemble of _nbmembers centered random states"
449     #
450     _bgcenter = numpy.ravel(_bgcenter)[:,None]
451     if _nbmembers < 1:
452         raise ValueError("Number of members has to be at least 1 (given number: %s)."%(str(_nbmembers),))
453     #
454     if _bgcovariance is None:
455         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
456     else:
457         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
459     #
460     return BackgroundEnsemble
461
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464     "Generation of an ensemble of _nbmembers centered random states"
465     def __CenteredRandomAnomalies(Zr, N):
466         """
467         Generates a matrix of N random anomalies centered on Zr, following the
468         handwritten notes of MB and consistent with the code of PS with eps = -1
469         """
470         eps = -1
471         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
474         Q = numpy.dot(Q,R)
475         Zr = numpy.dot(Q,Zr)
476         return Zr.T
477     #
478     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
479     if _nbmembers < 1:
480         raise ValueError("Number of members has to be at least 1 (given number: %s)."%(str(_nbmembers),))
481     if _bgcovariance is None:
482         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
483     else:
484         if _withSVD:
485             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486             _nbctl = _bgcenter.size
487             if _nbmembers > _nbctl:
488                 _Z = numpy.concatenate((numpy.dot(
489                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
491             else:
492                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494             BackgroundEnsemble = _bgcenter + _Zca
495         else:
496             if max(abs(_bgcovariance.flatten())) > 0:
497                 _nbctl = _bgcenter.size
498                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500                 BackgroundEnsemble = _bgcenter + _Zca
501             else:
502                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
503     #
504     return BackgroundEnsemble
505
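# ------------------------------------------------------------------------------
# Editor's note: illustrative sketch (hypothetical, not part of the ADAO API),
# showing the two background ensemble generators above on a 2-variable state.
def _editor_example_EnsembleGeneration():
    "Illustrative sketch (hypothetical): generation of background ensembles around a given center"
    __xb = numpy.array([0., 10.])                                # background center, n = 2
    __B  = numpy.array([[1., 0.], [0., 0.25]])                   # background covariance
    __E1 = EnsembleOfCenteredPerturbations(  __xb, __B, 50 )     # direct sampling of B
    __E2 = EnsembleOfBackgroundPerturbations( __xb, __B, 50 )    # centered anomalies built from the SVD of B
    return __E1, __E2                                            # both of shape (2, 50)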
506 # ==============================================================================
507 def EnsembleMean( __Ensemble ):
508     "Returns the empirical mean of an ensemble"
509     return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
510
511 # ==============================================================================
512 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
513     "Returns the centered anomalies computed from an ensemble"
514     if OptMean is None:
515         __Em = EnsembleMean( Ensemble )
516     else:
517         __Em = numpy.ravel(OptMean).reshape((-1,1))
518     #
519     return Normalisation * (numpy.asarray(Ensemble) - __Em)
520
521 # ==============================================================================
522 def EnsembleErrorCovariance( Ensemble, __quick = False ):
523     "Returns the empirical estimate of the ensemble error covariance"
524     if __quick:
525         # Fast covariance, but rarely positive definite
526         __Covariance = numpy.cov(Ensemble)
527     else:
528         # Result often identical to numpy.cov, but more robust
529         __n, __m = numpy.asarray(Ensemble).shape
530         __Anomalies = EnsembleOfAnomalies( Ensemble )
531         # Empirical estimate
532         __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
533         # Enforce symmetry
534         __Covariance = (__Covariance + __Covariance.T) * 0.5
535         # Enforce positivity
536         __epsilon    = mpr*numpy.trace(__Covariance)
537         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
538     #
539     return __Covariance
540
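# ------------------------------------------------------------------------------
# Editor's note: illustrative sketch (hypothetical, not part of the ADAO API),
# showing how the three ensemble helpers above fit together.
def _editor_example_EnsembleStatistics():
    "Illustrative sketch (hypothetical): mean, anomalies and empirical covariance of a small ensemble"
    __E = numpy.array([[1., 2., 3.],
                       [0., 1., -1.]])                   # n = 2 states, m = 3 members
    __Xm = EnsembleMean( __E )                           # column vector of shape (2, 1)
    __A  = EnsembleOfAnomalies( __E )                    # __E - __Xm, same shape as __E
    __P  = EnsembleErrorCovariance( __E )                # ~ (__A @ __A.T)/(m-1), symmetrized and regularized
    return __Xm, __A, __P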
541 # ==============================================================================
542 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
543     "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
544     if hasattr(__Covariance,"assparsematrix"):
545         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
546             # Handling of a null or almost null covariance
547             return __Ensemble
548         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
549             # Handling of a null or almost null covariance
550             return __Ensemble
551     else:
552         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
553             # Handling of a null or almost null covariance
554             return __Ensemble
555         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
556             # Handling of a null or almost null covariance
557             return __Ensemble
558     #
559     __n, __m = __Ensemble.shape
560     if __Seed is not None: numpy.random.seed(__Seed)
561     #
562     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
563         # Handling of a covariance that is a multiple of the identity
564         __zero = 0.
565         __std  = numpy.sqrt(__Covariance.assparsematrix())
566         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
567     #
568     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
569         # Handling of a diagonal covariance with non-identical variances
570         __zero = numpy.zeros(__n)
571         __std  = numpy.sqrt(__Covariance.assparsematrix())
572         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
573     #
574     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
575         # Handling of a full covariance
576         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
577     #
578     elif isinstance(__Covariance, numpy.ndarray):
579         # Handling of a full numpy covariance, knowing that this case is reached last
580         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
581     #
582     else:
583         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
584     #
585     return __Ensemble
586
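# ------------------------------------------------------------------------------
# Editor's note: illustrative sketch (hypothetical, not part of the ADAO API),
# showing the perturbation of an ensemble with a plain numpy covariance matrix.
def _editor_example_EnsemblePerturbation():
    "Illustrative sketch (hypothetical): perturbation of a zero ensemble with a prescribed full covariance"
    __E = numpy.zeros((2, 500))                          # n = 2 states, m = 500 members
    __C = numpy.array([[1., 0.], [0., 4.]])              # prescribed covariance
    __Ep = EnsemblePerturbationWithGivenCovariance( __E, __C, __Seed = 1000 )
    return EnsembleErrorCovariance( __Ep )               # close to __C for a large number of members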
587 # ==============================================================================
588 def CovarianceInflation(
589         InputCovOrEns,
590         InflationType   = None,
591         InflationFactor = None,
592         BackgroundCov   = None,
593         ):
594     """
595     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
596
597     Synthesis: Hunt 2007, section 2.3.5
598     """
599     if InflationFactor is None:
600         return InputCovOrEns
601     else:
602         InflationFactor = float(InflationFactor)
603     #
604     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
605         if InflationFactor < 1.:
606             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
607         if InflationFactor < 1.+mpr:
608             return InputCovOrEns
609         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
610     #
611     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
612         if InflationFactor < 1.:
613             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
614         if InflationFactor < 1.+mpr:
615             return InputCovOrEns
616         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
617         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
618             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
619     #
620     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
621         if InflationFactor < 0.:
622             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
623         if InflationFactor < mpr:
624             return InputCovOrEns
625         __n, __m = numpy.asarray(InputCovOrEns).shape
626         if __n != __m:
627             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
628         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
629     #
630     elif InflationType == "HybridOnBackgroundCovariance":
631         if InflationFactor < 0.:
632             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
633         if InflationFactor < mpr:
634             return InputCovOrEns
635         __n, __m = numpy.asarray(InputCovOrEns).shape
636         if __n != __m:
637             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
638         if BackgroundCov is None:
639             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
640         if InputCovOrEns.shape != BackgroundCov.shape:
641             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
642         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
643     #
644     elif InflationType == "Relaxation":
645         raise NotImplementedError("InflationType Relaxation")
646     #
647     else:
648         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
649     #
650     return OutputCovOrEns
651
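# ------------------------------------------------------------------------------
# Editor's note: illustrative sketch (hypothetical, not part of the ADAO API),
# showing a multiplicative inflation of ensemble anomalies around their mean.
def _editor_example_CovarianceInflation():
    "Illustrative sketch (hypothetical): multiplicative inflation of the anomalies of a small ensemble"
    __E  = numpy.array([[1., 2., 3.],
                        [0., 1., -1.]])
    # Each anomaly is stretched by the factor 1.05 around the ensemble mean,
    # which multiplies the empirical covariance by 1.05**2:
    __Ei = CovarianceInflation( __E, "MultiplicativeOnAnalysisAnomalies", 1.05 )
    return __Ei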
652 # ==============================================================================
653 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
654     "A posteriori quantile estimation (selfA is modified)"
655     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
656     #
657     # Handling of the bounds
658     if "StateBoundsForQuantiles" in selfA._parameters:
659         LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes priority
660     elif "Bounds" in selfA._parameters:
661         LBounds = selfA._parameters["Bounds"]  # Reasonable default
662     else:
663         LBounds = None
664     if LBounds is not None:
665         def NoneRemove(paire):
666             bmin, bmax = paire
667             if bmin is None: bmin = numpy.finfo('float').min
668             if bmax is None: bmax = numpy.finfo('float').max
669             return [bmin, bmax]
670         LBounds = numpy.matrix( [NoneRemove(paire) for paire in LBounds] )
671     #
672     # Sampling of the states
673     YfQ  = None
674     EXr  = None
675     if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
676         HXa  = numpy.matrix(numpy.ravel( HXa )).T
677     for i in range(nbsamples):
678         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
679             dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
680             if LBounds is not None: # "EstimateProjection" by default
681                 dXr = numpy.max(numpy.hstack((dXr, LBounds[:,0] - Xa)),axis=1)
682                 dXr = numpy.min(numpy.hstack((dXr, LBounds[:,1] - Xa)),axis=1)
683             dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
684             Yr = HXa + dYr
685             if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
686         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
687             Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
688             if LBounds is not None: # "EstimateProjection" by default
689                 Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
690                 Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
691             Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
692         else:
693             raise ValueError("Quantile simulations have to be either Linear or NonLinear.")
694         #
695         if YfQ is None:
696             YfQ = Yr
697             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
698         else:
699             YfQ = numpy.hstack((YfQ,Yr))
700             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
701     #
702     # Extraction of the quantiles
703     YfQ.sort(axis=-1)
704     YQ = None
705     for quantile in selfA._parameters["Quantiles"]:
706         if not (0. <= float(quantile) <= 1.): continue
707         indice = int(nbsamples * float(quantile) - 1./nbsamples)
708         if YQ is None: YQ = YfQ[:,indice]
709         else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
710     selfA.StoredVariables["SimulationQuantiles"].store( YQ )
711     if selfA._toStore("SampledStateForQuantiles"):
712         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
713     #
714     return 0
715
716 # ==============================================================================
717 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
718     """
719     EnKS
720     """
721     #
722     # Operators
723     H = HO["Direct"].appliedControledFormTo
724     #
725     if selfA._parameters["EstimationOf"] == "State":
726         M = EM["Direct"].appliedControledFormTo
727     #
728     if CM is not None and "Tangent" in CM and U is not None:
729         Cm = CM["Tangent"].asMatrix(Xb)
730     else:
731         Cm = None
732     #
733     # Precomputation of the B and R inversions
734     RIdemi = R.sqrtmI()
735     #
736     # Observation duration and sizes
737     LagL = selfA._parameters["SmootherLagL"]
738     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
739         raise ValueError("Fixed-lag smoother requires a series of observations")
740     if Y.stepnumber() < LagL:
741         raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
742     duration = Y.stepnumber()
743     __p = numpy.cumprod(Y.shape())[-1]
744     __n = Xb.size
745     __m = selfA._parameters["NumberOfMembers"]
746     #
747     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
748     else:                         Pn = B
749     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
750         selfA.StoredVariables["Analysis"].store( Xb )
751         if selfA._toStore("APosterioriCovariance"):
752             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
753             covarianceXa = Pn
754     #
755     # Initial direct computation (memorisation is preferred over recomputation)
756     __seed = numpy.random.get_state()
757     selfB = copy.deepcopy(selfA)
758     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
759     if VariantM == "EnKS16-KalmanFilterFormula":
760         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
761     else:
762         raise ValueError("VariantM has to be chosen in the authorized methods list.")
763     if LagL > 0:
764         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
765     else:
766         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
767     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
768     #
769     for step in range(LagL,duration-1):
770         #
771         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
772         sEL.append(None)
773         #
774         if hasattr(Y,"store"):
775             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
776         else:
777             Ynpu = numpy.ravel( Y ).reshape((__p,1))
778         #
779         if U is not None:
780             if hasattr(U,"store") and len(U)>1:
781                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
782             elif hasattr(U,"store") and len(U)==1:
783                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
784             else:
785                 Un = numpy.asmatrix(numpy.ravel( U )).T
786         else:
787             Un = None
788         #
789         #--------------------------
790         if VariantM == "EnKS16-KalmanFilterFormula":
791             if selfA._parameters["EstimationOf"] == "State": # Forecast
792                 EL = M( [(EL[:,i], Un) for i in range(__m)],
793                     argsAsSerie = True,
794                     returnSerieAsArrayMatrix = True )
795                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
796                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
797                     argsAsSerie = True,
798                     returnSerieAsArrayMatrix = True )
799                 if Cm is not None and Un is not None: # Warning: if Cm is also in M, it is counted twice!
800                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
801                     EZ = EZ + Cm * Un
802             elif selfA._parameters["EstimationOf"] == "Parameters":
803                 # --- > By principle, M = Id, Q = 0
804                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
805                     argsAsSerie = True,
806                     returnSerieAsArrayMatrix = True )
807             #
808             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
809             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
810             #
811             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
812             mS    = mS.reshape((-1,__m)) # For dimension 1
813             delta = RIdemi @ ( Ynpu - vZm )
814             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
815             vw    = mT @ mS.T @ delta
816             #
817             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
818             mU    = numpy.identity(__m)
819             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
820             #
821             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
822             EL    = vEm + EX @ wTU
823             #
824             sEL[LagL] = EL
825             for irl in range(LagL): # Smoothing of the L previous analyses
826                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
827                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
828                 sEL[irl] = vEm + EX @ wTU
829             #
830             # Keep the order-0 retrospective analysis before rotation
831             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
832             if selfA._toStore("APosterioriCovariance"):
833                 EXn = sEL[0]
834             #
835             for irl in range(LagL):
836                 sEL[irl] = sEL[irl+1]
837             sEL[LagL] = None
838         #--------------------------
839         else:
840             raise ValueError("VariantM has to be chosen in the authorized methods list.")
841         #
842         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
843         # ---> with the analysis
844         selfA.StoredVariables["Analysis"].store( Xa )
845         if selfA._toStore("APosterioriCovariance"):
846             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
847     #
848     # Storage of the last analyses, which are incompletely updated
849     for irl in range(LagL):
850         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
851         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
852         selfA.StoredVariables["Analysis"].store( Xa )
853     #
854     return 0
855
856 # ==============================================================================
857 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
858     """
859     Ensemble-Transform EnKF
860     """
861     if selfA._parameters["EstimationOf"] == "Parameters":
862         selfA._parameters["StoreInternalVariables"] = True
863     #
864     # Operators
865     H = HO["Direct"].appliedControledFormTo
866     #
867     if selfA._parameters["EstimationOf"] == "State":
868         M = EM["Direct"].appliedControledFormTo
869     #
870     if CM is not None and "Tangent" in CM and U is not None:
871         Cm = CM["Tangent"].asMatrix(Xb)
872     else:
873         Cm = None
874     #
875     # Observation duration and sizes
876     if hasattr(Y,"stepnumber"):
877         duration = Y.stepnumber()
878         __p = numpy.cumprod(Y.shape())[-1]
879     else:
880         duration = 2
881         __p = numpy.array(Y).size
882     #
883     # Precomputation of the B and R inversions
884     if selfA._parameters["StoreInternalVariables"] \
885         or selfA._toStore("CostFunctionJ") \
886         or selfA._toStore("CostFunctionJb") \
887         or selfA._toStore("CostFunctionJo") \
888         or selfA._toStore("CurrentOptimum") \
889         or selfA._toStore("APosterioriCovariance"):
890         BI = B.getI()
891         RI = R.getI()
892     elif VariantM != "KalmanFilterFormula":
893         RI = R.getI()
894     if VariantM == "KalmanFilterFormula":
895         RIdemi = R.sqrtmI()
896     #
897     __n = Xb.size
898     __m = selfA._parameters["NumberOfMembers"]
899     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
900     else:                         Pn = B
901     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
902     #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
903     #
904     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
905         selfA.StoredVariables["Analysis"].store( Xb )
906         if selfA._toStore("APosterioriCovariance"):
907             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
908             covarianceXa = Pn
909     #
910     previousJMinimum = numpy.finfo(float).max
911     #
912     for step in range(duration-1):
913         if hasattr(Y,"store"):
914             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
915         else:
916             Ynpu = numpy.ravel( Y ).reshape((__p,1))
917         #
918         if U is not None:
919             if hasattr(U,"store") and len(U)>1:
920                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
921             elif hasattr(U,"store") and len(U)==1:
922                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
923             else:
924                 Un = numpy.asmatrix(numpy.ravel( U )).T
925         else:
926             Un = None
927         #
928         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
929             Xn = CovarianceInflation( Xn,
930                 selfA._parameters["InflationType"],
931                 selfA._parameters["InflationFactor"],
932                 )
933         #
934         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
935             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
936                 argsAsSerie = True,
937                 returnSerieAsArrayMatrix = True )
938             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
939             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
940                 argsAsSerie = True,
941                 returnSerieAsArrayMatrix = True )
942             if Cm is not None and Un is not None: # Warning: if Cm is also in M, it is counted twice!
943                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
944                 Xn_predicted = Xn_predicted + Cm * Un
945         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
946             # --- > By principle, M = Id, Q = 0
947             Xn_predicted = Xn
948             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
949                 argsAsSerie = True,
950                 returnSerieAsArrayMatrix = True )
951         #
952         # Mean of forecast and observation of forecast
953         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
954         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
955         #
956         # Anomalies
957         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
958         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
959         #
960         #--------------------------
961         if VariantM == "KalmanFilterFormula":
962             mS    = RIdemi * EaHX / math.sqrt(__m-1)
963             mS    = mS.reshape((-1,__m)) # For dimension 1
964             delta = RIdemi * ( Ynpu - Hfm )
965             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
966             vw    = mT @ mS.T @ delta
967             #
968             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
969             mU    = numpy.identity(__m)
970             #
971             EaX   = EaX / math.sqrt(__m-1)
972             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
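            # Editor's note (illustrative summary, following e.g. Hunt 2007): with
            # S = R^(-1/2) HA'/sqrt(m-1) as computed above, the update applies
            #     T  = ( I + S.T S )^(-1)
            #     w  = T S.T R^(-1/2) ( y - H(xf) )
            #     Xa = xf + A'/sqrt(m-1) ( w + sqrt(m-1) T^(1/2) )
            # so that each column of Xn is an analysis member.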
973         #--------------------------
974         elif VariantM == "Variational":
975             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
976             def CostFunction(w):
977                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
978                 _Jo = 0.5 * _A.T @ (RI * _A)
979                 _Jb = 0.5 * (__m-1) * w.T @ w
980                 _J  = _Jo + _Jb
981                 return float(_J)
982             def GradientOfCostFunction(w):
983                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
984                 _GradJo = - EaHX.T @ (RI * _A)
985                 _GradJb = (__m-1) * w.reshape((__m,1))
986                 _GradJ  = _GradJo + _GradJb
987                 return numpy.ravel(_GradJ)
988             vw = scipy.optimize.fmin_cg(
989                 f           = CostFunction,
990                 x0          = numpy.zeros(__m),
991                 fprime      = GradientOfCostFunction,
992                 args        = (),
993                 disp        = False,
994                 )
995             #
996             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
997             Htb = (__m-1) * numpy.identity(__m)
998             Hta = Hto + Htb
999             #
1000             Pta = numpy.linalg.inv( Hta )
1001             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1002             #
1003             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1004         #--------------------------
1005         elif VariantM == "FiniteSize11": # Boc2011 gauge
1006             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1007             def CostFunction(w):
1008                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1009                 _Jo = 0.5 * _A.T @ (RI * _A)
1010                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1011                 _J  = _Jo + _Jb
1012                 return float(_J)
1013             def GradientOfCostFunction(w):
1014                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1015                 _GradJo = - EaHX.T @ (RI * _A)
1016                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1017                 _GradJ  = _GradJo + _GradJb
1018                 return numpy.ravel(_GradJ)
1019             vw = scipy.optimize.fmin_cg(
1020                 f           = CostFunction,
1021                 x0          = numpy.zeros(__m),
1022                 fprime      = GradientOfCostFunction,
1023                 args        = (),
1024                 disp        = False,
1025                 )
1026             #
1027             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1028             Htb = __m * \
1029                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1030                 / (1 + 1/__m + vw.T @ vw)**2
1031             Hta = Hto + Htb
1032             #
1033             Pta = numpy.linalg.inv( Hta )
1034             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1035             #
1036             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1037         #--------------------------
1038         elif VariantM == "FiniteSize15": # Boc2015 gauge
1039             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1040             def CostFunction(w):
1041                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1042                 _Jo = 0.5 * _A.T * RI * _A
1043                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1044                 _J  = _Jo + _Jb
1045                 return float(_J)
1046             def GradientOfCostFunction(w):
1047                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1048                 _GradJo = - EaHX.T @ (RI * _A)
1049                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1050                 _GradJ  = _GradJo + _GradJb
1051                 return numpy.ravel(_GradJ)
1052             vw = scipy.optimize.fmin_cg(
1053                 f           = CostFunction,
1054                 x0          = numpy.zeros(__m),
1055                 fprime      = GradientOfCostFunction,
1056                 args        = (),
1057                 disp        = False,
1058                 )
1059             #
1060             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1061             Htb = (__m+1) * \
1062                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1063                 / (1 + 1/__m + vw.T @ vw)**2
1064             Hta = Hto + Htb
1065             #
1066             Pta = numpy.linalg.inv( Hta )
1067             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1068             #
1069             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1070         #--------------------------
1071         elif VariantM == "FiniteSize16": # Boc2016 gauge
1072             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1073             def CostFunction(w):
1074                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1075                 _Jo = 0.5 * _A.T @ (RI * _A)
1076                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1077                 _J  = _Jo + _Jb
1078                 return float(_J)
1079             def GradientOfCostFunction(w):
1080                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1081                 _GradJo = - EaHX.T @ (RI * _A)
1082                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1083                 _GradJ  = _GradJo + _GradJb
1084                 return numpy.ravel(_GradJ)
1085             vw = scipy.optimize.fmin_cg(
1086                 f           = CostFunction,
1087                 x0          = numpy.zeros(__m),
1088                 fprime      = GradientOfCostFunction,
1089                 args        = (),
1090                 disp        = False,
1091                 )
1092             #
1093             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1094             Htb = ((__m+1) / (__m-1)) * \
1095                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1096                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1097             Hta = Hto + Htb
1098             #
1099             Pta = numpy.linalg.inv( Hta )
1100             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1101             #
1102             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1103         #--------------------------
1104         else:
1105             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1106         #
1107         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1108             Xn = CovarianceInflation( Xn,
1109                 selfA._parameters["InflationType"],
1110                 selfA._parameters["InflationFactor"],
1111                 )
1112         #
1113         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1114         #--------------------------
1115         #
1116         if selfA._parameters["StoreInternalVariables"] \
1117             or selfA._toStore("CostFunctionJ") \
1118             or selfA._toStore("CostFunctionJb") \
1119             or selfA._toStore("CostFunctionJo") \
1120             or selfA._toStore("APosterioriCovariance") \
1121             or selfA._toStore("InnovationAtCurrentAnalysis") \
1122             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1123             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1124             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1125             _Innovation = Ynpu - _HXa
1126         #
1127         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1128         # ---> avec analysis
1129         selfA.StoredVariables["Analysis"].store( Xa )
1130         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1131             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1132         if selfA._toStore("InnovationAtCurrentAnalysis"):
1133             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1134         # ---> avec current state
1135         if selfA._parameters["StoreInternalVariables"] \
1136             or selfA._toStore("CurrentState"):
1137             selfA.StoredVariables["CurrentState"].store( Xn )
1138         if selfA._toStore("ForecastState"):
1139             selfA.StoredVariables["ForecastState"].store( EMX )
1140         if selfA._toStore("BMA"):
1141             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1142         if selfA._toStore("InnovationAtCurrentState"):
1143             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1144         if selfA._toStore("SimulatedObservationAtCurrentState") \
1145             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1146             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1147         # ---> autres
1148         if selfA._parameters["StoreInternalVariables"] \
1149             or selfA._toStore("CostFunctionJ") \
1150             or selfA._toStore("CostFunctionJb") \
1151             or selfA._toStore("CostFunctionJo") \
1152             or selfA._toStore("CurrentOptimum") \
1153             or selfA._toStore("APosterioriCovariance"):
1154             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1155             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1156             J   = Jb + Jo
1157             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1158             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1159             selfA.StoredVariables["CostFunctionJ" ].store( J )
1160             #
1161             if selfA._toStore("IndexOfOptimum") \
1162                 or selfA._toStore("CurrentOptimum") \
1163                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1164                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1165                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1166                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1167                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1168             if selfA._toStore("IndexOfOptimum"):
1169                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1170             if selfA._toStore("CurrentOptimum"):
1171                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1172             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1173                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1174             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1175                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1176             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1177                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1178             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1179                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1180         if selfA._toStore("APosterioriCovariance"):
1181             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1182         if selfA._parameters["EstimationOf"] == "Parameters" \
1183             and J < previousJMinimum:
1184             previousJMinimum    = J
1185             XaMin               = Xa
1186             if selfA._toStore("APosterioriCovariance"):
1187                 covarianceXaMin = Pn
1188         # ---> Pour les smoothers
1189         if selfA._toStore("CurrentEnsembleState"):
1190             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1191     #
1192     # Additional final storage of the optimum for parameter estimation
1193     # ----------------------------------------------------------------------
1194     if selfA._parameters["EstimationOf"] == "Parameters":
1195         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1196         selfA.StoredVariables["Analysis"].store( XaMin )
1197         if selfA._toStore("APosterioriCovariance"):
1198             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1199         if selfA._toStore("BMA"):
1200             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1201     #
1202     return 0
1203
1204 # ==============================================================================
1205 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1206     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1207     """
1208     Iterative EnKF
1209     """
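    # Informal sketch of the "IEnKF12" variant implemented below: the analysis is
    # sought in the space of ensemble coefficients w, with x = xf_mean + A w and
    # A the normalized anomalies, and each Gauss-Newton iteration solves
    #     ( I + S^T R^{-1} S ) dw = - ( w - S^T R^{-1} (y - H(x)) )
    # where S are the observed anomalies; the ensemble transform Ta is taken as
    # the inverse square root of the same matrix.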
1210     if selfA._parameters["EstimationOf"] == "Parameters":
1211         selfA._parameters["StoreInternalVariables"] = True
1212     #
1213     # Opérateurs
1214     H = HO["Direct"].appliedControledFormTo
1215     #
1216     if selfA._parameters["EstimationOf"] == "State":
1217         M = EM["Direct"].appliedControledFormTo
1218     #
1219     if CM is not None and "Tangent" in CM and U is not None:
1220         Cm = CM["Tangent"].asMatrix(Xb)
1221     else:
1222         Cm = None
1223     #
1224     # Durée d'observation et tailles
1225     if hasattr(Y,"stepnumber"):
1226         duration = Y.stepnumber()
1227         __p = numpy.cumprod(Y.shape())[-1]
1228     else:
1229         duration = 2
1230         __p = numpy.array(Y).size
1231     #
1232     # Précalcul des inversions de B et R
1233     if selfA._parameters["StoreInternalVariables"] \
1234         or selfA._toStore("CostFunctionJ") \
1235         or selfA._toStore("CostFunctionJb") \
1236         or selfA._toStore("CostFunctionJo") \
1237         or selfA._toStore("CurrentOptimum") \
1238         or selfA._toStore("APosterioriCovariance"):
1239         BI = B.getI()
1240     RI = R.getI()
1241     #
1242     __n = Xb.size
1243     __m = selfA._parameters["NumberOfMembers"]
1244     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1245     else:                         Pn = B
1246     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1247     else:                         Rn = R
1248     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1249     else:                         Qn = Q
1250     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1251     #
1252     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1253         selfA.StoredVariables["Analysis"].store( Xb )
1254         if selfA._toStore("APosterioriCovariance"):
1255             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1256             covarianceXa = Pn
1257     #
1258     previousJMinimum = numpy.finfo(float).max
1259     #
1260     for step in range(duration-1):
1261         if hasattr(Y,"store"):
1262             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1263         else:
1264             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1265         #
1266         if U is not None:
1267             if hasattr(U,"store") and len(U)>1:
1268                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1269             elif hasattr(U,"store") and len(U)==1:
1270                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1271             else:
1272                 Un = numpy.asmatrix(numpy.ravel( U )).T
1273         else:
1274             Un = None
1275         #
1276         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1277             Xn = CovarianceInflation( Xn,
1278                 selfA._parameters["InflationType"],
1279                 selfA._parameters["InflationFactor"],
1280                 )
1281         #
1282         #--------------------------
1283         if VariantM == "IEnKF12":
1284             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1285             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1286             __j = 0
1287             Deltaw = 1
1288             if not BnotT:
1289                 Ta  = numpy.identity(__m)
1290             vw  = numpy.zeros(__m)
1291             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1292                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1293                 #
1294                 if BnotT:
1295                     E1 = vx1 + _epsilon * EaX
1296                 else:
1297                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1298                 #
1299                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1300                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1301                         argsAsSerie = True,
1302                         returnSerieAsArrayMatrix = True )
1303                 elif selfA._parameters["EstimationOf"] == "Parameters":
1304                     # --- > By design, M = Id
1305                     E2 = Xn
1306                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1307                 vy1 = H((vx2, Un)).reshape((__p,1))
1308                 #
1309                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1310                     argsAsSerie = True,
1311                     returnSerieAsArrayMatrix = True )
1312                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1313                 #
1314                 if BnotT:
1315                     EaY = (HE2 - vy2) / _epsilon
1316                 else:
1317                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1318                 #
1319                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1320                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1321                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1322                 #
1323                 vw = vw + Deltaw
1324                 #
1325                 if not BnotT:
1326                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1327                 #
1328                 __j = __j + 1
1329             #
1330             A2 = EnsembleOfAnomalies( E2 )
1331             #
1332             if BnotT:
1333                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1334                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1335             #
1336             Xn = vx2 + A2
1337         #--------------------------
1338         else:
1339             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1340         #
1341         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1342             Xn = CovarianceInflation( Xn,
1343                 selfA._parameters["InflationType"],
1344                 selfA._parameters["InflationFactor"],
1345                 )
1346         #
1347         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1348         #--------------------------
1349         #
1350         if selfA._parameters["StoreInternalVariables"] \
1351             or selfA._toStore("CostFunctionJ") \
1352             or selfA._toStore("CostFunctionJb") \
1353             or selfA._toStore("CostFunctionJo") \
1354             or selfA._toStore("APosterioriCovariance") \
1355             or selfA._toStore("InnovationAtCurrentAnalysis") \
1356             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1357             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1358             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1359             _Innovation = Ynpu - _HXa
1360         #
1361         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1362         # ---> avec analysis
1363         selfA.StoredVariables["Analysis"].store( Xa )
1364         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1365             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1366         if selfA._toStore("InnovationAtCurrentAnalysis"):
1367             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1368         # ---> avec current state
1369         if selfA._parameters["StoreInternalVariables"] \
1370             or selfA._toStore("CurrentState"):
1371             selfA.StoredVariables["CurrentState"].store( Xn )
1372         if selfA._toStore("ForecastState"):
1373             selfA.StoredVariables["ForecastState"].store( E2 )
1374         if selfA._toStore("BMA"):
1375             selfA.StoredVariables["BMA"].store( E2 - Xa )
1376         if selfA._toStore("InnovationAtCurrentState"):
1377             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1378         if selfA._toStore("SimulatedObservationAtCurrentState") \
1379             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1380             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1381         # ---> autres
1382         if selfA._parameters["StoreInternalVariables"] \
1383             or selfA._toStore("CostFunctionJ") \
1384             or selfA._toStore("CostFunctionJb") \
1385             or selfA._toStore("CostFunctionJo") \
1386             or selfA._toStore("CurrentOptimum") \
1387             or selfA._toStore("APosterioriCovariance"):
1388             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1389             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1390             J   = Jb + Jo
1391             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1392             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1393             selfA.StoredVariables["CostFunctionJ" ].store( J )
1394             #
1395             if selfA._toStore("IndexOfOptimum") \
1396                 or selfA._toStore("CurrentOptimum") \
1397                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1398                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1399                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1400                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1401                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1402             if selfA._toStore("IndexOfOptimum"):
1403                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1404             if selfA._toStore("CurrentOptimum"):
1405                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1406             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1407                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1408             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1409                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1410             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1411                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1412             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1413                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1414         if selfA._toStore("APosterioriCovariance"):
1415             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1416         if selfA._parameters["EstimationOf"] == "Parameters" \
1417             and J < previousJMinimum:
1418             previousJMinimum    = J
1419             XaMin               = Xa
1420             if selfA._toStore("APosterioriCovariance"):
1421                 covarianceXaMin = Pn
1422     #
1423     # Additional final storage of the optimum for parameter estimation
1424     # ----------------------------------------------------------------------
1425     if selfA._parameters["EstimationOf"] == "Parameters":
1426         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1427         selfA.StoredVariables["Analysis"].store( XaMin )
1428         if selfA._toStore("APosterioriCovariance"):
1429             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1430         if selfA._toStore("BMA"):
1431             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1432     #
1433     return 0
1434
1435 # ==============================================================================
1436 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1437     """
1438     Incremental 3DVAR
1439     """
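    # Informal sketch of the incremental scheme below: each outer iteration
    # re-linearizes the observation operator around the current iterate Xr, then
    # the inner loop minimizes the quadratic cost in the increment dx
    #     J(dx) = 1/2 dx^T B^{-1} dx + 1/2 (d - H' dx)^T R^{-1} (d - H' dx)
    # with d = y - H(xb) the innovation and H' the tangent linear at Xr.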
1440     #
1441     # Initialisations
1442     # ---------------
1443     #
1444     # Non-linear operator for the outer loop
1445     Hm = HO["Direct"].appliedTo
1446     #
1447     # Précalcul des inversions de B et R
1448     BI = B.getI()
1449     RI = R.getI()
1450     #
1451     # Point de démarrage de l'optimisation
1452     Xini = selfA._parameters["InitializationPoint"]
1453     #
1454     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1455     Innovation = Y - HXb
1456     #
1457     # Outer Loop
1458     # ----------
1459     iOuter = 0
1460     J      = 1./mpr
1461     DeltaJ = 1./mpr
1462     Xr     = Xini.reshape((-1,1))
1463     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1464         #
1465         # Inner Loop
1466         # ----------
1467         Ht = HO["Tangent"].asMatrix(Xr)
1468         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1469         #
1470         # Définition de la fonction-coût
1471         # ------------------------------
1472         def CostFunction(dx):
1473             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1474             if selfA._parameters["StoreInternalVariables"] or \
1475                 selfA._toStore("CurrentState") or \
1476                 selfA._toStore("CurrentOptimum"):
1477                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1478             _HdX = Ht * _dX
1479             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1480             _dInnovation = Innovation - _HdX
1481             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1482                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1483                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1484             if selfA._toStore("InnovationAtCurrentState"):
1485                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1486             #
1487             Jb  = float( 0.5 * _dX.T * BI * _dX )
1488             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1489             J   = Jb + Jo
1490             #
1491             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1492             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1493             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1494             selfA.StoredVariables["CostFunctionJ" ].store( J )
1495             if selfA._toStore("IndexOfOptimum") or \
1496                 selfA._toStore("CurrentOptimum") or \
1497                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1498                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1499                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1500                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1501                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1502             if selfA._toStore("IndexOfOptimum"):
1503                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1504             if selfA._toStore("CurrentOptimum"):
1505                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1506             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1507                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1508             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1509                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1510             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1511                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1512             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1513                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1514             return J
1515         #
1516         def GradientOfCostFunction(dx):
1517             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1518             _HdX         = Ht * _dX
1519             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1520             _dInnovation = Innovation - _HdX
1521             GradJb       = BI * _dX
1522             GradJo       = - Ht.T @ (RI * _dInnovation)
1523             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1524             return GradJ
1525         #
1526         # Minimisation de la fonctionnelle
1527         # --------------------------------
1528         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1529         #
1530         if selfA._parameters["Minimizer"] == "LBFGSB":
1531             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1532             if "0.19" <= scipy.version.version <= "1.1.0":
1533                 import lbfgsbhlt as optimiseur
1534             else:
1535                 import scipy.optimize as optimiseur
1536             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1537                 func        = CostFunction,
1538                 x0          = numpy.zeros(Xini.size),
1539                 fprime      = GradientOfCostFunction,
1540                 args        = (),
1541                 bounds      = selfA._parameters["Bounds"],
1542                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1543                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1544                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1545                 iprint      = selfA._parameters["optiprint"],
1546                 )
1547             nfeval = Informations['funcalls']
1548             rc     = Informations['warnflag']
1549         elif selfA._parameters["Minimizer"] == "TNC":
1550             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1551                 func        = CostFunction,
1552                 x0          = numpy.zeros(Xini.size),
1553                 fprime      = GradientOfCostFunction,
1554                 args        = (),
1555                 bounds      = selfA._parameters["Bounds"],
1556                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1557                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1558                 ftol        = selfA._parameters["CostDecrementTolerance"],
1559                 messages    = selfA._parameters["optmessages"],
1560                 )
1561         elif selfA._parameters["Minimizer"] == "CG":
1562             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1563                 f           = CostFunction,
1564                 x0          = numpy.zeros(Xini.size),
1565                 fprime      = GradientOfCostFunction,
1566                 args        = (),
1567                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1568                 gtol        = selfA._parameters["GradientNormTolerance"],
1569                 disp        = selfA._parameters["optdisp"],
1570                 full_output = True,
1571                 )
1572         elif selfA._parameters["Minimizer"] == "NCG":
1573             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1574                 f           = CostFunction,
1575                 x0          = numpy.zeros(Xini.size),
1576                 fprime      = GradientOfCostFunction,
1577                 args        = (),
1578                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1579                 avextol     = selfA._parameters["CostDecrementTolerance"],
1580                 disp        = selfA._parameters["optdisp"],
1581                 full_output = True,
1582                 )
1583         elif selfA._parameters["Minimizer"] == "BFGS":
1584             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1585                 f           = CostFunction,
1586                 x0          = numpy.zeros(Xini.size),
1587                 fprime      = GradientOfCostFunction,
1588                 args        = (),
1589                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1590                 gtol        = selfA._parameters["GradientNormTolerance"],
1591                 disp        = selfA._parameters["optdisp"],
1592                 full_output = True,
1593                 )
1594         else:
1595             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1596         #
1597         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1598         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1599         #
1600         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1601             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1602             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1603         else:
1604             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1605         #
1606         Xr     = Minimum
1607         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1608         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1609     #
1610     # Obtention de l'analyse
1611     # ----------------------
1612     Xa = Xr
1613     #
1614     selfA.StoredVariables["Analysis"].store( Xa )
1615     #
1616     if selfA._toStore("OMA") or \
1617         selfA._toStore("SigmaObs2") or \
1618         selfA._toStore("SimulationQuantiles") or \
1619         selfA._toStore("SimulatedObservationAtOptimum"):
1620         if selfA._toStore("SimulatedObservationAtCurrentState"):
1621             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1622         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1623             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1624         else:
1625             HXa = Hm( Xa )
1626     #
1627     # Calcul de la covariance d'analyse
1628     # ---------------------------------
1629     if selfA._toStore("APosterioriCovariance") or \
1630         selfA._toStore("SimulationQuantiles") or \
1631         selfA._toStore("JacobianMatrixAtOptimum") or \
1632         selfA._toStore("KalmanGainAtOptimum"):
1633         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1634         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1635     if selfA._toStore("APosterioriCovariance") or \
1636         selfA._toStore("SimulationQuantiles") or \
1637         selfA._toStore("KalmanGainAtOptimum"):
1638         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1639         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1640     if selfA._toStore("APosterioriCovariance") or \
1641         selfA._toStore("SimulationQuantiles"):
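        # Sketch: the Hessian of the 3DVAR cost, B^{-1} + H^T R^{-1} H, is assembled
        # column by column by applying it to the unit vectors, and the a posteriori
        # covariance A is obtained as its inverse.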
1642         HessienneI = []
1643         nb = Xa.size
1644         for i in range(nb):
1645             _ee    = numpy.matrix(numpy.zeros(nb)).T
1646             _ee[i] = 1.
1647             _HtEE  = numpy.dot(HtM,_ee)
1648             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1649             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1650         HessienneI = numpy.matrix( HessienneI )
1651         A = HessienneI.I
1652         if min(A.shape) != max(A.shape):
1653             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1654         if (numpy.diag(A) < 0).any():
1655             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1656         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1657             try:
1658                 L = numpy.linalg.cholesky( A )
1659             except:
1660                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1661     if selfA._toStore("APosterioriCovariance"):
1662         selfA.StoredVariables["APosterioriCovariance"].store( A )
1663     if selfA._toStore("JacobianMatrixAtOptimum"):
1664         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1665     if selfA._toStore("KalmanGainAtOptimum"):
1666         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1667         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1668         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1669     #
1670     # Calculs et/ou stockages supplémentaires
1671     # ---------------------------------------
1672     if selfA._toStore("Innovation") or \
1673         selfA._toStore("SigmaObs2") or \
1674         selfA._toStore("MahalanobisConsistency") or \
1675         selfA._toStore("OMB"):
1676         d  = Y - HXb
1677     if selfA._toStore("Innovation"):
1678         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1679     if selfA._toStore("BMA"):
1680         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1681     if selfA._toStore("OMA"):
1682         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1683     if selfA._toStore("OMB"):
1684         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1685     if selfA._toStore("SigmaObs2"):
1686         TraceR = R.trace(Y.size)
1687         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1688     if selfA._toStore("MahalanobisConsistency"):
1689         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1690     if selfA._toStore("SimulationQuantiles"):
1691         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
1692     if selfA._toStore("SimulatedObservationAtBackground"):
1693         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1694     if selfA._toStore("SimulatedObservationAtOptimum"):
1695         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1696     #
1697     return 0
1698
1699 # ==============================================================================
1700 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1701     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1702     """
1703     Maximum Likelihood Ensemble Filter
1704     """
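    # Informal sketch of the "MLEF13" variant below: as for the iterative EnKF, the
    # analysis is sought in the ensemble coefficient space w through Gauss-Newton
    # iterations on
    #     ( I + S^T R^{-1} S ) dw = - ( w - S^T R^{-1} (y - H(xf_mean + A w)) )
    # but the model is not re-propagated inside the loop: only the observation
    # operator is re-applied to the updated ensemble.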
1705     if selfA._parameters["EstimationOf"] == "Parameters":
1706         selfA._parameters["StoreInternalVariables"] = True
1707     #
1708     # Opérateurs
1709     H = HO["Direct"].appliedControledFormTo
1710     #
1711     if selfA._parameters["EstimationOf"] == "State":
1712         M = EM["Direct"].appliedControledFormTo
1713     #
1714     if CM is not None and "Tangent" in CM and U is not None:
1715         Cm = CM["Tangent"].asMatrix(Xb)
1716     else:
1717         Cm = None
1718     #
1719     # Durée d'observation et tailles
1720     if hasattr(Y,"stepnumber"):
1721         duration = Y.stepnumber()
1722         __p = numpy.cumprod(Y.shape())[-1]
1723     else:
1724         duration = 2
1725         __p = numpy.array(Y).size
1726     #
1727     # Précalcul des inversions de B et R
1728     if selfA._parameters["StoreInternalVariables"] \
1729         or selfA._toStore("CostFunctionJ") \
1730         or selfA._toStore("CostFunctionJb") \
1731         or selfA._toStore("CostFunctionJo") \
1732         or selfA._toStore("CurrentOptimum") \
1733         or selfA._toStore("APosterioriCovariance"):
1734         BI = B.getI()
1735     RI = R.getI()
1736     #
1737     __n = Xb.size
1738     __m = selfA._parameters["NumberOfMembers"]
1739     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1740     else:                         Pn = B
1741     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1742     else:                         Rn = R
1743     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1744     #
1745     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1746         selfA.StoredVariables["Analysis"].store( Xb )
1747         if selfA._toStore("APosterioriCovariance"):
1748             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1749             covarianceXa = Pn
1750     #
1751     previousJMinimum = numpy.finfo(float).max
1752     #
1753     for step in range(duration-1):
1754         if hasattr(Y,"store"):
1755             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1756         else:
1757             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1758         #
1759         if U is not None:
1760             if hasattr(U,"store") and len(U)>1:
1761                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1762             elif hasattr(U,"store") and len(U)==1:
1763                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1764             else:
1765                 Un = numpy.asmatrix(numpy.ravel( U )).T
1766         else:
1767             Un = None
1768         #
1769         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1770             Xn = CovarianceInflation( Xn,
1771                 selfA._parameters["InflationType"],
1772                 selfA._parameters["InflationFactor"],
1773                 )
1774         #
1775         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1776             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1777                 argsAsSerie = True,
1778                 returnSerieAsArrayMatrix = True )
1779             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1780             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is a duplicate!
1781                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1782                 Xn_predicted = Xn_predicted + Cm * Un
1783         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1784             # --- > By design, M = Id, Q = 0
1785             Xn_predicted = Xn
1786         #
1787         #--------------------------
1788         if VariantM == "MLEF13":
1789             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1790             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1791             Ua  = numpy.identity(__m)
1792             __j = 0
1793             Deltaw = 1
1794             if not BnotT:
1795                 Ta  = numpy.identity(__m)
1796             vw  = numpy.zeros(__m)
1797             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1798                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1799                 #
1800                 if BnotT:
1801                     E1 = vx1 + _epsilon * EaX
1802                 else:
1803                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1804                 #
1805                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1806                     argsAsSerie = True,
1807                     returnSerieAsArrayMatrix = True )
1808                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1809                 #
1810                 if BnotT:
1811                     EaY = (HE2 - vy2) / _epsilon
1812                 else:
1813                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1814                 #
1815                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1816                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1817                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1818                 #
1819                 vw = vw + Deltaw
1820                 #
1821                 if not BnotT:
1822                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1823                 #
1824                 __j = __j + 1
1825             #
1826             if BnotT:
1827                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1828             #
1829             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1830         #--------------------------
1831         else:
1832             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1833         #
1834         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1835             Xn = CovarianceInflation( Xn,
1836                 selfA._parameters["InflationType"],
1837                 selfA._parameters["InflationFactor"],
1838                 )
1839         #
1840         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1841         #--------------------------
1842         #
1843         if selfA._parameters["StoreInternalVariables"] \
1844             or selfA._toStore("CostFunctionJ") \
1845             or selfA._toStore("CostFunctionJb") \
1846             or selfA._toStore("CostFunctionJo") \
1847             or selfA._toStore("APosterioriCovariance") \
1848             or selfA._toStore("InnovationAtCurrentAnalysis") \
1849             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1850             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1851             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1852             _Innovation = Ynpu - _HXa
1853         #
1854         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1855         # ---> avec analysis
1856         selfA.StoredVariables["Analysis"].store( Xa )
1857         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1858             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1859         if selfA._toStore("InnovationAtCurrentAnalysis"):
1860             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1861         # ---> avec current state
1862         if selfA._parameters["StoreInternalVariables"] \
1863             or selfA._toStore("CurrentState"):
1864             selfA.StoredVariables["CurrentState"].store( Xn )
1865         if selfA._toStore("ForecastState"):
1866             selfA.StoredVariables["ForecastState"].store( EMX )
1867         if selfA._toStore("BMA"):
1868             selfA.StoredVariables["BMA"].store( EMX - Xa )
1869         if selfA._toStore("InnovationAtCurrentState"):
1870             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1871         if selfA._toStore("SimulatedObservationAtCurrentState") \
1872             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1873             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1874         # ---> autres
1875         if selfA._parameters["StoreInternalVariables"] \
1876             or selfA._toStore("CostFunctionJ") \
1877             or selfA._toStore("CostFunctionJb") \
1878             or selfA._toStore("CostFunctionJo") \
1879             or selfA._toStore("CurrentOptimum") \
1880             or selfA._toStore("APosterioriCovariance"):
1881             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1882             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1883             J   = Jb + Jo
1884             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1885             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1886             selfA.StoredVariables["CostFunctionJ" ].store( J )
1887             #
1888             if selfA._toStore("IndexOfOptimum") \
1889                 or selfA._toStore("CurrentOptimum") \
1890                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1891                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1892                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1893                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1894                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1895             if selfA._toStore("IndexOfOptimum"):
1896                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1897             if selfA._toStore("CurrentOptimum"):
1898                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1899             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1900                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1901             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1902                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1903             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1904                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1905             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1906                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1907         if selfA._toStore("APosterioriCovariance"):
1908             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1909         if selfA._parameters["EstimationOf"] == "Parameters" \
1910             and J < previousJMinimum:
1911             previousJMinimum    = J
1912             XaMin               = Xa
1913             if selfA._toStore("APosterioriCovariance"):
1914                 covarianceXaMin = Pn
1915     #
1916     # Additional final storage of the optimum for parameter estimation
1917     # ----------------------------------------------------------------------
1918     if selfA._parameters["EstimationOf"] == "Parameters":
1919         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1920         selfA.StoredVariables["Analysis"].store( XaMin )
1921         if selfA._toStore("APosterioriCovariance"):
1922             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1923         if selfA._toStore("BMA"):
1924             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1925     #
1926     return 0
1927
1928 # ==============================================================================
1929 def mmqr(
1930         func     = None,
1931         x0       = None,
1932         fprime   = None,
1933         bounds   = None,
1934         quantile = 0.5,
1935         maxfun   = 15000,
1936         toler    = 1.e-06,
1937         y        = None,
1938         ):
1939     """
1940     Computer implementation of the MMQR algorithm, based on the publication:
1941     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1942     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1943     """
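    # Informal sketch: the quantile (pinball) loss is majorized by a smoothed
    # quadratic surrogate with weights 1/(epsilon+|r|) on the residuals r; each
    # iteration solves the corresponding weighted least-squares system and halves
    # the step until the surrogate decreases and the bounds are respected.
    #
    # A minimal, purely illustrative call (the names F, dF, x0 and obs are
    # placeholders, not part of this module):
    #     xopt, ecart, infos = mmqr(func=F, x0=x0, fprime=dF, quantile=0.5, y=obs)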
1944     #
1945     # Recuperation des donnees et informations initiales
1946     # --------------------------------------------------
1947     variables = numpy.ravel( x0 )
1948     mesures   = numpy.ravel( y )
1949     increment = sys.float_info[0]
1950     p         = variables.size
1951     n         = mesures.size
1952     quantile  = float(quantile)
1953     #
1954     # Calcul des parametres du MM
1955     # ---------------------------
1956     tn      = float(toler) / n
1957     e0      = -tn / math.log(tn)
1958     epsilon = (e0-tn)/(1+math.log(e0))
1959     #
1960     # Calculs d'initialisation
1961     # ------------------------
1962     residus  = mesures - numpy.ravel( func( variables ) )
1963     poids    = 1./(epsilon+numpy.abs(residus))
1964     veps     = 1. - 2. * quantile - residus * poids
1965     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1966     iteration = 0
1967     #
1968     # Recherche iterative
1969     # -------------------
1970     while (increment > toler) and (iteration < maxfun) :
1971         iteration += 1
1972         #
1973         Derivees  = numpy.array(fprime(variables))
1974         Derivees  = Derivees.reshape(n,p) # Needed to restore the matrix shape if it went through YACS pipes
1975         DeriveesT = Derivees.transpose()
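        # Weighted Gauss-Newton step on the MM surrogate (sketch):
        #     (J^T W J) step = - J^T veps,    W = diag(1/(epsilon+|r|))
        # solved below in the least-squares sense.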
1976         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1977         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
1978         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1979         #
1980         variables = variables + step
1981         if bounds is not None:
1982             # Warning: avoid an infinite loop if an interval is too small
1983             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1984                 step      = step/2.
1985                 variables = variables - step
1986         residus   = mesures - numpy.ravel( func(variables) )
1987         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1988         #
1989         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1990             step      = step/2.
1991             variables = variables - step
1992             residus   = mesures - numpy.ravel( func(variables) )
1993             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1994         #
1995         increment     = lastsurrogate-surrogate
1996         poids         = 1./(epsilon+numpy.abs(residus))
1997         veps          = 1. - 2. * quantile - residus * poids
1998         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
1999     #
2000     # Mesure d'écart
2001     # --------------
2002     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2003     #
2004     return variables, Ecart, [n,p,iteration,increment,0]
2005
2006 # ==============================================================================
2007 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2008     """
2009     Multi-step and multi-method 3DVAR
2010     """
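    # Sketch of the multi-step scheme below: at each observation step, the previous
    # analysis is propagated by the evolution model to give the new background, and
    # "oneCycle" (a single-cycle routine with the same (selfA, Xb, Y, U, HO, EM, CM,
    # R, B, Q) signature, for instance psas3dvar) is called on that background.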
2011     #
2012     # Initialisation
2013     Xn = numpy.ravel(Xb).reshape((-1,1))
2014     #
2015     if selfA._parameters["EstimationOf"] == "State":
2016         M = EM["Direct"].appliedTo
2017         #
2018         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2019             selfA.StoredVariables["Analysis"].store( Xn )
2020             if selfA._toStore("APosterioriCovariance"):
2021                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
2022                 else:                         Pn = B
2023                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2024             if selfA._toStore("ForecastState"):
2025                 selfA.StoredVariables["ForecastState"].store( Xn )
2026     #
2027     if hasattr(Y,"stepnumber"):
2028         duration = Y.stepnumber()
2029     else:
2030         duration = 2
2031     #
2032     # Multi-pas
2033     for step in range(duration-1):
2034         if hasattr(Y,"store"):
2035             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2036         else:
2037             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2038         #
2039         if selfA._parameters["EstimationOf"] == "State": # Forecast
2040             Xn = selfA.StoredVariables["Analysis"][-1]
2041             Xn_predicted = M( Xn )
2042             if selfA._toStore("ForecastState"):
2043                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2044         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2045             # --- > By design, M = Id, Q = 0
2046             Xn_predicted = Xn
2047         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2048         #
2049         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2050     #
2051     return 0
2052
2053 # ==============================================================================
2054 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2055     """
2056     3DVAR PSAS
2057     """
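    # Informal sketch of the PSAS (dual, observation-space) formulation below: the
    # minimization is carried out over a vector w of the size of the observations,
    #     J(w) = 1/2 w^T (H B H^T + R) w - w^T d,    d = y - H(xb),
    # and the physical analysis is recovered as Xa = Xb + B H^T w.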
2058     #
2059     # Initialisations
2060     # ---------------
2061     #
2062     # Opérateurs
2063     Hm = HO["Direct"].appliedTo
2064     #
2065     # Optional use of a precomputed H(Xb) vector
2066     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2067         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2068     else:
2069         HXb = Hm( Xb )
2070     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2071     if Y.size != HXb.size:
2072         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2073     if max(Y.shape) != max(HXb.shape):
2074         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2075     #
2076     if selfA._toStore("JacobianMatrixAtBackground"):
2077         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2078         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2079         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2080     #
2081     Ht = HO["Tangent"].asMatrix(Xb)
2082     BHT = B * Ht.T
2083     HBHTpR = R + Ht * BHT
2084     Innovation = Y - HXb
2085     #
2086     # Point de démarrage de l'optimisation
2087     Xini = numpy.zeros(Xb.shape)
2088     #
2089     # Définition de la fonction-coût
2090     # ------------------------------
2091     def CostFunction(w):
2092         _W = numpy.asmatrix(numpy.ravel( w )).T
2093         if selfA._parameters["StoreInternalVariables"] or \
2094             selfA._toStore("CurrentState") or \
2095             selfA._toStore("CurrentOptimum"):
2096             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2097         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2098             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2099             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2100         if selfA._toStore("InnovationAtCurrentState"):
2101             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2102         #
2103         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2104         Jo  = float( - _W.T * Innovation )
2105         J   = Jb + Jo
2106         #
2107         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2108         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2109         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2110         selfA.StoredVariables["CostFunctionJ" ].store( J )
2111         if selfA._toStore("IndexOfOptimum") or \
2112             selfA._toStore("CurrentOptimum") or \
2113             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2114             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2115             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2116             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2117             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2118         if selfA._toStore("IndexOfOptimum"):
2119             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2120         if selfA._toStore("CurrentOptimum"):
2121             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2122         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2123             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2124         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2125             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2126         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2127             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2128         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2129             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2130         return J
2131     #
2132     def GradientOfCostFunction(w):
2133         _W = numpy.asmatrix(numpy.ravel( w )).T
2134         GradJb  = HBHTpR * _W
2135         GradJo  = - Innovation
2136         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2137         return GradJ
2138     #
2139     # Minimisation de la fonctionnelle
2140     # --------------------------------
2141     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2142     #
2143     if selfA._parameters["Minimizer"] == "LBFGSB":
2144         if "0.19" <= scipy.version.version <= "1.1.0":
2145             import lbfgsbhlt as optimiseur
2146         else:
2147             import scipy.optimize as optimiseur
2148         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2149             func        = CostFunction,
2150             x0          = Xini,
2151             fprime      = GradientOfCostFunction,
2152             args        = (),
2153             bounds      = selfA._parameters["Bounds"],
2154             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2155             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2156             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2157             iprint      = selfA._parameters["optiprint"],
2158             )
2159         nfeval = Informations['funcalls']
2160         rc     = Informations['warnflag']
2161     elif selfA._parameters["Minimizer"] == "TNC":
2162         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2163             func        = CostFunction,
2164             x0          = Xini,
2165             fprime      = GradientOfCostFunction,
2166             args        = (),
2167             bounds      = selfA._parameters["Bounds"],
2168             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2169             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2170             ftol        = selfA._parameters["CostDecrementTolerance"],
2171             messages    = selfA._parameters["optmessages"],
2172             )
2173     elif selfA._parameters["Minimizer"] == "CG":
2174         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2175             f           = CostFunction,
2176             x0          = Xini,
2177             fprime      = GradientOfCostFunction,
2178             args        = (),
2179             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2180             gtol        = selfA._parameters["GradientNormTolerance"],
2181             disp        = selfA._parameters["optdisp"],
2182             full_output = True,
2183             )
2184     elif selfA._parameters["Minimizer"] == "NCG":
2185         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2186             f           = CostFunction,
2187             x0          = Xini,
2188             fprime      = GradientOfCostFunction,
2189             args        = (),
2190             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2191             avextol     = selfA._parameters["CostDecrementTolerance"],
2192             disp        = selfA._parameters["optdisp"],
2193             full_output = True,
2194             )
2195     elif selfA._parameters["Minimizer"] == "BFGS":
2196         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2197             f           = CostFunction,
2198             x0          = Xini,
2199             fprime      = GradientOfCostFunction,
2200             args        = (),
2201             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2202             gtol        = selfA._parameters["GradientNormTolerance"],
2203             disp        = selfA._parameters["optdisp"],
2204             full_output = True,
2205             )
2206     else:
2207         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2208     #
2209     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2210     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2211     #
2212     # Workaround for a TNC bug affecting the returned Minimum
2213     # ----------------------------------------------------------------
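     # The optimum is either taken from the stored current states, or mapped
     # back to state space as Xa = Xb + B H^T w (see below).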
2214     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2215         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2216         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2217     else:
2218         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2219     #
2220     # Computation of the analysis
2221     # ----------------------
2222     Xa = Minimum
2223     #
2224     selfA.StoredVariables["Analysis"].store( Xa )
2225     #
2226     if selfA._toStore("OMA") or \
2227         selfA._toStore("SigmaObs2") or \
2228         selfA._toStore("SimulationQuantiles") or \
2229         selfA._toStore("SimulatedObservationAtOptimum"):
2230         if selfA._toStore("SimulatedObservationAtCurrentState"):
2231             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2232         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2233             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2234         else:
2235             HXa = Hm( Xa )
2236     #
2237     # Computation of the analysis covariance
2238     # ---------------------------------
2239     if selfA._toStore("APosterioriCovariance") or \
2240         selfA._toStore("SimulationQuantiles") or \
2241         selfA._toStore("JacobianMatrixAtOptimum") or \
2242         selfA._toStore("KalmanGainAtOptimum"):
2243         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2244         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2245     if selfA._toStore("APosterioriCovariance") or \
2246         selfA._toStore("SimulationQuantiles") or \
2247         selfA._toStore("KalmanGainAtOptimum"):
2248         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2249         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2250     if selfA._toStore("APosterioriCovariance") or \
2251         selfA._toStore("SimulationQuantiles"):
2252         BI = B.getI()
2253         RI = R.getI()
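         # The a posteriori covariance A is approximated by the inverse of the
         # cost-function Hessian B^{-1} + H^T R^{-1} H, assembled below by
         # applying it to each canonical basis vector.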
2254         HessienneI = []
2255         nb = Xa.size
2256         for i in range(nb):
2257             _ee    = numpy.matrix(numpy.zeros(nb)).T
2258             _ee[i] = 1.
2259             _HtEE  = numpy.dot(HtM,_ee)
2260             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2261             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2262         HessienneI = numpy.matrix( HessienneI )
2263         A = HessienneI.I
2264         if min(A.shape) != max(A.shape):
2265             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despite the fact that it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2266         if (numpy.diag(A) < 0).any():
2267             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2268         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2269             try:
2270                 L = numpy.linalg.cholesky( A )
2271             except numpy.linalg.LinAlgError:
2272                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2273     if selfA._toStore("APosterioriCovariance"):
2274         selfA.StoredVariables["APosterioriCovariance"].store( A )
2275     if selfA._toStore("JacobianMatrixAtOptimum"):
2276         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2277     if selfA._toStore("KalmanGainAtOptimum"):
2278         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2279         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2280         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2281     #
2282     # Additional computations and/or storage
2283     # ---------------------------------------
2284     if selfA._toStore("Innovation") or \
2285         selfA._toStore("SigmaObs2") or \
2286         selfA._toStore("MahalanobisConsistency") or \
2287         selfA._toStore("OMB"):
2288         d  = Y - HXb
2289     if selfA._toStore("Innovation"):
2290         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2291     if selfA._toStore("BMA"):
2292         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2293     if selfA._toStore("OMA"):
2294         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2295     if selfA._toStore("OMB"):
2296         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2297     if selfA._toStore("SigmaObs2"):
2298         TraceR = R.trace(Y.size)
2299         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2300     if selfA._toStore("MahalanobisConsistency"):
2301         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2302     if selfA._toStore("SimulationQuantiles"):
2303         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2304     if selfA._toStore("SimulatedObservationAtBackground"):
2305         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2306     if selfA._toStore("SimulatedObservationAtOptimum"):
2307         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2308     #
2309     return 0
2310
2311 # ==============================================================================
2312 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2313     """
2314     Stochastic EnKF
2315     """
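     # Perturbed-observations (stochastic) EnKF: each member is updated with an
     # observation perturbed by a random draw consistent with R, using a gain
     # estimated from the forecast ensemble statistics.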
2316     if selfA._parameters["EstimationOf"] == "Parameters":
2317         selfA._parameters["StoreInternalVariables"] = True
2318     #
2319     # Operators
2320     H = HO["Direct"].appliedControledFormTo
2321     #
2322     if selfA._parameters["EstimationOf"] == "State":
2323         M = EM["Direct"].appliedControledFormTo
2324     #
2325     if CM is not None and "Tangent" in CM and U is not None:
2326         Cm = CM["Tangent"].asMatrix(Xb)
2327     else:
2328         Cm = None
2329     #
2330     # Observation duration and sizes
2331     if hasattr(Y,"stepnumber"):
2332         duration = Y.stepnumber()
2333         __p = numpy.cumprod(Y.shape())[-1]
2334     else:
2335         duration = 2
2336         __p = numpy.array(Y).size
2337     #
2338     # Precompute the inverses of B and R
2339     if selfA._parameters["StoreInternalVariables"] \
2340         or selfA._toStore("CostFunctionJ") \
2341         or selfA._toStore("CostFunctionJb") \
2342         or selfA._toStore("CostFunctionJo") \
2343         or selfA._toStore("CurrentOptimum") \
2344         or selfA._toStore("APosterioriCovariance"):
2345         BI = B.getI()
2346         RI = R.getI()
2347     #
2348     __n = Xb.size
2349     __m = selfA._parameters["NumberOfMembers"]
2350     #
2351     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2352     else:                         Pn = B
2353     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2354     else:                         Rn = R
2355     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2356     #
2357     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2358         selfA.StoredVariables["Analysis"].store( Xb )
2359         if selfA._toStore("APosterioriCovariance"):
2360             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2361             covarianceXa = Pn
2362     #
2363     previousJMinimum = numpy.finfo(float).max
         nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Reference needed by the IndexMin computations in the loop below
2364     #
2365     for step in range(duration-1):
2366         if hasattr(Y,"store"):
2367             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2368         else:
2369             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2370         #
2371         if U is not None:
2372             if hasattr(U,"store") and len(U)>1:
2373                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2374             elif hasattr(U,"store") and len(U)==1:
2375                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2376             else:
2377                 Un = numpy.asmatrix(numpy.ravel( U )).T
2378         else:
2379             Un = None
2380         #
2381         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2382             Xn = CovarianceInflation( Xn,
2383                 selfA._parameters["InflationType"],
2384                 selfA._parameters["InflationFactor"],
2385                 )
2386         #
2387         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2388             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2389                 argsAsSerie = True,
2390                 returnSerieAsArrayMatrix = True )
2391             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2392             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2393                 argsAsSerie = True,
2394                 returnSerieAsArrayMatrix = True )
2395             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
2396                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2397                 Xn_predicted = Xn_predicted + Cm * Un
2398         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2399             # ---> By convention, M = Id, Q = 0
2400             Xn_predicted = Xn
2401             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2402                 argsAsSerie = True,
2403                 returnSerieAsArrayMatrix = True )
2404         #
2405         # Mean of forecast and observation of forecast
2406         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2407         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2408         #
2409         #--------------------------
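         # Variant "KalmanFilterFormula05": gain Kn = Pf H^T (H Pf H^T + R)^{-1}
         # estimated from the forecast anomalies, each member being updated with
         # its own observation perturbed by a draw from Rn.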
2410         if VariantM == "KalmanFilterFormula05":
2411             PfHT, HPfHT = 0., 0.
2412             for i in range(__m):
2413                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2414                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2415                 PfHT  += Exfi * Eyfi.T
2416                 HPfHT += Eyfi * Eyfi.T
2417             PfHT  = (1./(__m-1)) * PfHT
2418             HPfHT = (1./(__m-1)) * HPfHT
2419             Kn     = PfHT * ( R + HPfHT ).I
2420             del PfHT, HPfHT
2421             #
2422             for i in range(__m):
2423                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2424                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2425         #--------------------------
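         # Variant "KalmanFilterFormula16": gain built from the scaled state and
         # observation anomalies, Kn = EaX EaY^T (EaY EaY^T)^{-1}, the
         # observation perturbations EpY being included in EaY.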
2426         elif VariantM == "KalmanFilterFormula16":
2427             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2428             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2429             #
2430             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2431             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2432             #
2433             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2434             #
2435             for i in range(__m):
2436                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2437         #--------------------------
2438         else:
2439             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2440         #
2441         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2442             Xn = CovarianceInflation( Xn,
2443                 selfA._parameters["InflationType"],
2444                 selfA._parameters["InflationFactor"],
2445                 )
2446         #
2447         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2448         #--------------------------
2449         #
2450         if selfA._parameters["StoreInternalVariables"] \
2451             or selfA._toStore("CostFunctionJ") \
2452             or selfA._toStore("CostFunctionJb") \
2453             or selfA._toStore("CostFunctionJo") \
2454             or selfA._toStore("APosterioriCovariance") \
2455             or selfA._toStore("InnovationAtCurrentAnalysis") \
2456             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2457             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2458             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2459             _Innovation = Ynpu - _HXa
2460         #
2461         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2462         # ---> with the analysis
2463         selfA.StoredVariables["Analysis"].store( Xa )
2464         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2465             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2466         if selfA._toStore("InnovationAtCurrentAnalysis"):
2467             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2468         # ---> with the current state
2469         if selfA._parameters["StoreInternalVariables"] \
2470             or selfA._toStore("CurrentState"):
2471             selfA.StoredVariables["CurrentState"].store( Xn )
2472         if selfA._toStore("ForecastState"):
2473             selfA.StoredVariables["ForecastState"].store( EMX )
2474         if selfA._toStore("BMA"):
2475             selfA.StoredVariables["BMA"].store( EMX - Xa )
2476         if selfA._toStore("InnovationAtCurrentState"):
2477             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2478         if selfA._toStore("SimulatedObservationAtCurrentState") \
2479             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2480             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2481         # ---> others
2482         if selfA._parameters["StoreInternalVariables"] \
2483             or selfA._toStore("CostFunctionJ") \
2484             or selfA._toStore("CostFunctionJb") \
2485             or selfA._toStore("CostFunctionJo") \
2486             or selfA._toStore("CurrentOptimum") \
2487             or selfA._toStore("APosterioriCovariance"):
2488             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2489             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2490             J   = Jb + Jo
2491             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2492             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2493             selfA.StoredVariables["CostFunctionJ" ].store( J )
2494             #
2495             if selfA._toStore("IndexOfOptimum") \
2496                 or selfA._toStore("CurrentOptimum") \
2497                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2498                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2499                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2500                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2501                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2502             if selfA._toStore("IndexOfOptimum"):
2503                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2504             if selfA._toStore("CurrentOptimum"):
2505                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2506             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2507                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2508             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2509                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2510             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2511                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2512             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2513                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2514         if selfA._toStore("APosterioriCovariance"):
2515             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2516         if selfA._parameters["EstimationOf"] == "Parameters" \
2517             and J < previousJMinimum:
2518             previousJMinimum    = J
2519             XaMin               = Xa
2520             if selfA._toStore("APosterioriCovariance"):
2521                 covarianceXaMin = Pn
2522     #
2523     # Final additional storage of the optimum in parameter estimation
2524     # ----------------------------------------------------------------------
2525     if selfA._parameters["EstimationOf"] == "Parameters":
2526         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2527         selfA.StoredVariables["Analysis"].store( XaMin )
2528         if selfA._toStore("APosterioriCovariance"):
2529             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2530         if selfA._toStore("BMA"):
2531             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2532     #
2533     return 0
2534
2535 # ==============================================================================
2536 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2537     """
2538     3DVAR
2539     """
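     # Standard (primal) 3DVAR: minimization in state space of
     # J(x) = 1/2 (x-Xb)^T B^{-1} (x-Xb) + 1/2 (Y-H(x))^T R^{-1} (Y-H(x)).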
2540     #
2541     # Initializations
2542     # ---------------
2543     #
2544     # Operators
2545     Hm = HO["Direct"].appliedTo
2546     Ha = HO["Adjoint"].appliedInXTo
2547     #
2548     # Possible use of a precomputed H(Xb) vector
2549     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2550         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2551     else:
2552         HXb = Hm( Xb )
2553     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2554     if Y.size != HXb.size:
2555         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2556     if max(Y.shape) != max(HXb.shape):
2557         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2558     #
2559     if selfA._toStore("JacobianMatrixAtBackground"):
2560         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2561         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2562         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2563     #
2564     # Precompute the inverses of B and R
2565     BI = B.getI()
2566     RI = R.getI()
2567     #
2568     # Starting point of the optimization
2569     Xini = selfA._parameters["InitializationPoint"]
2570     #
2571     # Definition of the cost function
2572     # ------------------------------
2573     def CostFunction(x):
2574         _X  = numpy.asmatrix(numpy.ravel( x )).T
2575         if selfA._parameters["StoreInternalVariables"] or \
2576             selfA._toStore("CurrentState") or \
2577             selfA._toStore("CurrentOptimum"):
2578             selfA.StoredVariables["CurrentState"].store( _X )
2579         _HX = Hm( _X )
2580         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2581         _Innovation = Y - _HX
2582         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2583             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2584             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2585         if selfA._toStore("InnovationAtCurrentState"):
2586             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2587         #
2588         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2589         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2590         J   = Jb + Jo
2591         #
2592         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2593         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2594         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2595         selfA.StoredVariables["CostFunctionJ" ].store( J )
2596         if selfA._toStore("IndexOfOptimum") or \
2597             selfA._toStore("CurrentOptimum") or \
2598             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2599             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2600             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2601             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2602             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2603         if selfA._toStore("IndexOfOptimum"):
2604             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2605         if selfA._toStore("CurrentOptimum"):
2606             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2607         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2608             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2609         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2610             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2611         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2612             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2613         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2614             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2615         return J
2616     #
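     # Gradient of the 3DVAR cost function,
     # grad J(x) = B^{-1} (x-Xb) - H^T R^{-1} (Y - H(x)),
     # the action of H^T being provided by the adjoint operator Ha.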
2617     def GradientOfCostFunction(x):
2618         _X      = numpy.asmatrix(numpy.ravel( x )).T
2619         _HX     = Hm( _X )
2620         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
2621         GradJb  = BI * (_X - Xb)
2622         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
2623         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2624         return GradJ
2625     #
2626     # Minimization of the cost functional
2627     # --------------------------------
2628     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2629     #
2630     if selfA._parameters["Minimizer"] == "LBFGSB":
2631         if "0.19" <= scipy.version.version <= "1.1.0":
2632             import lbfgsbhlt as optimiseur
2633         else:
2634             import scipy.optimize as optimiseur
2635         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2636             func        = CostFunction,
2637             x0          = Xini,
2638             fprime      = GradientOfCostFunction,
2639             args        = (),
2640             bounds      = selfA._parameters["Bounds"],
2641             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2642             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2643             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2644             iprint      = selfA._parameters["optiprint"],
2645             )
2646         nfeval = Informations['funcalls']
2647         rc     = Informations['warnflag']
2648     elif selfA._parameters["Minimizer"] == "TNC":
2649         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2650             func        = CostFunction,
2651             x0          = Xini,
2652             fprime      = GradientOfCostFunction,
2653             args        = (),
2654             bounds      = selfA._parameters["Bounds"],
2655             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2656             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2657             ftol        = selfA._parameters["CostDecrementTolerance"],
2658             messages    = selfA._parameters["optmessages"],
2659             )
2660     elif selfA._parameters["Minimizer"] == "CG":
2661         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2662             f           = CostFunction,
2663             x0          = Xini,
2664             fprime      = GradientOfCostFunction,
2665             args        = (),
2666             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2667             gtol        = selfA._parameters["GradientNormTolerance"],
2668             disp        = selfA._parameters["optdisp"],
2669             full_output = True,
2670             )
2671     elif selfA._parameters["Minimizer"] == "NCG":
2672         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2673             f           = CostFunction,
2674             x0          = Xini,
2675             fprime      = GradientOfCostFunction,
2676             args        = (),
2677             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2678             avextol     = selfA._parameters["CostDecrementTolerance"],
2679             disp        = selfA._parameters["optdisp"],
2680             full_output = True,
2681             )
2682     elif selfA._parameters["Minimizer"] == "BFGS":
2683         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2684             f           = CostFunction,
2685             x0          = Xini,
2686             fprime      = GradientOfCostFunction,
2687             args        = (),
2688             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2689             gtol        = selfA._parameters["GradientNormTolerance"],
2690             disp        = selfA._parameters["optdisp"],
2691             full_output = True,
2692             )
2693     else:
2694         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2695     #
2696     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2697     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2698     #
2699     # Workaround for a TNC bug affecting the returned Minimum
2700     # ----------------------------------------------------------------
2701     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2702         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2703     #
2704     # Computation of the analysis
2705     # ----------------------
2706     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2707     #
2708     selfA.StoredVariables["Analysis"].store( Xa )
2709     #
2710     if selfA._toStore("OMA") or \
2711         selfA._toStore("SigmaObs2") or \
2712         selfA._toStore("SimulationQuantiles") or \
2713         selfA._toStore("SimulatedObservationAtOptimum"):
2714         if selfA._toStore("SimulatedObservationAtCurrentState"):
2715             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2716         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2717             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2718         else:
2719             HXa = Hm( Xa )
2720     #
2721     # Computation of the analysis covariance
2722     # ---------------------------------
2723     if selfA._toStore("APosterioriCovariance") or \
2724         selfA._toStore("SimulationQuantiles") or \
2725         selfA._toStore("JacobianMatrixAtOptimum") or \
2726         selfA._toStore("KalmanGainAtOptimum"):
2727         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2728         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2729     if selfA._toStore("APosterioriCovariance") or \
2730         selfA._toStore("SimulationQuantiles") or \
2731         selfA._toStore("KalmanGainAtOptimum"):
2732         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2733         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2734     if selfA._toStore("APosterioriCovariance") or \
2735         selfA._toStore("SimulationQuantiles"):
2736         HessienneI = []
2737         nb = Xa.size
2738         for i in range(nb):
2739             _ee    = numpy.matrix(numpy.zeros(nb)).T
2740             _ee[i] = 1.
2741             _HtEE  = numpy.dot(HtM,_ee)
2742             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2743             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2744         HessienneI = numpy.matrix( HessienneI )
2745         A = HessienneI.I
2746         if min(A.shape) != max(A.shape):
2747             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despite the fact that it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2748         if (numpy.diag(A) < 0).any():
2749             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2750         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2751             try:
2752                 L = numpy.linalg.cholesky( A )
2753             except numpy.linalg.LinAlgError:
2754                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2755     if selfA._toStore("APosterioriCovariance"):
2756         selfA.StoredVariables["APosterioriCovariance"].store( A )
2757     if selfA._toStore("JacobianMatrixAtOptimum"):
2758         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2759     if selfA._toStore("KalmanGainAtOptimum"):
2760         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2761         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2762         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2763     #
2764     # Additional computations and/or storage
2765     # ---------------------------------------
2766     if selfA._toStore("Innovation") or \
2767         selfA._toStore("SigmaObs2") or \
2768         selfA._toStore("MahalanobisConsistency") or \
2769         selfA._toStore("OMB"):
2770         d  = Y - HXb
2771     if selfA._toStore("Innovation"):
2772         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2773     if selfA._toStore("BMA"):
2774         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2775     if selfA._toStore("OMA"):
2776         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2777     if selfA._toStore("OMB"):
2778         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2779     if selfA._toStore("SigmaObs2"):
2780         TraceR = R.trace(Y.size)
2781         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2782     if selfA._toStore("MahalanobisConsistency"):
2783         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2784     if selfA._toStore("SimulationQuantiles"):
2785         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2786     if selfA._toStore("SimulatedObservationAtBackground"):
2787         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2788     if selfA._toStore("SimulatedObservationAtOptimum"):
2789         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2790     #
2791     return 0
2792
2793 # ==============================================================================
2794 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2795     """
2796     4DVAR
2797     """
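     # Strong-constraint 4DVAR: the control variable is the initial state, with
     # J(x0) = 1/2 (x0-Xb)^T B^{-1} (x0-Xb)
     #       + 1/2 sum_k (Y_k - H(x_k))^T R^{-1} (Y_k - H(x_k)),
     # the states x_k being propagated over the window by the evolution model M.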
2798     #
2799     # Initializations
2800     # ---------------
2801     #
2802     # Operators
2803     Hm = HO["Direct"].appliedControledFormTo
2804     Mm = EM["Direct"].appliedControledFormTo
2805     #
2806     if CM is not None and "Tangent" in CM and U is not None:
2807         Cm = CM["Tangent"].asMatrix(Xb)
2808     else:
2809         Cm = None
2810     #
2811     def Un(_step):
2812         if U is not None:
2813             if hasattr(U,"store") and 1<=_step<len(U) :
2814                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2815             elif hasattr(U,"store") and len(U)==1:
2816                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2817             else:
2818                 _Un = numpy.asmatrix(numpy.ravel( U )).T
2819         else:
2820             _Un = None
2821         return _Un
2822     def CmUn(_xn,_un):
2823         if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is applied twice!
2824             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2825             _CmUn = _Cm * _un
2826         else:
2827             _CmUn = 0.
2828         return _CmUn
2829     #
2830     # Remark: the observations are used starting from time step number 1,
2831     # and are stored in Yo indexed accordingly. Step 0 is therefore not
2832     # used, since the first stage starts with the observation of
2833     # step 1.
2834     #
2835     # Number of steps identical to the number of observation steps
2836     if hasattr(Y,"stepnumber"):
2837         duration = Y.stepnumber()
2838     else:
2839         duration = 2
2840     #
2841     # Precompute the inverses of B and R
2842     BI = B.getI()
2843     RI = R.getI()
2844     #
2845     # Starting point of the optimization
2846     Xini = selfA._parameters["InitializationPoint"]
2847     #
2848     # Definition of the cost function
2849     # ------------------------------
2850     selfA.DirectCalculation = [None,] # Step 0 is not observed
2851     selfA.DirectInnovation  = [None,] # Step 0 is not observed
2852     def CostFunction(x):
2853         _X  = numpy.asmatrix(numpy.ravel( x )).T
2854         if selfA._parameters["StoreInternalVariables"] or \
2855             selfA._toStore("CurrentState") or \
2856             selfA._toStore("CurrentOptimum"):
2857             selfA.StoredVariables["CurrentState"].store( _X )
2858         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2859         selfA.DirectCalculation = [None,]
2860         selfA.DirectInnovation  = [None,]
2861         Jo  = 0.
2862         _Xn = _X
2863         for step in range(0,duration-1):
2864             if hasattr(Y,"store"):
2865                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2866             else:
2867                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2868             _Un = Un(step)
2869             #
2870             # Evolution step
2871             if selfA._parameters["EstimationOf"] == "State":
2872                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2873             elif selfA._parameters["EstimationOf"] == "Parameters":
2874                 pass
2875             #
2876             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2877                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2878                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2879             #
2880             # Observation misfit step
2881             if selfA._parameters["EstimationOf"] == "State":
2882                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2883             elif selfA._parameters["EstimationOf"] == "Parameters":
2884                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2885             #
2886             # Storage of the state
2887             selfA.DirectCalculation.append( _Xn )
2888             selfA.DirectInnovation.append( _YmHMX )
2889             #
2890             # Contribution to the observation part of the cost functional
2891             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2892         J = Jb + Jo
2893         #
2894         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2895         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2896         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2897         selfA.StoredVariables["CostFunctionJ" ].store( J )
2898         if selfA._toStore("IndexOfOptimum") or \
2899             selfA._toStore("CurrentOptimum") or \
2900             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2901             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2902             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2903             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2904         if selfA._toStore("IndexOfOptimum"):
2905             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2906         if selfA._toStore("CurrentOptimum"):
2907             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2908         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2909             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2910         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2911             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2912         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2913             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2914         return J
2915     #
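     # The gradient is obtained by the adjoint method: a backward sweep over the
     # window accumulates GradJo <- M^T (GradJo + H^T R^{-1} (Y_k - H(x_k)))
     # from the states and innovations stored during the direct cost evaluation,
     # and finally GradJ = GradJb - GradJo.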
2916     def GradientOfCostFunction(x):
2917         _X      = numpy.asmatrix(numpy.ravel( x )).T
2918         GradJb  = BI * (_X - Xb)
2919         GradJo  = 0.
2920         for step in range(duration-1,0,-1):
2921             # Retrieve the last stored evolution state
2922             _Xn = selfA.DirectCalculation.pop()
2923             # Retrieve the last stored innovation
2924             _YmHMX = selfA.DirectInnovation.pop()
2925             # Computation of the adjoint operators
2926             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2927             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2928             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2929             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2930             # Gradient computation by the adjoint state
2931             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
2932             GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
2933         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2934         return GradJ
2935     #
2936     # Minimization of the cost functional
2937     # --------------------------------
2938     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2939     #
2940     if selfA._parameters["Minimizer"] == "LBFGSB":
2941         if "0.19" <= scipy.version.version <= "1.1.0":
2942             import lbfgsbhlt as optimiseur
2943         else:
2944             import scipy.optimize as optimiseur
2945         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2946             func        = CostFunction,
2947             x0          = Xini,
2948             fprime      = GradientOfCostFunction,
2949             args        = (),
2950             bounds      = selfA._parameters["Bounds"],
2951             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2952             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2953             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2954             iprint      = selfA._parameters["optiprint"],
2955             )
2956         nfeval = Informations['funcalls']
2957         rc     = Informations['warnflag']
2958     elif selfA._parameters["Minimizer"] == "TNC":
2959         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2960             func        = CostFunction,
2961             x0          = Xini,
2962             fprime      = GradientOfCostFunction,
2963             args        = (),
2964             bounds      = selfA._parameters["Bounds"],
2965             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2966             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2967             ftol        = selfA._parameters["CostDecrementTolerance"],
2968             messages    = selfA._parameters["optmessages"],
2969             )
2970     elif selfA._parameters["Minimizer"] == "CG":
2971         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2972             f           = CostFunction,
2973             x0          = Xini,
2974             fprime      = GradientOfCostFunction,
2975             args        = (),
2976             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2977             gtol        = selfA._parameters["GradientNormTolerance"],
2978             disp        = selfA._parameters["optdisp"],
2979             full_output = True,
2980             )
2981     elif selfA._parameters["Minimizer"] == "NCG":
2982         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2983             f           = CostFunction,
2984             x0          = Xini,
2985             fprime      = GradientOfCostFunction,
2986             args        = (),
2987             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2988             avextol     = selfA._parameters["CostDecrementTolerance"],
2989             disp        = selfA._parameters["optdisp"],
2990             full_output = True,
2991             )
2992     elif selfA._parameters["Minimizer"] == "BFGS":
2993         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2994             f           = CostFunction,
2995             x0          = Xini,
2996             fprime      = GradientOfCostFunction,
2997             args        = (),
2998             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2999             gtol        = selfA._parameters["GradientNormTolerance"],
3000             disp        = selfA._parameters["optdisp"],
3001             full_output = True,
3002             )
3003     else:
3004         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3005     #
3006     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3007     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3008     #
3009     # Workaround for a TNC bug affecting the returned Minimum
3010     # ----------------------------------------------------------------
3011     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3012         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3013     #
3014     # Computation of the analysis
3015     # ----------------------
3016     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3017     #
3018     selfA.StoredVariables["Analysis"].store( Xa )
3019     #
3020     # Additional computations and/or storage
3021     # ---------------------------------------
3022     if selfA._toStore("BMA"):
3023         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3024     #
3025     return 0
3026
3027 # ==============================================================================
3028 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3029     """
3030     3DVAR variational analysis with no inversion of B
3031     """
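     # Change of variable x = Xb + B v ("no inversion of B"): the background
     # term becomes Jb = 1/2 v^T B^T v, so only B and R^{-1} are needed, and the
     # minimization starts from v = 0 (i.e. x = Xb).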
3032     #
3033     # Initializations
3034     # ---------------
3035     #
3036     # Operators
3037     Hm = HO["Direct"].appliedTo
3038     Ha = HO["Adjoint"].appliedInXTo
3039     #
3040     # Precompute the transpose of B and the inverse of R
3041     BT = B.getT()
3042     RI = R.getI()
3043     #
3044     # Starting point of the optimization
3045     Xini = numpy.zeros(Xb.shape)
3046     #
3047     # Definition of the cost function
3048     # ------------------------------
3049     def CostFunction(v):
3050         _V = numpy.asmatrix(numpy.ravel( v )).T
3051         _X = Xb + B * _V
3052         if selfA._parameters["StoreInternalVariables"] or \
3053             selfA._toStore("CurrentState") or \
3054             selfA._toStore("CurrentOptimum"):
3055             selfA.StoredVariables["CurrentState"].store( _X )
3056         _HX = Hm( _X )
3057         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3058         _Innovation = Y - _HX
3059         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3060             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3061             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3062         if selfA._toStore("InnovationAtCurrentState"):
3063             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3064         #
3065         Jb  = float( 0.5 * _V.T * BT * _V )
3066         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3067         J   = Jb + Jo
3068         #
3069         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3070         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3071         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3072         selfA.StoredVariables["CostFunctionJ" ].store( J )
3073         if selfA._toStore("IndexOfOptimum") or \
3074             selfA._toStore("CurrentOptimum") or \
3075             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3076             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3077             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3078             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3079             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3080         if selfA._toStore("IndexOfOptimum"):
3081             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3082         if selfA._toStore("CurrentOptimum"):
3083             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3084         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3085             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3086         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3087             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3088         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3089             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3090         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3091             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3092         return J
3093     #
3094     def GradientOfCostFunction(v):
3095         _V = numpy.asmatrix(numpy.ravel( v )).T
3096         _X = Xb + B * _V
3097         _HX     = Hm( _X )
3098         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3099         GradJb  = BT * _V
3100         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3101         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3102         return GradJ
3103     #
3104     # Minimization of the cost functional
3105     # --------------------------------
3106     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3107     #
3108     if selfA._parameters["Minimizer"] == "LBFGSB":
3109         if "0.19" <= scipy.version.version <= "1.1.0":
3110             import lbfgsbhlt as optimiseur
3111         else:
3112             import scipy.optimize as optimiseur
3113         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3114             func        = CostFunction,
3115             x0          = Xini,
3116             fprime      = GradientOfCostFunction,
3117             args        = (),
3118             bounds      = selfA._parameters["Bounds"],
3119             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3120             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3121             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3122             iprint      = selfA._parameters["optiprint"],
3123             )
3124         nfeval = Informations['funcalls']
3125         rc     = Informations['warnflag']
3126     elif selfA._parameters["Minimizer"] == "TNC":
3127         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3128             func        = CostFunction,
3129             x0          = Xini,
3130             fprime      = GradientOfCostFunction,
3131             args        = (),
3132             bounds      = selfA._parameters["Bounds"],
3133             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3134             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3135             ftol        = selfA._parameters["CostDecrementTolerance"],
3136             messages    = selfA._parameters["optmessages"],
3137             )
3138     elif selfA._parameters["Minimizer"] == "CG":
3139         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3140             f           = CostFunction,
3141             x0          = Xini,
3142             fprime      = GradientOfCostFunction,
3143             args        = (),
3144             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3145             gtol        = selfA._parameters["GradientNormTolerance"],
3146             disp        = selfA._parameters["optdisp"],
3147             full_output = True,
3148             )
3149     elif selfA._parameters["Minimizer"] == "NCG":
3150         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3151             f           = CostFunction,
3152             x0          = Xini,
3153             fprime      = GradientOfCostFunction,
3154             args        = (),
3155             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3156             avextol     = selfA._parameters["CostDecrementTolerance"],
3157             disp        = selfA._parameters["optdisp"],
3158             full_output = True,
3159             )
3160     elif selfA._parameters["Minimizer"] == "BFGS":
3161         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3162             f           = CostFunction,
3163             x0          = Xini,
3164             fprime      = GradientOfCostFunction,
3165             args        = (),
3166             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3167             gtol        = selfA._parameters["GradientNormTolerance"],
3168             disp        = selfA._parameters["optdisp"],
3169             full_output = True,
3170             )
3171     else:
3172         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3173     #
3174     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3175     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3176     #
3177     # Workaround for a TNC bug affecting the returned Minimum
3178     # ----------------------------------------------------------------
3179     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3180         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3181         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3182     else:
3183         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3184     #
3185     # Computation of the analysis
3186     # ----------------------
3187     Xa = Minimum
3188     #
3189     selfA.StoredVariables["Analysis"].store( Xa )
3190     #
3191     if selfA._toStore("OMA") or \
3192         selfA._toStore("SigmaObs2") or \
3193         selfA._toStore("SimulationQuantiles") or \
3194         selfA._toStore("SimulatedObservationAtOptimum"):
3195         if selfA._toStore("SimulatedObservationAtCurrentState"):
3196             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3197         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3198             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3199         else:
3200             HXa = Hm( Xa )
3201     #
3202     # Computation of the analysis covariance
3203     # ---------------------------------
3204     if selfA._toStore("APosterioriCovariance") or \
3205         selfA._toStore("SimulationQuantiles") or \
3206         selfA._toStore("JacobianMatrixAtOptimum") or \
3207         selfA._toStore("KalmanGainAtOptimum"):
3208         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3209         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3210     if selfA._toStore("APosterioriCovariance") or \
3211         selfA._toStore("SimulationQuantiles") or \
3212         selfA._toStore("KalmanGainAtOptimum"):
3213         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3214         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3215     if selfA._toStore("APosterioriCovariance") or \
3216         selfA._toStore("SimulationQuantiles"):
3217         BI = B.getI()
3218         HessienneI = []
3219         nb = Xa.size
3220         for i in range(nb):
3221             _ee    = numpy.matrix(numpy.zeros(nb)).T
3222             _ee[i] = 1.
3223             _HtEE  = numpy.dot(HtM,_ee)
3224             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3225             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3226         HessienneI = numpy.matrix( HessienneI )
3227         A = HessienneI.I
3228         if min(A.shape) != max(A.shape):
3229             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despite the fact that it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3230         if (numpy.diag(A) < 0).any():
3231             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3232         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
3233             try:
3234                 L = numpy.linalg.cholesky( A )
3235             except numpy.linalg.LinAlgError:
3236                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3237     if selfA._toStore("APosterioriCovariance"):
3238         selfA.StoredVariables["APosterioriCovariance"].store( A )
3239     if selfA._toStore("JacobianMatrixAtOptimum"):
3240         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3241     if selfA._toStore("KalmanGainAtOptimum"):
3242         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3243         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3244         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3245     #
3246     # Additional computations and/or storage
3247     # ---------------------------------------
3248     if selfA._toStore("Innovation") or \
3249         selfA._toStore("SigmaObs2") or \
3250         selfA._toStore("MahalanobisConsistency") or \
3251         selfA._toStore("OMB"):
3252         d  = Y - HXb
3253     if selfA._toStore("Innovation"):
3254         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3255     if selfA._toStore("BMA"):
3256         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3257     if selfA._toStore("OMA"):
3258         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3259     if selfA._toStore("OMB"):
3260         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3261     if selfA._toStore("SigmaObs2"):
3262         TraceR = R.trace(Y.size)
3263         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3264     if selfA._toStore("MahalanobisConsistency"):
3265         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3266     if selfA._toStore("SimulationQuantiles"):
3267         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3268     if selfA._toStore("SimulatedObservationAtBackground"):
3269         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3270     if selfA._toStore("SimulatedObservationAtOptimum"):
3271         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3272     #
3273     return 0
3274
3275 # ==============================================================================
3276 if __name__ == "__main__":
3277     print('\n AUTODIAGNOSTIC\n')