1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     This class acts as an interface to define approximated operators. When an
55     object is created from a given function "Function", one gets an object
56     providing the 3 methods "DirectOperator", "TangentOperator" and
57     "AdjointOperator". The FD approximation is controlled either by the
58     multiplicative increment "increment" (1% by default), or by the fixed
59     increment "dX" which is then multiplied by "increment" (hence in %), and
60     centered FD are used if the boolean "centeredDF" is true.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
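        # Searches the list l of previously used points, most recent first, for an
        # entry l[i] such that ||e - l[i]|| < self.__tolerBP * n[i], and returns
        # the pair (found, index), with index = -1 when no duplicate is found.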
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Computation of the direct operator using the provided function.
171
172         NB: the extraArgs are there to ensure call compatibility, but they
173         must not be passed here to the user function.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Computation of the tangent operator as the Jacobian matrix by finite
188         differences, i.e. the gradient of H at X. Directional finite differences
189         are used around the point X. X is a numpy.matrix.
190
191         Centered finite differences (second-order approximation):
192         1/ For each component i of X, the perturbation dX[i] is added to and
193            subtracted from the component X[i] to build X_plus_dXi and X_moins_dXi,
194            and the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi =
195            H( X_moins_dXi ) are computed
196         2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided by
197            the step 2*dXi
198         3/ Each result, component by component, becomes a column of the Jacobian
199
200         Non-centered finite differences (first-order approximation):
201         1/ For each component i of X, the perturbation dX[i] is added to the
202            component X[i] to build X_plus_dXi, and the response
203            HX_plus_dXi = H( X_plus_dXi ) is computed
204         2/ The central value HX = H(X) is computed
205         3/ The differences (HX_plus_dXi-HX) are formed and divided by
206            the step dXi
207         4/ Each result, component by component, becomes a column of the Jacobian
208
209         """
210         logging.debug("FDA Début du calcul de la Jacobienne")
211         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Calcul Jacobienne (explicite)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                      #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                    #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
377         #
378         logging.debug("FDA Fin du calcul de la Jacobienne")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Computation of the tangent operator using the Jacobian matrix.
386
387         NB: the extraArgs are there to ensure call compatibility, but they
388         must not be passed here to the user function.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect length of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Computation of the matrix form if the second argument is None
402             # -------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Computation of the linearized value of H at X applied to dX
408             # ------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Computation of the adjoint operator using the Jacobian matrix.
418
419         NB: the extraArgs are there to ensure call compatibility, but they
420         must not be passed here to the user function.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect length of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Computation of the matrix form if the second argument is None
434             # -------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Computation of the adjoint value at X applied to Y
440             # --------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
449     #
450     _bgcenter = numpy.ravel(_bgcenter)[:,None]
451     if _nbmembers < 1:
452         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
453     #
454     if _bgcovariance is None:
455         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
456     else:
457         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
459     #
460     return BackgroundEnsemble
461
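# Hedged usage sketch (illustrative only, kept as a comment): drawing an
# ensemble of 5 states of size 3 around a given center, with an assumed
# identity background covariance. The names "Xb" and "E" are hypothetical.
#
#     Xb = numpy.zeros(3)
#     E  = EnsembleOfCenteredPerturbations( Xb, numpy.identity(3), 5 )
#     # E has shape (3, 5); each column is Xb plus a Gaussian perturbation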
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
465     def __CenteredRandomAnomalies(Zr, N):
466         """
467         Generates a matrix of N random anomalies centered on Zr, following the
468         handwritten notes of MB and consistent with the code of PS with eps = -1
469         """
470         eps = -1
471         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
474         Q = numpy.dot(Q,R)
475         Zr = numpy.dot(Q,Zr)
476         return Zr.T
477     #
478     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
479     if _nbmembers < 1:
480         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
481     if _bgcovariance is None:
482         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
483     else:
484         if _withSVD:
485             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486             _nbctl = _bgcenter.size
487             if _nbmembers > _nbctl:
488                 _Z = numpy.concatenate((numpy.dot(
489                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
491             else:
492                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494             BackgroundEnsemble = _bgcenter + _Zca
495         else:
496             if max(abs(_bgcovariance.flatten())) > 0:
497                 _nbctl = _bgcenter.size
498                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500                 BackgroundEnsemble = _bgcenter + _Zca
501             else:
502                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
503     #
504     return BackgroundEnsemble
505
506 # ==============================================================================
507 def EnsembleMean( __Ensemble ):
508     "Renvoie la moyenne empirique d'un ensemble"
509     return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
510
511 # ==============================================================================
512 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
513     "Renvoie les anomalies centrées à partir d'un ensemble"
514     if OptMean is None:
515         __Em = EnsembleMean( Ensemble )
516     else:
517         __Em = numpy.ravel(OptMean).reshape((-1,1))
518     #
519     return Normalisation * (numpy.asarray(Ensemble) - __Em)
520
521 # ==============================================================================
522 def EnsembleErrorCovariance( Ensemble, __quick = False ):
523     "Renvoie l'estimation empirique de la covariance d'ensemble"
524     if __quick:
525         # Fast covariance, but rarely positive definite
526         __Covariance = numpy.cov(Ensemble)
527     else:
528         # Result often identical to numpy.cov, but more robust
529         __n, __m = numpy.asarray(Ensemble).shape
530         __Anomalies = EnsembleOfAnomalies( Ensemble )
531         # Empirical estimate
532         __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
533         # Enforce symmetry
534         __Covariance = (__Covariance + __Covariance.T) * 0.5
535         # Enforce positive definiteness
536         __epsilon    = mpr*numpy.trace(__Covariance)
537         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
538     #
539     return __Covariance
540
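# Hedged sketch tying the three ensemble helpers together (illustrative only):
# for an ensemble E of shape (n, m), and up to the regularization term added
# above, one has (the name "E" is hypothetical):
#
#     Xm = EnsembleMean( E )                  # shape (n, 1)
#     A  = EnsembleOfAnomalies( E, Xm )       # E - Xm, shape (n, m)
#     P  = EnsembleErrorCovariance( E )       # ~ (A @ A.T) / (m - 1)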
541 # ==============================================================================
542 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
543     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
544     if hasattr(__Covariance,"assparsematrix"):
545         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
546             # Handling of a zero or near-zero covariance
547             return __Ensemble
548         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
549             # Handling of a zero or near-zero covariance
550             return __Ensemble
551     else:
552         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
553             # Handling of a zero or near-zero covariance
554             return __Ensemble
555         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
556             # Handling of a zero or near-zero covariance
557             return __Ensemble
558     #
559     __n, __m = __Ensemble.shape
560     if __Seed is not None: numpy.random.seed(__Seed)
561     #
562     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
563         # Handling of a covariance that is a multiple of the identity
564         __zero = 0.
565         __std  = numpy.sqrt(__Covariance.assparsematrix())
566         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
567     #
568     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
569         # Handling of a diagonal covariance with non-identical variances
570         __zero = numpy.zeros(__n)
571         __std  = numpy.sqrt(__Covariance.assparsematrix())
572         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
573     #
574     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
575         # Handling of a full covariance
576         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
577     #
578     elif isinstance(__Covariance, numpy.ndarray):
579         # Handling of a full numpy covariance, knowing that this case is reached last
580         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
581     #
582     else:
583         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
584     #
585     return __Ensemble
586
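# Hedged usage sketch (illustrative only): perturbing a (3, 5) ensemble with a
# full numpy covariance and a fixed seed. The name "E" is hypothetical.
#
#     E = numpy.zeros((3, 5))
#     E = EnsemblePerturbationWithGivenCovariance( E, 0.1*numpy.identity(3), __Seed=1000 )
#     # each of the 5 columns now holds an independent draw from N(0, 0.1*I)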
587 # ==============================================================================
588 def CovarianceInflation(
589         InputCovOrEns,
590         InflationType   = None,
591         InflationFactor = None,
592         BackgroundCov   = None,
593         ):
594     """
595     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
596
597     Summary: Hunt 2007, section 2.3.5
598     """
599     if InflationFactor is None:
600         return InputCovOrEns
601     else:
602         InflationFactor = float(InflationFactor)
603     #
604     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
605         if InflationFactor < 1.:
606             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
607         if InflationFactor < 1.+mpr:
608             return InputCovOrEns
609         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
610     #
611     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
612         if InflationFactor < 1.:
613             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
614         if InflationFactor < 1.+mpr:
615             return InputCovOrEns
616         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
617         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
618             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
619     #
620     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
621         if InflationFactor < 0.:
622             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
623         if InflationFactor < mpr:
624             return InputCovOrEns
625         __n, __m = numpy.asarray(InputCovOrEns).shape
626         if __n != __m:
627             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
628         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
629     #
630     elif InflationType == "HybridOnBackgroundCovariance":
631         if InflationFactor < 0.:
632             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
633         if InflationFactor < mpr:
634             return InputCovOrEns
635         __n, __m = numpy.asarray(InputCovOrEns).shape
636         if __n != __m:
637             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
638         if BackgroundCov is None:
639             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
640         if InputCovOrEns.shape != BackgroundCov.shape:
641             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
642         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
643     #
644     elif InflationType == "Relaxation":
645         raise NotImplementedError("InflationType Relaxation")
646     #
647     else:
648         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
649     #
650     return OutputCovOrEns
651
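# Hedged sketch of the multiplicative inflation on anomalies (illustrative
# only): each member is pulled away from the ensemble mean by the factor,
# so the mean is preserved and the spread is inflated. "E" is hypothetical.
#
#     E  = numpy.array([[1., 2., 3.]])   # 1 variable, 3 members, mean 2
#     Ei = CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 2. )
#     # Ei == [[0., 2., 4.]] : anomalies (-1, 0, +1) are doubled around the mean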
652 # ==============================================================================
653 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
654     "Estimation des quantiles a posteriori (selfA est modifié)"
655     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
656     #
657     # Handling of the bounds
658     if "StateBoundsForQuantiles" in selfA._parameters:
659         LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes priority
660     elif "Bounds" in selfA._parameters:
661         LBounds = selfA._parameters["Bounds"]  # Reasonable default
662     else:
663         LBounds = None
664     if LBounds is not None:
665         def NoneRemove(paire):
666             bmin, bmax = paire
667             if bmin is None: bmin = numpy.finfo('float').min
668             if bmax is None: bmax = numpy.finfo('float').max
669             return [bmin, bmax]
670         LBounds = numpy.matrix( [NoneRemove(paire) for paire in LBounds] )
671     #
672     # Sampling of the states
673     YfQ  = None
674     EXr  = None
675     if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
676         HXa  = numpy.matrix(numpy.ravel( HXa )).T
677     for i in range(nbsamples):
678         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
679             dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
680             if LBounds is not None: # "EstimateProjection" by default
681                 dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0]) - Xa),axis=1)
682                 dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1]) - Xa),axis=1)
683             dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
684             Yr = HXa + dYr
685             if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
686         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
687             Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
688             if LBounds is not None: # "EstimateProjection" by default
689                 Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
690                 Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
691             Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
692         else:
693             raise ValueError("Quantile simulations have to be either Linear or NonLinear.")
694         #
695         if YfQ is None:
696             YfQ = Yr
697             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
698         else:
699             YfQ = numpy.hstack((YfQ,Yr))
700             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
701     #
702     # Extraction of the quantiles
703     YfQ.sort(axis=-1)
704     YQ = None
705     for quantile in selfA._parameters["Quantiles"]:
706         if not (0. <= float(quantile) <= 1.): continue
707         indice = int(nbsamples * float(quantile) - 1./nbsamples)
708         if YQ is None: YQ = YfQ[:,indice]
709         else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
710     selfA.StoredVariables["SimulationQuantiles"].store( YQ )
711     if selfA._toStore("SampledStateForQuantiles"):
712         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
713     #
714     return 0
715
716 # ==============================================================================
717 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
718     """
719     EnKS
720     """
721     #
722     # Operators
723     H = HO["Direct"].appliedControledFormTo
724     #
725     if selfA._parameters["EstimationOf"] == "State":
726         M = EM["Direct"].appliedControledFormTo
727     #
728     if CM is not None and "Tangent" in CM and U is not None:
729         Cm = CM["Tangent"].asMatrix(Xb)
730     else:
731         Cm = None
732     #
733     # Precomputation of the B and R inversions
734     RIdemi = R.sqrtmI()
735     #
736     # Observation duration and sizes
737     LagL = selfA._parameters["SmootherLagL"]
738     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
739         raise ValueError("Fixed-lag smoother requires a series of observations")
740     if Y.stepnumber() < LagL:
741         raise ValueError("Fixed-lag smoother requires a series of observations at least as long as the lag L")
742     duration = Y.stepnumber()
743     __p = numpy.cumprod(Y.shape())[-1]
744     __n = Xb.size
745     __m = selfA._parameters["NumberOfMembers"]
746     #
747     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
748         selfA.StoredVariables["Analysis"].store( Xb )
749         if selfA._toStore("APosterioriCovariance"):
750             if hasattr(B,"asfullmatrix"):
751                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
752             else:
753                 selfA.StoredVariables["APosterioriCovariance"].store( B )
754     #
755     # Initial direct computation (memorization is preferred over recomputation)
756     __seed = numpy.random.get_state()
757     selfB = copy.deepcopy(selfA)
758     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
759     if VariantM == "EnKS16-KalmanFilterFormula":
760         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
761     else:
762         raise ValueError("VariantM has to be chosen in the authorized methods list.")
763     if LagL > 0:
764         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
765     else:
766         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
767     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
768     #
769     for step in range(LagL,duration-1):
770         #
771         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
772         sEL.append(None)
773         #
774         if hasattr(Y,"store"):
775             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
776         else:
777             Ynpu = numpy.ravel( Y ).reshape((__p,1))
778         #
779         if U is not None:
780             if hasattr(U,"store") and len(U)>1:
781                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
782             elif hasattr(U,"store") and len(U)==1:
783                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
784             else:
785                 Un = numpy.asmatrix(numpy.ravel( U )).T
786         else:
787             Un = None
788         #
789         #--------------------------
790         if VariantM == "EnKS16-KalmanFilterFormula":
791             if selfA._parameters["EstimationOf"] == "State": # Forecast
792                 EL = M( [(EL[:,i], Un) for i in range(__m)],
793                     argsAsSerie = True,
794                     returnSerieAsArrayMatrix = True )
795                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
796                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
797                     argsAsSerie = True,
798                     returnSerieAsArrayMatrix = True )
799                 if Cm is not None and Un is not None: # Warning: if Cm is also in M, this is a duplication!
800                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
801                     EZ = EZ + Cm * Un
802             elif selfA._parameters["EstimationOf"] == "Parameters":
803                 # --- > By principle, M = Id, Q = 0
804                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
805                     argsAsSerie = True,
806                     returnSerieAsArrayMatrix = True )
807             #
808             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
809             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
810             #
811             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
812             mS    = mS.reshape((-1,__m)) # For dimension 1
813             delta = RIdemi @ ( Ynpu - vZm )
814             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
815             vw    = mT @ mS.T @ delta
816             #
817             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
818             mU    = numpy.identity(__m)
819             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
820             #
821             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
822             EL    = vEm + EX @ wTU
823             #
824             sEL[LagL] = EL
825             for irl in range(LagL): # Smoothing of the L previous analyses
826                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
827                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
828                 sEL[irl] = vEm + EX @ wTU
829             #
830             # Keep the order-0 retrospective analysis before rotation
831             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
832             if selfA._toStore("APosterioriCovariance"):
833                 EXn = sEL[0]
834             #
835             for irl in range(LagL):
836                 sEL[irl] = sEL[irl+1]
837             sEL[LagL] = None
838         #--------------------------
839         else:
840             raise ValueError("VariantM has to be chosen in the authorized methods list.")
841         #
842         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
843         # ---> with analysis
844         selfA.StoredVariables["Analysis"].store( Xa )
845         if selfA._toStore("APosterioriCovariance"):
846             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
847     #
848     # Storage of the last, incompletely updated, analyses
849     for irl in range(LagL):
850         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
851         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
852         selfA.StoredVariables["Analysis"].store( Xa )
853     #
854     return 0
855
856 # ==============================================================================
857 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
858     """
859     Ensemble-Transform EnKF
860     """
861     if selfA._parameters["EstimationOf"] == "Parameters":
862         selfA._parameters["StoreInternalVariables"] = True
863     #
864     # Operators
865     H = HO["Direct"].appliedControledFormTo
866     #
867     if selfA._parameters["EstimationOf"] == "State":
868         M = EM["Direct"].appliedControledFormTo
869     #
870     if CM is not None and "Tangent" in CM and U is not None:
871         Cm = CM["Tangent"].asMatrix(Xb)
872     else:
873         Cm = None
874     #
875     # Observation duration and sizes
876     if hasattr(Y,"stepnumber"):
877         duration = Y.stepnumber()
878         __p = numpy.cumprod(Y.shape())[-1]
879     else:
880         duration = 2
881         __p = numpy.array(Y).size
882     #
883     # Precomputation of the B and R inversions
884     if selfA._parameters["StoreInternalVariables"] \
885         or selfA._toStore("CostFunctionJ") \
886         or selfA._toStore("CostFunctionJb") \
887         or selfA._toStore("CostFunctionJo") \
888         or selfA._toStore("CurrentOptimum") \
889         or selfA._toStore("APosterioriCovariance"):
890         BI = B.getI()
891         RI = R.getI()
892     elif VariantM != "KalmanFilterFormula":
893         RI = R.getI()
894     if VariantM == "KalmanFilterFormula":
895         RIdemi = R.sqrtmI()
896     #
897     __n = Xb.size
898     __m = selfA._parameters["NumberOfMembers"]
899     #
900     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
901         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
902         selfA.StoredVariables["Analysis"].store( Xb )
903         if selfA._toStore("APosterioriCovariance"):
904             if hasattr(B,"asfullmatrix"):
905                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
906             else:
907                 selfA.StoredVariables["APosterioriCovariance"].store( B )
908         selfA._setInternalState("seed", numpy.random.get_state())
909     elif selfA._parameters["nextStep"]:
910         Xn = selfA._getInternalState("Xn")
911     #
912     previousJMinimum = numpy.finfo(float).max
913     #
914     for step in range(duration-1):
915         numpy.random.set_state(selfA._getInternalState("seed"))
916         if hasattr(Y,"store"):
917             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
918         else:
919             Ynpu = numpy.ravel( Y ).reshape((__p,1))
920         #
921         if U is not None:
922             if hasattr(U,"store") and len(U)>1:
923                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
924             elif hasattr(U,"store") and len(U)==1:
925                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
926             else:
927                 Un = numpy.asmatrix(numpy.ravel( U )).T
928         else:
929             Un = None
930         #
931         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
932             Xn = CovarianceInflation( Xn,
933                 selfA._parameters["InflationType"],
934                 selfA._parameters["InflationFactor"],
935                 )
936         #
937         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
938             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
939                 argsAsSerie = True,
940                 returnSerieAsArrayMatrix = True )
941             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
942             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
943                 argsAsSerie = True,
944                 returnSerieAsArrayMatrix = True )
945             if Cm is not None and Un is not None: # Warning: if Cm is also in M, this is a duplication!
946                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
947                 Xn_predicted = Xn_predicted + Cm * Un
948         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
949             # --- > By principle, M = Id, Q = 0
950             Xn_predicted = EMX = Xn
951             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
952                 argsAsSerie = True,
953                 returnSerieAsArrayMatrix = True )
954         #
955         # Mean of forecast and observation of forecast
956         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
957         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
958         #
959         # Anomalies
960         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
961         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
962         #
963         #--------------------------
964         if VariantM == "KalmanFilterFormula":
965             mS    = RIdemi * EaHX / math.sqrt(__m-1)
966             mS    = mS.reshape((-1,__m)) # For dimension 1
967             delta = RIdemi * ( Ynpu - Hfm )
968             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
969             vw    = mT @ mS.T @ delta
970             #
971             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
972             mU    = numpy.identity(__m)
973             #
974             EaX   = EaX / math.sqrt(__m-1)
975             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
976         #--------------------------
977         elif VariantM == "Variational":
978             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
979             def CostFunction(w):
980                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
981                 _Jo = 0.5 * _A.T @ (RI * _A)
982                 _Jb = 0.5 * (__m-1) * w.T @ w
983                 _J  = _Jo + _Jb
984                 return float(_J)
985             def GradientOfCostFunction(w):
986                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
987                 _GradJo = - EaHX.T @ (RI * _A)
988                 _GradJb = (__m-1) * w.reshape((__m,1))
989                 _GradJ  = _GradJo + _GradJb
990                 return numpy.ravel(_GradJ)
991             vw = scipy.optimize.fmin_cg(
992                 f           = CostFunction,
993                 x0          = numpy.zeros(__m),
994                 fprime      = GradientOfCostFunction,
995                 args        = (),
996                 disp        = False,
997                 )
998             #
999             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1000             Htb = (__m-1) * numpy.identity(__m)
1001             Hta = Hto + Htb
1002             #
1003             Pta = numpy.linalg.inv( Hta )
1004             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1005             #
1006             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1007         #--------------------------
1008         elif VariantM == "FiniteSize11": # Jauge Boc2011
1009             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1010             def CostFunction(w):
1011                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1012                 _Jo = 0.5 * _A.T @ (RI * _A)
1013                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1014                 _J  = _Jo + _Jb
1015                 return float(_J)
1016             def GradientOfCostFunction(w):
1017                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1018                 _GradJo = - EaHX.T @ (RI * _A)
1019                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1020                 _GradJ  = _GradJo + _GradJb
1021                 return numpy.ravel(_GradJ)
1022             vw = scipy.optimize.fmin_cg(
1023                 f           = CostFunction,
1024                 x0          = numpy.zeros(__m),
1025                 fprime      = GradientOfCostFunction,
1026                 args        = (),
1027                 disp        = False,
1028                 )
1029             #
1030             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1031             Htb = __m * \
1032                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1033                 / (1 + 1/__m + vw.T @ vw)**2
1034             Hta = Hto + Htb
1035             #
1036             Pta = numpy.linalg.inv( Hta )
1037             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1038             #
1039             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1040         #--------------------------
1041         elif VariantM == "FiniteSize15": # Jauge Boc2015
1042             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1043             def CostFunction(w):
1044                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1045                 _Jo = 0.5 * _A.T @ (RI * _A)
1046                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1047                 _J  = _Jo + _Jb
1048                 return float(_J)
1049             def GradientOfCostFunction(w):
1050                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1051                 _GradJo = - EaHX.T @ (RI * _A)
1052                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1053                 _GradJ  = _GradJo + _GradJb
1054                 return numpy.ravel(_GradJ)
1055             vw = scipy.optimize.fmin_cg(
1056                 f           = CostFunction,
1057                 x0          = numpy.zeros(__m),
1058                 fprime      = GradientOfCostFunction,
1059                 args        = (),
1060                 disp        = False,
1061                 )
1062             #
1063             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1064             Htb = (__m+1) * \
1065                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1066                 / (1 + 1/__m + vw.T @ vw)**2
1067             Hta = Hto + Htb
1068             #
1069             Pta = numpy.linalg.inv( Hta )
1070             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1071             #
1072             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1073         #--------------------------
1074         elif VariantM == "FiniteSize16": # Jauge Boc2016
1075             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1076             def CostFunction(w):
1077                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1078                 _Jo = 0.5 * _A.T @ (RI * _A)
1079                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1080                 _J  = _Jo + _Jb
1081                 return float(_J)
1082             def GradientOfCostFunction(w):
1083                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1084                 _GradJo = - EaHX.T @ (RI * _A)
1085                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1086                 _GradJ  = _GradJo + _GradJb
1087                 return numpy.ravel(_GradJ)
1088             vw = scipy.optimize.fmin_cg(
1089                 f           = CostFunction,
1090                 x0          = numpy.zeros(__m),
1091                 fprime      = GradientOfCostFunction,
1092                 args        = (),
1093                 disp        = False,
1094                 )
1095             #
1096             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1097             Htb = ((__m+1) / (__m-1)) * \
1098                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1099                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1100             Hta = Hto + Htb
1101             #
1102             Pta = numpy.linalg.inv( Hta )
1103             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1104             #
1105             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1106         #--------------------------
1107         else:
1108             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1109         #
1110         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1111             Xn = CovarianceInflation( Xn,
1112                 selfA._parameters["InflationType"],
1113                 selfA._parameters["InflationFactor"],
1114                 )
1115         #
1116         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1117         #--------------------------
1118         selfA._setInternalState("Xn", Xn)
1119         selfA._setInternalState("seed", numpy.random.get_state())
1120         #--------------------------
1121         #
1122         if selfA._parameters["StoreInternalVariables"] \
1123             or selfA._toStore("CostFunctionJ") \
1124             or selfA._toStore("CostFunctionJb") \
1125             or selfA._toStore("CostFunctionJo") \
1126             or selfA._toStore("APosterioriCovariance") \
1127             or selfA._toStore("InnovationAtCurrentAnalysis") \
1128             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1129             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1130             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1131             _Innovation = Ynpu - _HXa
1132         #
1133         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1134         # ---> with analysis
1135         selfA.StoredVariables["Analysis"].store( Xa )
1136         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1137             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1138         if selfA._toStore("InnovationAtCurrentAnalysis"):
1139             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1140         # ---> with current state
1141         if selfA._parameters["StoreInternalVariables"] \
1142             or selfA._toStore("CurrentState"):
1143             selfA.StoredVariables["CurrentState"].store( Xn )
1144         if selfA._toStore("ForecastState"):
1145             selfA.StoredVariables["ForecastState"].store( EMX )
1146         if selfA._toStore("ForecastCovariance"):
1147             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1148         if selfA._toStore("BMA"):
1149             selfA.StoredVariables["BMA"].store( EMX - Xa )
1150         if selfA._toStore("InnovationAtCurrentState"):
1151             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1152         if selfA._toStore("SimulatedObservationAtCurrentState") \
1153             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1154             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1155         # ---> others
1156         if selfA._parameters["StoreInternalVariables"] \
1157             or selfA._toStore("CostFunctionJ") \
1158             or selfA._toStore("CostFunctionJb") \
1159             or selfA._toStore("CostFunctionJo") \
1160             or selfA._toStore("CurrentOptimum") \
1161             or selfA._toStore("APosterioriCovariance"):
1162             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1163             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1164             J   = Jb + Jo
1165             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1166             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1167             selfA.StoredVariables["CostFunctionJ" ].store( J )
1168             #
1169             if selfA._toStore("IndexOfOptimum") \
1170                 or selfA._toStore("CurrentOptimum") \
1171                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1172                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1173                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1174                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1175                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1176             if selfA._toStore("IndexOfOptimum"):
1177                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1178             if selfA._toStore("CurrentOptimum"):
1179                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1180             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1181                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1182             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1183                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1184             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1185                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1186             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1187                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1188         if selfA._toStore("APosterioriCovariance"):
1189             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1190         if selfA._parameters["EstimationOf"] == "Parameters" \
1191             and J < previousJMinimum:
1192             previousJMinimum    = J
1193             XaMin               = Xa
1194             if selfA._toStore("APosterioriCovariance"):
1195                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1196         # ---> For the smoothers
1197         if selfA._toStore("CurrentEnsembleState"):
1198             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1199     #
1200     # Additional final storage of the optimum in parameter estimation
1201     # ----------------------------------------------------------------------
1202     if selfA._parameters["EstimationOf"] == "Parameters":
1203         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1204         selfA.StoredVariables["Analysis"].store( XaMin )
1205         if selfA._toStore("APosterioriCovariance"):
1206             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1207         if selfA._toStore("BMA"):
1208             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1209     #
1210     return 0
1211
1212 # ==============================================================================
1213 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1214     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1215     """
1216     Iterative EnKF
1217     """
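    #
    # Reference sketch (illustrative only): each pass of the inner loop below
    # performs one Gauss-Newton step on the weight-space cost
    #   J(w) = 1/2 w^T.w + 1/2 (y - H(xm + A.w))^T R^{-1} (y - H(xm + A.w)),
    # with xm the ensemble mean and A the normalized anomalies, using the
    # ensemble-estimated observation anomalies EaY:
    #   grad J ~ w - EaY^T R^{-1} (y - H(.)),   hess J ~ I + EaY^T R^{-1} EaY,
    #   w <- w - (hess J)^{-1} grad J,
    # the analysis ensemble being rebuilt around the converged mean using the
    # transform Ta = (hess J)^{-1/2}.
    #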
1218     if selfA._parameters["EstimationOf"] == "Parameters":
1219         selfA._parameters["StoreInternalVariables"] = True
1220     #
1221     # Operators
1222     H = HO["Direct"].appliedControledFormTo
1223     #
1224     if selfA._parameters["EstimationOf"] == "State":
1225         M = EM["Direct"].appliedControledFormTo
1226     #
1227     if CM is not None and "Tangent" in CM and U is not None:
1228         Cm = CM["Tangent"].asMatrix(Xb)
1229     else:
1230         Cm = None
1231     #
1232     # Observation duration and sizes
1233     if hasattr(Y,"stepnumber"):
1234         duration = Y.stepnumber()
1235         __p = numpy.cumprod(Y.shape())[-1]
1236     else:
1237         duration = 2
1238         __p = numpy.array(Y).size
1239     #
1240     # Precomputation of the B and R inverses
1241     if selfA._parameters["StoreInternalVariables"] \
1242         or selfA._toStore("CostFunctionJ") \
1243         or selfA._toStore("CostFunctionJb") \
1244         or selfA._toStore("CostFunctionJo") \
1245         or selfA._toStore("CurrentOptimum") \
1246         or selfA._toStore("APosterioriCovariance"):
1247         BI = B.getI()
1248     RI = R.getI()
1249     #
1250     __n = Xb.size
1251     __m = selfA._parameters["NumberOfMembers"]
1252     #
1253     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1254         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1255         else:                         Pn = B
1256         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1257         selfA.StoredVariables["Analysis"].store( Xb )
1258         if selfA._toStore("APosterioriCovariance"):
1259             if hasattr(B,"asfullmatrix"):
1260                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1261             else:
1262                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1263         selfA._setInternalState("seed", numpy.random.get_state())
1264     elif selfA._parameters["nextStep"]:
1265         Xn = selfA._getInternalState("Xn")
1266     #
1267     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Required by the optimum indexing below
1268     #
1269     for step in range(duration-1):
1270         numpy.random.set_state(selfA._getInternalState("seed"))
1271         if hasattr(Y,"store"):
1272             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1273         else:
1274             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1275         #
1276         if U is not None:
1277             if hasattr(U,"store") and len(U)>1:
1278                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1279             elif hasattr(U,"store") and len(U)==1:
1280                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1281             else:
1282                 Un = numpy.asmatrix(numpy.ravel( U )).T
1283         else:
1284             Un = None
1285         #
1286         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1287             Xn = CovarianceInflation( Xn,
1288                 selfA._parameters["InflationType"],
1289                 selfA._parameters["InflationFactor"],
1290                 )
1291         #
1292         #--------------------------
1293         if VariantM == "IEnKF12":
1294             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1295             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1296             __j = 0
1297             Deltaw = 1
1298             if not BnotT:
1299                 Ta  = numpy.identity(__m)
1300             vw  = numpy.zeros(__m)
1301             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1302                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1303                 #
1304                 if BnotT:
1305                     E1 = vx1 + _epsilon * EaX
1306                 else:
1307                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1308                 #
1309                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1310                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1311                         argsAsSerie = True,
1312                         returnSerieAsArrayMatrix = True )
1313                 elif selfA._parameters["EstimationOf"] == "Parameters":
1314                     # --- > By principle, M = Id
1315                     E2 = Xn
1316                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1317                 vy1 = H((vx2, Un)).reshape((__p,1))
1318                 #
1319                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1320                     argsAsSerie = True,
1321                     returnSerieAsArrayMatrix = True )
1322                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1323                 #
1324                 if BnotT:
1325                     EaY = (HE2 - vy2) / _epsilon
1326                 else:
1327                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1328                 #
1329                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1330                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1331                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1332                 #
1333                 vw = vw + Deltaw
1334                 #
1335                 if not BnotT:
1336                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1337                 #
1338                 __j = __j + 1
1339             #
1340             A2 = EnsembleOfAnomalies( E2 )
1341             #
1342             if BnotT:
1343                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1344                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1345             #
1346             Xn = vx2 + A2
1347         #--------------------------
1348         else:
1349             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1350         #
1351         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1352             Xn = CovarianceInflation( Xn,
1353                 selfA._parameters["InflationType"],
1354                 selfA._parameters["InflationFactor"],
1355                 )
1356         #
1357         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1358         #--------------------------
1359         selfA._setInternalState("Xn", Xn)
1360         selfA._setInternalState("seed", numpy.random.get_state())
1361         #--------------------------
1362         #
1363         if selfA._parameters["StoreInternalVariables"] \
1364             or selfA._toStore("CostFunctionJ") \
1365             or selfA._toStore("CostFunctionJb") \
1366             or selfA._toStore("CostFunctionJo") \
1367             or selfA._toStore("APosterioriCovariance") \
1368             or selfA._toStore("InnovationAtCurrentAnalysis") \
1369             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1370             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1371             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1372             _Innovation = Ynpu - _HXa
1373         #
1374         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1375         # ---> with analysis
1376         selfA.StoredVariables["Analysis"].store( Xa )
1377         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1378             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1379         if selfA._toStore("InnovationAtCurrentAnalysis"):
1380             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1381         # ---> with current state
1382         if selfA._parameters["StoreInternalVariables"] \
1383             or selfA._toStore("CurrentState"):
1384             selfA.StoredVariables["CurrentState"].store( Xn )
1385         if selfA._toStore("ForecastState"):
1386             selfA.StoredVariables["ForecastState"].store( E2 )
1387         if selfA._toStore("ForecastCovariance"):
1388             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
1389         if selfA._toStore("BMA"):
1390             selfA.StoredVariables["BMA"].store( E2 - Xa )
1391         if selfA._toStore("InnovationAtCurrentState"):
1392             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1393         if selfA._toStore("SimulatedObservationAtCurrentState") \
1394             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1395             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1396         # ---> others
1397         if selfA._parameters["StoreInternalVariables"] \
1398             or selfA._toStore("CostFunctionJ") \
1399             or selfA._toStore("CostFunctionJb") \
1400             or selfA._toStore("CostFunctionJo") \
1401             or selfA._toStore("CurrentOptimum") \
1402             or selfA._toStore("APosterioriCovariance"):
1403             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1404             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1405             J   = Jb + Jo
1406             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1407             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1408             selfA.StoredVariables["CostFunctionJ" ].store( J )
1409             #
1410             if selfA._toStore("IndexOfOptimum") \
1411                 or selfA._toStore("CurrentOptimum") \
1412                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1413                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1414                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1415                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1416                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1417             if selfA._toStore("IndexOfOptimum"):
1418                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1419             if selfA._toStore("CurrentOptimum"):
1420                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1421             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1422                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1423             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1424                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1425             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1426                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1427             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1428                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1429         if selfA._toStore("APosterioriCovariance"):
1430             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1431         if selfA._parameters["EstimationOf"] == "Parameters" \
1432             and J < previousJMinimum:
1433             previousJMinimum    = J
1434             XaMin               = Xa
1435             if selfA._toStore("APosterioriCovariance"):
1436                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1437     #
1438     # Additional final storage of the optimum in parameter estimation
1439     # ----------------------------------------------------------------------
1440     if selfA._parameters["EstimationOf"] == "Parameters":
1441         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1442         selfA.StoredVariables["Analysis"].store( XaMin )
1443         if selfA._toStore("APosterioriCovariance"):
1444             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1445         if selfA._toStore("BMA"):
1446             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1447     #
1448     return 0
1449
1450 # ==============================================================================
1451 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1452     """
1453     Incremental 3DVAR
1454     """
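    #
    # Reference sketch (illustrative only): the outer loop re-linearizes the
    # observation operator around the current iterate Xr, and the inner loop
    # minimizes the quadratic cost in the increment dx
    #   J(dx) = 1/2 dx^T B^{-1} dx + 1/2 (d - Ht.dx)^T R^{-1} (d - Ht.dx),
    # with d = y - H(Xb) and Ht the tangent of H, the analysis being Xb + dx
    # at the optimum.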
1455     #
1456     # Initializations
1457     # ---------------
1458     #
1459     # Non-linear operator for the outer loop
1460     Hm = HO["Direct"].appliedTo
1461     #
1462     # Precomputation of the B and R inverses
1463     BI = B.getI()
1464     RI = R.getI()
1465     #
1466     # Starting point of the optimization
1467     Xini = selfA._parameters["InitializationPoint"]
1468     #
1469     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1470     Innovation = Y - HXb
1471     #
1472     # Outer Loop
1473     # ----------
1474     iOuter = 0
1475     J      = 1./mpr
1476     DeltaJ = 1./mpr
1477     Xr     = Xini.reshape((-1,1))
1478     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1479         #
1480         # Inner Loop
1481         # ----------
1482         Ht = HO["Tangent"].asMatrix(Xr)
1483         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1484         #
1485         # Definition of the cost function
1486         # ------------------------------
1487         def CostFunction(dx):
1488             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1489             if selfA._parameters["StoreInternalVariables"] or \
1490                 selfA._toStore("CurrentState") or \
1491                 selfA._toStore("CurrentOptimum"):
1492                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1493             _HdX = Ht * _dX
1494             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1495             _dInnovation = Innovation - _HdX
1496             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1497                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1498                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1499             if selfA._toStore("InnovationAtCurrentState"):
1500                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1501             #
1502             Jb  = float( 0.5 * _dX.T * BI * _dX )
1503             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1504             J   = Jb + Jo
1505             #
1506             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1507             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1508             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1509             selfA.StoredVariables["CostFunctionJ" ].store( J )
1510             if selfA._toStore("IndexOfOptimum") or \
1511                 selfA._toStore("CurrentOptimum") or \
1512                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1513                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1514                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1515                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1516                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1517             if selfA._toStore("IndexOfOptimum"):
1518                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1519             if selfA._toStore("CurrentOptimum"):
1520                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1521             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1522                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1523             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1524                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1525             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1526                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1527             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1528                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1529             return J
1530         #
1531         def GradientOfCostFunction(dx):
1532             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1533             _HdX         = Ht * _dX
1534             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1535             _dInnovation = Innovation - _HdX
1536             GradJb       = BI * _dX
1537             GradJo       = - Ht.T @ (RI * _dInnovation)
1538             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1539             return GradJ
1540         #
1541         # Minimization of the functional
1542         # --------------------------------
1543         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1544         #
1545         if selfA._parameters["Minimizer"] == "LBFGSB":
1546             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1547             if "0.19" <= scipy.version.version <= "1.1.0":
1548                 import lbfgsbhlt as optimiseur
1549             else:
1550                 import scipy.optimize as optimiseur
1551             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1552                 func        = CostFunction,
1553                 x0          = numpy.zeros(Xini.size),
1554                 fprime      = GradientOfCostFunction,
1555                 args        = (),
1556                 bounds      = selfA._parameters["Bounds"],
1557                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1558                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1559                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1560                 iprint      = selfA._parameters["optiprint"],
1561                 )
1562             nfeval = Informations['funcalls']
1563             rc     = Informations['warnflag']
1564         elif selfA._parameters["Minimizer"] == "TNC":
1565             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1566                 func        = CostFunction,
1567                 x0          = numpy.zeros(Xini.size),
1568                 fprime      = GradientOfCostFunction,
1569                 args        = (),
1570                 bounds      = selfA._parameters["Bounds"],
1571                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1572                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1573                 ftol        = selfA._parameters["CostDecrementTolerance"],
1574                 messages    = selfA._parameters["optmessages"],
1575                 )
1576         elif selfA._parameters["Minimizer"] == "CG":
1577             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1578                 f           = CostFunction,
1579                 x0          = numpy.zeros(Xini.size),
1580                 fprime      = GradientOfCostFunction,
1581                 args        = (),
1582                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1583                 gtol        = selfA._parameters["GradientNormTolerance"],
1584                 disp        = selfA._parameters["optdisp"],
1585                 full_output = True,
1586                 )
1587         elif selfA._parameters["Minimizer"] == "NCG":
1588             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1589                 f           = CostFunction,
1590                 x0          = numpy.zeros(Xini.size),
1591                 fprime      = GradientOfCostFunction,
1592                 args        = (),
1593                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1594                 avextol     = selfA._parameters["CostDecrementTolerance"],
1595                 disp        = selfA._parameters["optdisp"],
1596                 full_output = True,
1597                 )
1598         elif selfA._parameters["Minimizer"] == "BFGS":
1599             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1600                 f           = CostFunction,
1601                 x0          = numpy.zeros(Xini.size),
1602                 fprime      = GradientOfCostFunction,
1603                 args        = (),
1604                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1605                 gtol        = selfA._parameters["GradientNormTolerance"],
1606                 disp        = selfA._parameters["optdisp"],
1607                 full_output = True,
1608                 )
1609         else:
1610             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1611         #
1612         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1613         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1614         #
1615         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1616             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1617             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1618         else:
1619             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1620         #
1621         Xr     = Minimum
1622         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1623         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1624     #
1625     # Obtaining the analysis
1626     # ----------------------
1627     Xa = Xr
1628     #
1629     selfA.StoredVariables["Analysis"].store( Xa )
1630     #
1631     if selfA._toStore("OMA") or \
1632         selfA._toStore("SigmaObs2") or \
1633         selfA._toStore("SimulationQuantiles") or \
1634         selfA._toStore("SimulatedObservationAtOptimum"):
1635         if selfA._toStore("SimulatedObservationAtCurrentState"):
1636             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1637         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1638             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1639         else:
1640             HXa = Hm( Xa )
1641     #
1642     # Computation of the analysis covariance
1643     # ---------------------------------
1644     if selfA._toStore("APosterioriCovariance") or \
1645         selfA._toStore("SimulationQuantiles") or \
1646         selfA._toStore("JacobianMatrixAtOptimum") or \
1647         selfA._toStore("KalmanGainAtOptimum"):
1648         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1649         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1650     if selfA._toStore("APosterioriCovariance") or \
1651         selfA._toStore("SimulationQuantiles") or \
1652         selfA._toStore("KalmanGainAtOptimum"):
1653         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1654         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1655     if selfA._toStore("APosterioriCovariance") or \
1656         selfA._toStore("SimulationQuantiles"):
1657         HessienneI = []
1658         nb = Xa.size
1659         for i in range(nb):
1660             _ee    = numpy.matrix(numpy.zeros(nb)).T
1661             _ee[i] = 1.
1662             _HtEE  = numpy.dot(HtM,_ee)
1663             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1664             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1665         HessienneI = numpy.matrix( HessienneI )
1666         A = HessienneI.I
1667         if min(A.shape) != max(A.shape):
1668             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1669         if (numpy.diag(A) < 0).any():
1670             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1671         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1672             try:
1673                 L = numpy.linalg.cholesky( A )
1674             except:
1675                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1676     if selfA._toStore("APosterioriCovariance"):
1677         selfA.StoredVariables["APosterioriCovariance"].store( A )
1678     if selfA._toStore("JacobianMatrixAtOptimum"):
1679         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1680     if selfA._toStore("KalmanGainAtOptimum"):
1681         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1682         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1683         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1684     #
1685     # Additional computations and/or storage
1686     # ---------------------------------------
1687     if selfA._toStore("Innovation") or \
1688         selfA._toStore("SigmaObs2") or \
1689         selfA._toStore("MahalanobisConsistency") or \
1690         selfA._toStore("OMB"):
1691         d  = Y - HXb
1692     if selfA._toStore("Innovation"):
1693         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1694     if selfA._toStore("BMA"):
1695         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1696     if selfA._toStore("OMA"):
1697         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1698     if selfA._toStore("OMB"):
1699         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1700     if selfA._toStore("SigmaObs2"):
1701         TraceR = R.trace(Y.size)
1702         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1703     if selfA._toStore("MahalanobisConsistency"):
1704         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1705     if selfA._toStore("SimulationQuantiles"):
1706         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
1707     if selfA._toStore("SimulatedObservationAtBackground"):
1708         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1709     if selfA._toStore("SimulatedObservationAtOptimum"):
1710         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1711     #
1712     return 0
1713
1714 # ==============================================================================
1715 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1716     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1717     """
1718     Maximum Likelihood Ensemble Filter
1719     """
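    #
    # Reference sketch (illustrative only): as in the iterative EnKF, the analysis
    # is obtained by Newton-type iterations in ensemble-weight space on
    #   J(w) = 1/2 w^T.w + 1/2 (y - H(xm + A.w))^T R^{-1} (y - H(xm + A.w)),
    # the observation operator being re-applied to the transformed ensemble at
    # each iteration; the final ensemble is x1 + sqrt(m-1)*A.Ta.Ua, where x1 is
    # the last iterate of the mean and Ta = (I + EaY^T R^{-1} EaY)^{-1/2}.
    #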
1720     if selfA._parameters["EstimationOf"] == "Parameters":
1721         selfA._parameters["StoreInternalVariables"] = True
1722     #
1723     # Operators
1724     H = HO["Direct"].appliedControledFormTo
1725     #
1726     if selfA._parameters["EstimationOf"] == "State":
1727         M = EM["Direct"].appliedControledFormTo
1728     #
1729     if CM is not None and "Tangent" in CM and U is not None:
1730         Cm = CM["Tangent"].asMatrix(Xb)
1731     else:
1732         Cm = None
1733     #
1734     # Observation duration and sizes
1735     if hasattr(Y,"stepnumber"):
1736         duration = Y.stepnumber()
1737         __p = numpy.cumprod(Y.shape())[-1]
1738     else:
1739         duration = 2
1740         __p = numpy.array(Y).size
1741     #
1742     # Precomputation of the B and R inverses
1743     if selfA._parameters["StoreInternalVariables"] \
1744         or selfA._toStore("CostFunctionJ") \
1745         or selfA._toStore("CostFunctionJb") \
1746         or selfA._toStore("CostFunctionJo") \
1747         or selfA._toStore("CurrentOptimum") \
1748         or selfA._toStore("APosterioriCovariance"):
1749         BI = B.getI()
1750     RI = R.getI()
1751     #
1752     __n = Xb.size
1753     __m = selfA._parameters["NumberOfMembers"]
1754     #
1755     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1756         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1757         selfA.StoredVariables["Analysis"].store( Xb )
1758         if selfA._toStore("APosterioriCovariance"):
1759             if hasattr(B,"asfullmatrix"):
1760                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1761             else:
1762                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1763         selfA._setInternalState("seed", numpy.random.get_state())
1764     elif selfA._parameters["nextStep"]:
1765         Xn = selfA._getInternalState("Xn")
1766     #
1767     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Required by the optimum indexing below
1768     #
1769     for step in range(duration-1):
1770         numpy.random.set_state(selfA._getInternalState("seed"))
1771         if hasattr(Y,"store"):
1772             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1773         else:
1774             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1775         #
1776         if U is not None:
1777             if hasattr(U,"store") and len(U)>1:
1778                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1779             elif hasattr(U,"store") and len(U)==1:
1780                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1781             else:
1782                 Un = numpy.asmatrix(numpy.ravel( U )).T
1783         else:
1784             Un = None
1785         #
1786         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1787             Xn = CovarianceInflation( Xn,
1788                 selfA._parameters["InflationType"],
1789                 selfA._parameters["InflationFactor"],
1790                 )
1791         #
1792         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1793             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1794                 argsAsSerie = True,
1795                 returnSerieAsArrayMatrix = True )
1796             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1797             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, this term is duplicated!
1798                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1799                 Xn_predicted = Xn_predicted + Cm * Un
1800         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1801             # --- > By principle, M = Id, Q = 0
1802             Xn_predicted = EMX = Xn
1803         #
1804         #--------------------------
1805         if VariantM == "MLEF13":
1806             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1807             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1808             Ua  = numpy.identity(__m)
1809             __j = 0
1810             Deltaw = 1
1811             if not BnotT:
1812                 Ta  = numpy.identity(__m)
1813             vw  = numpy.zeros(__m)
1814             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1815                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1816                 #
1817                 if BnotT:
1818                     E1 = vx1 + _epsilon * EaX
1819                 else:
1820                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1821                 #
1822                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1823                     argsAsSerie = True,
1824                     returnSerieAsArrayMatrix = True )
1825                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1826                 #
1827                 if BnotT:
1828                     EaY = (HE2 - vy2) / _epsilon
1829                 else:
1830                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1831                 #
1832                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1833                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1834                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1835                 #
1836                 vw = vw + Deltaw
1837                 #
1838                 if not BnotT:
1839                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1840                 #
1841                 __j = __j + 1
1842             #
1843             if BnotT:
1844                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1845             #
1846             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1847         #--------------------------
1848         else:
1849             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1850         #
1851         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1852             Xn = CovarianceInflation( Xn,
1853                 selfA._parameters["InflationType"],
1854                 selfA._parameters["InflationFactor"],
1855                 )
1856         #
1857         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1858         #--------------------------
1859         selfA._setInternalState("Xn", Xn)
1860         selfA._setInternalState("seed", numpy.random.get_state())
1861         #--------------------------
1862         #
1863         if selfA._parameters["StoreInternalVariables"] \
1864             or selfA._toStore("CostFunctionJ") \
1865             or selfA._toStore("CostFunctionJb") \
1866             or selfA._toStore("CostFunctionJo") \
1867             or selfA._toStore("APosterioriCovariance") \
1868             or selfA._toStore("InnovationAtCurrentAnalysis") \
1869             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1870             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1871             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1872             _Innovation = Ynpu - _HXa
1873         #
1874         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1875         # ---> with analysis
1876         selfA.StoredVariables["Analysis"].store( Xa )
1877         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1878             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1879         if selfA._toStore("InnovationAtCurrentAnalysis"):
1880             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1881         # ---> with current state
1882         if selfA._parameters["StoreInternalVariables"] \
1883             or selfA._toStore("CurrentState"):
1884             selfA.StoredVariables["CurrentState"].store( Xn )
1885         if selfA._toStore("ForecastState"):
1886             selfA.StoredVariables["ForecastState"].store( EMX )
1887         if selfA._toStore("ForecastCovariance"):
1888             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1889         if selfA._toStore("BMA"):
1890             selfA.StoredVariables["BMA"].store( EMX - Xa )
1891         if selfA._toStore("InnovationAtCurrentState"):
1892             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1893         if selfA._toStore("SimulatedObservationAtCurrentState") \
1894             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1895             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1896         # ---> others
1897         if selfA._parameters["StoreInternalVariables"] \
1898             or selfA._toStore("CostFunctionJ") \
1899             or selfA._toStore("CostFunctionJb") \
1900             or selfA._toStore("CostFunctionJo") \
1901             or selfA._toStore("CurrentOptimum") \
1902             or selfA._toStore("APosterioriCovariance"):
1903             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1904             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1905             J   = Jb + Jo
1906             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1907             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1908             selfA.StoredVariables["CostFunctionJ" ].store( J )
1909             #
1910             if selfA._toStore("IndexOfOptimum") \
1911                 or selfA._toStore("CurrentOptimum") \
1912                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1913                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1914                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1915                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1916                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1917             if selfA._toStore("IndexOfOptimum"):
1918                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1919             if selfA._toStore("CurrentOptimum"):
1920                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1921             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1922                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1923             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1924                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1925             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1926                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1927             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1928                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1929         if selfA._toStore("APosterioriCovariance"):
1930             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1931         if selfA._parameters["EstimationOf"] == "Parameters" \
1932             and J < previousJMinimum:
1933             previousJMinimum    = J
1934             XaMin               = Xa
1935             if selfA._toStore("APosterioriCovariance"):
1936                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1937     #
1938     # Additional final storage of the optimum in parameter estimation
1939     # ----------------------------------------------------------------------
1940     if selfA._parameters["EstimationOf"] == "Parameters":
1941         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1942         selfA.StoredVariables["Analysis"].store( XaMin )
1943         if selfA._toStore("APosterioriCovariance"):
1944             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1945         if selfA._toStore("BMA"):
1946             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1947     #
1948     return 0
1949
1950 # ==============================================================================
1951 def mmqr(
1952         func     = None,
1953         x0       = None,
1954         fprime   = None,
1955         bounds   = None,
1956         quantile = 0.5,
1957         maxfun   = 15000,
1958         toler    = 1.e-06,
1959         y        = None,
1960         ):
1961     """
1962     Computer implementation of the MMQR algorithm, based on the publication:
1963     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1964     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1965     """
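    # (An illustrative usage sketch, _mmqr_usage_sketch, is given after this function.)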
1966     #
1967     # Retrieval of initial data and information
1968     # --------------------------------------------------
1969     variables = numpy.ravel( x0 )
1970     mesures   = numpy.ravel( y )
1971     increment = sys.float_info[0]
1972     p         = variables.size
1973     n         = mesures.size
1974     quantile  = float(quantile)
1975     #
1976     # Computation of the MM parameters
1977     # ---------------------------
1978     tn      = float(toler) / n
1979     e0      = -tn / math.log(tn)
1980     epsilon = (e0-tn)/(1+math.log(e0))
1981     #
1982     # Initialization computations
1983     # ------------------------
1984     residus  = mesures - numpy.ravel( func( variables ) )
1985     poids    = 1./(epsilon+numpy.abs(residus))
1986     veps     = 1. - 2. * quantile - residus * poids
1987     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1988     iteration = 0
1989     #
1990     # Iterative search
1991     # -------------------
1992     while (increment > toler) and (iteration < maxfun) :
1993         iteration += 1
1994         #
1995         Derivees  = numpy.array(fprime(variables))
1996         Derivees  = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it has gone through YACS pipes
1997         DeriveesT = Derivees.transpose()
1998         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1999         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
2000         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2001         #
2002         variables = variables + step
2003         if bounds is not None:
2004             # Caution: avoid an infinite loop if an interval is too small
2005             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2006                 step      = step/2.
2007                 variables = variables - step
2008         residus   = mesures - numpy.ravel( func(variables) )
2009         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2010         #
2011         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2012             step      = step/2.
2013             variables = variables - step
2014             residus   = mesures - numpy.ravel( func(variables) )
2015             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2016         #
2017         increment     = lastsurrogate-surrogate
2018         poids         = 1./(epsilon+numpy.abs(residus))
2019         veps          = 1. - 2. * quantile - residus * poids
2020         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2021     #
2022     # Discrepancy measure
2023     # --------------
2024     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2025     #
2026     return variables, Ecart, [n,p,iteration,increment,0]
2027
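# ==============================================================================
def _mmqr_usage_sketch():
    """
    Illustrative sketch only, not used by the assimilation algorithms of this
    module: median regression of a hypothetical linear model, solved with the
    mmqr function defined above. All data below are purely illustrative.
    """
    rng    = numpy.random.RandomState(0)
    Xd     = rng.normal(size=(50, 2))                # hypothetical design matrix
    yobs   = Xd @ numpy.array([1., -2.]) + rng.normal(size=50)
    model  = lambda beta: Xd @ numpy.ravel(beta)     # direct model
    jacob  = lambda beta: Xd                         # its Jacobian, of shape (n, p)
    return mmqr(func=model, x0=numpy.zeros(2), fprime=jacob, quantile=0.5, y=yobs)
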
2028 # ==============================================================================
2029 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2030     """
2031     Multi-step and multi-method 3DVAR
2032     """
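    #
    # Reference sketch (illustrative only): at each observation step, the state is
    # propagated by the evolution operator M (for state estimation), the single-cycle
    # variational kernel passed as "oneCycle" is applied to the forecast and to the
    # observation of the step, and the last stored "Analysis" becomes the state of
    # the next cycle.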
2033     #
2034     # Initialization
2035     if selfA._parameters["EstimationOf"] == "State":
2036         M = EM["Direct"].appliedTo
2037         #
2038         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2039             Xn = numpy.ravel(Xb).reshape((-1,1))
2040             selfA.StoredVariables["Analysis"].store( Xn )
2041             if selfA._toStore("APosterioriCovariance"):
2042                 if hasattr(B,"asfullmatrix"):
2043                     selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2044                 else:
2045                     selfA.StoredVariables["APosterioriCovariance"].store( B )
2046             if selfA._toStore("ForecastState"):
2047                 selfA.StoredVariables["ForecastState"].store( Xn )
2048         elif selfA._parameters["nextStep"]:
2049             Xn = selfA._getInternalState("Xn")
2050     else:
2051         Xn = numpy.ravel(Xb).reshape((-1,1))
2052     #
2053     if hasattr(Y,"stepnumber"):
2054         duration = Y.stepnumber()
2055     else:
2056         duration = 2
2057     #
2058     # Multi-step
2059     for step in range(duration-1):
2060         if hasattr(Y,"store"):
2061             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2062         else:
2063             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2064         #
2065         if selfA._parameters["EstimationOf"] == "State": # Forecast
2066             Xn_predicted = M( Xn )
2067             if selfA._toStore("ForecastState"):
2068                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2069         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2070             # --- > By principle, M = Id, Q = 0
2071             Xn_predicted = Xn
2072         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2073         #
2074         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2075         #
2076         Xn = selfA.StoredVariables["Analysis"][-1]
2077         #--------------------------
2078         selfA._setInternalState("Xn", Xn)
2079     #
2080     return 0
2081
2082 # ==============================================================================
2083 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2084     """
2085     3DVAR PSAS
2086     """
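    #
    # Reference sketch (illustrative only): PSAS carries out the minimization in the
    # observation (dual) space, on the cost
    #   J(w) = 1/2 w^T (H B H^T + R) w - w^T d,   with d = y - H(Xb),
    # the physical state being recovered as Xb + B H^T w.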
2087     #
2088     # Initializations
2089     # ---------------
2090     #
2091     # Operators
2092     Hm = HO["Direct"].appliedTo
2093     #
2094     # Possible use of a precomputed H(Xb) vector
2095     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2096         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2097     else:
2098         HXb = Hm( Xb )
2099     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2100     if Y.size != HXb.size:
2101         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2102     if max(Y.shape) != max(HXb.shape):
2103         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2104     #
2105     if selfA._toStore("JacobianMatrixAtBackground"):
2106         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2107         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2108         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2109     #
2110     Ht = HO["Tangent"].asMatrix(Xb)
2111     BHT = B * Ht.T
2112     HBHTpR = R + Ht * BHT
2113     Innovation = Y - HXb
2114     #
2115     # Starting point of the optimization (the control variable lives in observation space)
2116     Xini = numpy.zeros(Y.size)
2117     #
2118     # Definition of the cost function
2119     # ------------------------------
2120     def CostFunction(w):
2121         _W = numpy.asmatrix(numpy.ravel( w )).T
2122         if selfA._parameters["StoreInternalVariables"] or \
2123             selfA._toStore("CurrentState") or \
2124             selfA._toStore("CurrentOptimum"):
2125             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2126         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2127             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2128             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2129         if selfA._toStore("InnovationAtCurrentState"):
2130             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2131         #
2132         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2133         Jo  = float( - _W.T * Innovation )
2134         J   = Jb + Jo
2135         #
2136         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2137         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2138         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2139         selfA.StoredVariables["CostFunctionJ" ].store( J )
2140         if selfA._toStore("IndexOfOptimum") or \
2141             selfA._toStore("CurrentOptimum") or \
2142             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2143             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2144             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2145             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2146             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2147         if selfA._toStore("IndexOfOptimum"):
2148             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2149         if selfA._toStore("CurrentOptimum"):
2150             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2151         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2152             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2153         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2154             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2155         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2156             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2157         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2158             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2159         return J
2160     #
2161     def GradientOfCostFunction(w):
2162         _W = numpy.asmatrix(numpy.ravel( w )).T
2163         GradJb  = HBHTpR * _W
2164         GradJo  = - Innovation
2165         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2166         return GradJ
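    #
    # Editorial note (added): the routine above works in the dual, observation-
    # space variable w. With the innovation d = Y - H(Xb), the quadratic cost
    # minimized is
    #     J(w) = 0.5 * w.T * (H B H.T + R) * w - w.T * d
    # whose gradient is GradJ(w) = (H B H.T + R) * w - d, and the analysis is
    # recovered in state space as Xa = Xb + B H.T w. A minimal closed-form
    # sketch of this small quadratic problem (illustration only, not executed):
    #     # w  = numpy.linalg.solve(numpy.asarray(HBHTpR), numpy.ravel(Innovation))
    #     # Xa = numpy.ravel(Xb) + numpy.asarray(BHT) @ w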
2167     #
2168     # Minimization of the functional
2169     # ------------------------------
2170     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2171     #
2172     if selfA._parameters["Minimizer"] == "LBFGSB":
2173         if "0.19" <= scipy.version.version <= "1.1.0":
2174             import lbfgsbhlt as optimiseur
2175         else:
2176             import scipy.optimize as optimiseur
2177         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2178             func        = CostFunction,
2179             x0          = Xini,
2180             fprime      = GradientOfCostFunction,
2181             args        = (),
2182             bounds      = selfA._parameters["Bounds"],
2183             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2184             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2185             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2186             iprint      = selfA._parameters["optiprint"],
2187             )
2188         nfeval = Informations['funcalls']
2189         rc     = Informations['warnflag']
2190     elif selfA._parameters["Minimizer"] == "TNC":
2191         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2192             func        = CostFunction,
2193             x0          = Xini,
2194             fprime      = GradientOfCostFunction,
2195             args        = (),
2196             bounds      = selfA._parameters["Bounds"],
2197             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2198             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2199             ftol        = selfA._parameters["CostDecrementTolerance"],
2200             messages    = selfA._parameters["optmessages"],
2201             )
2202     elif selfA._parameters["Minimizer"] == "CG":
2203         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2204             f           = CostFunction,
2205             x0          = Xini,
2206             fprime      = GradientOfCostFunction,
2207             args        = (),
2208             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2209             gtol        = selfA._parameters["GradientNormTolerance"],
2210             disp        = selfA._parameters["optdisp"],
2211             full_output = True,
2212             )
2213     elif selfA._parameters["Minimizer"] == "NCG":
2214         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2215             f           = CostFunction,
2216             x0          = Xini,
2217             fprime      = GradientOfCostFunction,
2218             args        = (),
2219             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2220             avextol     = selfA._parameters["CostDecrementTolerance"],
2221             disp        = selfA._parameters["optdisp"],
2222             full_output = True,
2223             )
2224     elif selfA._parameters["Minimizer"] == "BFGS":
2225         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2226             f           = CostFunction,
2227             x0          = Xini,
2228             fprime      = GradientOfCostFunction,
2229             args        = (),
2230             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2231             gtol        = selfA._parameters["GradientNormTolerance"],
2232             disp        = selfA._parameters["optdisp"],
2233             full_output = True,
2234             )
2235     else:
2236         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2237     #
2238     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2239     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2240     #
2241     # Workaround for a TNC bug on the returned Minimum
2242     # ------------------------------------------------
2243     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2244         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2245         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2246     else:
2247         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2248     #
2249     # Retrieval of the analysis
2250     # -------------------------
2251     Xa = Minimum
2252     #
2253     selfA.StoredVariables["Analysis"].store( Xa )
2254     #
2255     if selfA._toStore("OMA") or \
2256         selfA._toStore("SigmaObs2") or \
2257         selfA._toStore("SimulationQuantiles") or \
2258         selfA._toStore("SimulatedObservationAtOptimum"):
2259         if selfA._toStore("SimulatedObservationAtCurrentState"):
2260             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2261         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2262             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2263         else:
2264             HXa = Hm( Xa )
2265     #
2266     # Computation of the analysis covariance
2267     # --------------------------------------
2268     if selfA._toStore("APosterioriCovariance") or \
2269         selfA._toStore("SimulationQuantiles") or \
2270         selfA._toStore("JacobianMatrixAtOptimum") or \
2271         selfA._toStore("KalmanGainAtOptimum"):
2272         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2273         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2274     if selfA._toStore("APosterioriCovariance") or \
2275         selfA._toStore("SimulationQuantiles") or \
2276         selfA._toStore("KalmanGainAtOptimum"):
2277         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2278         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2279     if selfA._toStore("APosterioriCovariance") or \
2280         selfA._toStore("SimulationQuantiles"):
2281         BI = B.getI()
2282         RI = R.getI()
2283         HessienneI = []
2284         nb = Xa.size
2285         for i in range(nb):
2286             _ee    = numpy.matrix(numpy.zeros(nb)).T
2287             _ee[i] = 1.
2288             _HtEE  = numpy.dot(HtM,_ee)
2289             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2290             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2291         HessienneI = numpy.matrix( HessienneI )
2292         A = HessienneI.I
2293         if min(A.shape) != max(A.shape):
2294             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator; please check it."%(selfA._name,str(A.shape)))
2295         if (numpy.diag(A) < 0).any():
2296             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator; please check it."%(selfA._name,))
2297         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2298             try:
2299                 L = numpy.linalg.cholesky( A )
2300             except numpy.linalg.LinAlgError:
2301                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2302     if selfA._toStore("APosterioriCovariance"):
2303         selfA.StoredVariables["APosterioriCovariance"].store( A )
2304     if selfA._toStore("JacobianMatrixAtOptimum"):
2305         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2306     if selfA._toStore("KalmanGainAtOptimum"):
2307         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2308         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2309         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2310     #
2311     # Additional calculations and/or storage
2312     # --------------------------------------
2313     if selfA._toStore("Innovation") or \
2314         selfA._toStore("SigmaObs2") or \
2315         selfA._toStore("MahalanobisConsistency") or \
2316         selfA._toStore("OMB"):
2317         d  = Y - HXb
2318     if selfA._toStore("Innovation"):
2319         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2320     if selfA._toStore("BMA"):
2321         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2322     if selfA._toStore("OMA"):
2323         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2324     if selfA._toStore("OMB"):
2325         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2326     if selfA._toStore("SigmaObs2"):
2327         TraceR = R.trace(Y.size)
2328         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2329     if selfA._toStore("MahalanobisConsistency"):
2330         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2331     if selfA._toStore("SimulationQuantiles"):
2332         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2333     if selfA._toStore("SimulatedObservationAtBackground"):
2334         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2335     if selfA._toStore("SimulatedObservationAtOptimum"):
2336         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2337     #
2338     return 0
2339
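
# Editorial sketch (added for illustration, not called by any ADAO algorithm):
# a minimal, self-contained example of the dual-space analysis used above,
# assuming small dense numpy arrays and a linear observation operator H. It
# only illustrates the algebra Xa = Xb + B H.T (H B H.T + R)^-1 (Y - H Xb).
def _sketch_dual_space_analysis():
    "Toy example of the dual (observation-space) analysis formula"
    B  = numpy.array([[2., 0.5, 0.],[0.5, 1., 0.],[0., 0., 1.]]) # background covariance
    R  = numpy.array([[0.1, 0.],[0., 0.2]])                      # observation covariance
    H  = numpy.array([[1., 0., 0.],[0., 1., 1.]])                # linear observation operator
    Xb = numpy.array([1., 2., 3.])                               # background state
    Y  = numpy.array([1.5, 4.5])                                 # observations
    d  = Y - H @ Xb                                              # innovation
    w  = numpy.linalg.solve(H @ B @ H.T + R, d)                  # dual (observation-space) variable
    Xa = Xb + B @ H.T @ w                                        # analysis in state space
    return Xa
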
2340 # ==============================================================================
2341 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
2342     """
2343     Stochastic EnKF
2344     """
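    # Editorial note (added): the stochastic EnKF analysis below updates each
    # member i with perturbed observations,
    #     Xa_i = Xf_i + K * (Y + eps_i - H(Xf_i)),   eps_i ~ N(0, R)
    # where the gain K is estimated from the forecast ensemble, either through
    # the sampled covariances Pf H.T and H Pf H.T ("KalmanFilterFormula05") or
    # through the anomaly matrices EaX and EaY ("KalmanFilterFormula16").
    #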
2345     if selfA._parameters["EstimationOf"] == "Parameters":
2346         selfA._parameters["StoreInternalVariables"] = True
2347     #
2348     # Operators
2349     H = HO["Direct"].appliedControledFormTo
2350     #
2351     if selfA._parameters["EstimationOf"] == "State":
2352         M = EM["Direct"].appliedControledFormTo
2353     #
2354     if CM is not None and "Tangent" in CM and U is not None:
2355         Cm = CM["Tangent"].asMatrix(Xb)
2356     else:
2357         Cm = None
2358     #
2359     # Observation duration and sizes
2360     if hasattr(Y,"stepnumber"):
2361         duration = Y.stepnumber()
2362         __p = numpy.cumprod(Y.shape())[-1]
2363     else:
2364         duration = 2
2365         __p = numpy.array(Y).size
2366     #
2367     # Precomputation of the inverses of B and R
2368     if selfA._parameters["StoreInternalVariables"] \
2369         or selfA._toStore("CostFunctionJ") \
2370         or selfA._toStore("CostFunctionJb") \
2371         or selfA._toStore("CostFunctionJo") \
2372         or selfA._toStore("CurrentOptimum") \
2373         or selfA._toStore("APosterioriCovariance"):
2374         BI = B.getI()
2375         RI = R.getI()
2376     #
2377     __n = Xb.size
2378     __m = selfA._parameters["NumberOfMembers"]
2379     #
2380     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2381     else:                         Rn = R
2382     #
2383     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2384         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2385         selfA.StoredVariables["Analysis"].store( Xb )
2386         if selfA._toStore("APosterioriCovariance"):
2387             if hasattr(B,"asfullmatrix"):
2388                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2389             else:
2390                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2391         selfA._setInternalState("seed", numpy.random.get_state())
2392     elif selfA._parameters["nextStep"]:
2393         Xn = selfA._getInternalState("Xn")
2394     #
2395     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Added: used by the IndexMin computations below, which referenced it without defining it
2396     #
2397     for step in range(duration-1):
2398         numpy.random.set_state(selfA._getInternalState("seed"))
2399         if hasattr(Y,"store"):
2400             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2401         else:
2402             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2403         #
2404         if U is not None:
2405             if hasattr(U,"store") and len(U)>1:
2406                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2407             elif hasattr(U,"store") and len(U)==1:
2408                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2409             else:
2410                 Un = numpy.asmatrix(numpy.ravel( U )).T
2411         else:
2412             Un = None
2413         #
2414         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2415             Xn = CovarianceInflation( Xn,
2416                 selfA._parameters["InflationType"],
2417                 selfA._parameters["InflationFactor"],
2418                 )
2419         #
2420         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2421             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2422                 argsAsSerie = True,
2423                 returnSerieAsArrayMatrix = True )
2424             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2425             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2426                 argsAsSerie = True,
2427                 returnSerieAsArrayMatrix = True )
2428             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
2429                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2430                 Xn_predicted = Xn_predicted + Cm * Un
2431         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2432             # ---> By principle, M = Id, Q = 0
2433             Xn_predicted = EMX = Xn
2434             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2435                 argsAsSerie = True,
2436                 returnSerieAsArrayMatrix = True )
2437         #
2438         # Mean of forecast and observation of forecast
2439         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2440         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2441         #
2442         #--------------------------
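        # Editorial note (added): both variants below estimate the Kalman gain
        # from the ensemble. "KalmanFilterFormula05" builds the sample
        # covariances Pf H.T and H Pf H.T explicitly and uses
        # K = Pf H.T (H Pf H.T + R)^-1, while "KalmanFilterFormula16" works
        # with anomalies scaled by 1/sqrt(m-1), the perturbed observations EpY
        # carrying the R contribution.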
2443         if VariantM == "KalmanFilterFormula05":
2444             PfHT, HPfHT = 0., 0.
2445             for i in range(__m):
2446                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2447                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2448                 PfHT  += Exfi * Eyfi.T
2449                 HPfHT += Eyfi * Eyfi.T
2450             PfHT  = (1./(__m-1)) * PfHT
2451             HPfHT = (1./(__m-1)) * HPfHT
2452             Kn     = PfHT * ( R + HPfHT ).I
2453             del PfHT, HPfHT
2454             #
2455             for i in range(__m):
2456                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2457                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2458         #--------------------------
2459         elif VariantM == "KalmanFilterFormula16":
2460             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2461             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2462             #
2463             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2464             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2465             #
2466             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2467             #
2468             for i in range(__m):
2469                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2470         #--------------------------
2471         else:
2472             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2473         #
2474         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2475             Xn = CovarianceInflation( Xn,
2476                 selfA._parameters["InflationType"],
2477                 selfA._parameters["InflationFactor"],
2478                 )
2479         #
2480         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2481         #--------------------------
2482         selfA._setInternalState("Xn", Xn)
2483         selfA._setInternalState("seed", numpy.random.get_state())
2484         #--------------------------
2485         #
2486         if selfA._parameters["StoreInternalVariables"] \
2487             or selfA._toStore("CostFunctionJ") \
2488             or selfA._toStore("CostFunctionJb") \
2489             or selfA._toStore("CostFunctionJo") \
2490             or selfA._toStore("APosterioriCovariance") \
2491             or selfA._toStore("InnovationAtCurrentAnalysis") \
2492             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2493             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2494             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2495             _Innovation = Ynpu - _HXa
2496         #
2497         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2498         # ---> with analysis
2499         selfA.StoredVariables["Analysis"].store( Xa )
2500         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2501             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2502         if selfA._toStore("InnovationAtCurrentAnalysis"):
2503             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2504         # ---> with current state
2505         if selfA._parameters["StoreInternalVariables"] \
2506             or selfA._toStore("CurrentState"):
2507             selfA.StoredVariables["CurrentState"].store( Xn )
2508         if selfA._toStore("ForecastState"):
2509             selfA.StoredVariables["ForecastState"].store( EMX )
2510         if selfA._toStore("ForecastCovariance"):
2511             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2512         if selfA._toStore("BMA"):
2513             selfA.StoredVariables["BMA"].store( EMX - Xa )
2514         if selfA._toStore("InnovationAtCurrentState"):
2515             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2516         if selfA._toStore("SimulatedObservationAtCurrentState") \
2517             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2518             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2519         # ---> others
2520         if selfA._parameters["StoreInternalVariables"] \
2521             or selfA._toStore("CostFunctionJ") \
2522             or selfA._toStore("CostFunctionJb") \
2523             or selfA._toStore("CostFunctionJo") \
2524             or selfA._toStore("CurrentOptimum") \
2525             or selfA._toStore("APosterioriCovariance"):
2526             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2527             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2528             J   = Jb + Jo
2529             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2530             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2531             selfA.StoredVariables["CostFunctionJ" ].store( J )
2532             #
2533             if selfA._toStore("IndexOfOptimum") \
2534                 or selfA._toStore("CurrentOptimum") \
2535                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2536                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2537                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2538                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2539                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2540             if selfA._toStore("IndexOfOptimum"):
2541                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2542             if selfA._toStore("CurrentOptimum"):
2543                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2544             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2545                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2546             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2547                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2548             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2549                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2550             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2551                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2552         if selfA._toStore("APosterioriCovariance"):
2553             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2554         if selfA._parameters["EstimationOf"] == "Parameters" \
2555             and J < previousJMinimum:
2556             previousJMinimum    = J
2557             XaMin               = Xa
2558             if selfA._toStore("APosterioriCovariance"):
2559                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2560     #
2561     # Final additional storage of the optimum for parameter estimation
2562     # -----------------------------------------------------------------
2563     if selfA._parameters["EstimationOf"] == "Parameters":
2564         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2565         selfA.StoredVariables["Analysis"].store( XaMin )
2566         if selfA._toStore("APosterioriCovariance"):
2567             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2568         if selfA._toStore("BMA"):
2569             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2570     #
2571     return 0
2572
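
# Editorial sketch (added for illustration, not called by any ADAO algorithm):
# one stochastic EnKF analysis step on a toy ensemble, in the perturbed
# observation form used by senkf above. Sizes, operators and covariances are
# arbitrary assumptions, chosen only to keep the example self-contained.
def _sketch_stochastic_enkf_analysis_step():
    "Toy stochastic EnKF analysis step with perturbed observations"
    rng = numpy.random.RandomState(1234)
    n, p, m = 3, 2, 10                                   # state, observation, ensemble sizes
    H   = numpy.array([[1., 0., 0.],[0., 0., 1.]])       # linear observation operator
    R   = 0.04 * numpy.eye(p)                            # observation error covariance
    Y   = numpy.array([0.9, 2.1]).reshape((p,1))         # observation
    Xf  = 1.0 + 0.2 * rng.standard_normal((n, m))        # forecast ensemble (n x m)
    HXf = H @ Xf                                         # observed forecast ensemble (p x m)
    # Perturbed observations and anomalies scaled by 1/sqrt(m-1), as in "KalmanFilterFormula16"
    EpY = Y + rng.multivariate_normal(numpy.zeros(p), R, size=m).T
    EaX = (Xf - Xf.mean(axis=1, keepdims=True)) / math.sqrt(m - 1)
    EaY = (HXf - HXf.mean(axis=1, keepdims=True) - EpY + EpY.mean(axis=1, keepdims=True)) / math.sqrt(m - 1)
    K   = EaX @ EaY.T @ numpy.linalg.inv(EaY @ EaY.T)    # ensemble-estimated gain
    Xa  = Xf + K @ (EpY - HXf)                           # analysis ensemble
    return Xa.mean(axis=1)                               # analysis estimate
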
2573 # ==============================================================================
2574 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2575     """
2576     3DVAR
2577     """
2578     #
2579     # Initialisations
2580     # ---------------
2581     #
2582     # Operators
2583     Hm = HO["Direct"].appliedTo
2584     Ha = HO["Adjoint"].appliedInXTo
2585     #
2586     # Possible use of a precomputed H(Xb) vector
2587     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2588         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2589     else:
2590         HXb = Hm( Xb )
2591     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2592     if Y.size != HXb.size:
2593         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2594     if max(Y.shape) != max(HXb.shape):
2595         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2596     #
2597     if selfA._toStore("JacobianMatrixAtBackground"):
2598         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2599         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2600         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2601     #
2602     # Precomputation of the inverses of B and R
2603     BI = B.getI()
2604     RI = R.getI()
2605     #
2606     # Starting point of the optimization
2607     Xini = selfA._parameters["InitializationPoint"]
2608     #
2609     # Definition of the cost function
2610     # -------------------------------
2611     def CostFunction(x):
2612         _X  = numpy.asmatrix(numpy.ravel( x )).T
2613         if selfA._parameters["StoreInternalVariables"] or \
2614             selfA._toStore("CurrentState") or \
2615             selfA._toStore("CurrentOptimum"):
2616             selfA.StoredVariables["CurrentState"].store( _X )
2617         _HX = Hm( _X )
2618         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2619         _Innovation = Y - _HX
2620         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2621             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2622             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2623         if selfA._toStore("InnovationAtCurrentState"):
2624             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2625         #
2626         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2627         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2628         J   = Jb + Jo
2629         #
2630         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2631         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2632         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2633         selfA.StoredVariables["CostFunctionJ" ].store( J )
2634         if selfA._toStore("IndexOfOptimum") or \
2635             selfA._toStore("CurrentOptimum") or \
2636             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2637             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2638             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2639             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2640             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2641         if selfA._toStore("IndexOfOptimum"):
2642             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2643         if selfA._toStore("CurrentOptimum"):
2644             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2645         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2646             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2647         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2648             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2649         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2650             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2651         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2652             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2653         return J
2654     #
2655     def GradientOfCostFunction(x):
2656         _X      = numpy.asmatrix(numpy.ravel( x )).T
2657         _HX     = Hm( _X )
2658         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
2659         GradJb  = BI * (_X - Xb)
2660         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
2661         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2662         return GradJ
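    #
    # Editorial note (added): the two functions above implement the standard
    # 3DVAR cost and its gradient,
    #     J(x)     = 0.5 (x - Xb).T B^-1 (x - Xb) + 0.5 (Y - H(x)).T R^-1 (Y - H(x))
    #     GradJ(x) = B^-1 (x - Xb) - H'.T R^-1 (Y - H(x))
    # where the transposed linearized operator H'.T is applied through the
    # adjoint operator Ha.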
2663     #
2664     # Minimization of the functional
2665     # ------------------------------
2666     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2667     #
2668     if selfA._parameters["Minimizer"] == "LBFGSB":
2669         if "0.19" <= scipy.version.version <= "1.1.0":
2670             import lbfgsbhlt as optimiseur
2671         else:
2672             import scipy.optimize as optimiseur
2673         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2674             func        = CostFunction,
2675             x0          = Xini,
2676             fprime      = GradientOfCostFunction,
2677             args        = (),
2678             bounds      = selfA._parameters["Bounds"],
2679             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2680             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2681             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2682             iprint      = selfA._parameters["optiprint"],
2683             )
2684         nfeval = Informations['funcalls']
2685         rc     = Informations['warnflag']
2686     elif selfA._parameters["Minimizer"] == "TNC":
2687         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2688             func        = CostFunction,
2689             x0          = Xini,
2690             fprime      = GradientOfCostFunction,
2691             args        = (),
2692             bounds      = selfA._parameters["Bounds"],
2693             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2694             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2695             ftol        = selfA._parameters["CostDecrementTolerance"],
2696             messages    = selfA._parameters["optmessages"],
2697             )
2698     elif selfA._parameters["Minimizer"] == "CG":
2699         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2700             f           = CostFunction,
2701             x0          = Xini,
2702             fprime      = GradientOfCostFunction,
2703             args        = (),
2704             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2705             gtol        = selfA._parameters["GradientNormTolerance"],
2706             disp        = selfA._parameters["optdisp"],
2707             full_output = True,
2708             )
2709     elif selfA._parameters["Minimizer"] == "NCG":
2710         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2711             f           = CostFunction,
2712             x0          = Xini,
2713             fprime      = GradientOfCostFunction,
2714             args        = (),
2715             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2716             avextol     = selfA._parameters["CostDecrementTolerance"],
2717             disp        = selfA._parameters["optdisp"],
2718             full_output = True,
2719             )
2720     elif selfA._parameters["Minimizer"] == "BFGS":
2721         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2722             f           = CostFunction,
2723             x0          = Xini,
2724             fprime      = GradientOfCostFunction,
2725             args        = (),
2726             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2727             gtol        = selfA._parameters["GradientNormTolerance"],
2728             disp        = selfA._parameters["optdisp"],
2729             full_output = True,
2730             )
2731     else:
2732         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2733     #
2734     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2735     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2736     #
2737     # Workaround for a TNC bug on the returned Minimum
2738     # ------------------------------------------------
2739     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2740         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2741     #
2742     # Retrieval of the analysis
2743     # -------------------------
2744     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2745     #
2746     selfA.StoredVariables["Analysis"].store( Xa )
2747     #
2748     if selfA._toStore("OMA") or \
2749         selfA._toStore("SigmaObs2") or \
2750         selfA._toStore("SimulationQuantiles") or \
2751         selfA._toStore("SimulatedObservationAtOptimum"):
2752         if selfA._toStore("SimulatedObservationAtCurrentState"):
2753             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2754         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2755             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2756         else:
2757             HXa = Hm( Xa )
2758     #
2759     # Computation of the analysis covariance
2760     # --------------------------------------
2761     if selfA._toStore("APosterioriCovariance") or \
2762         selfA._toStore("SimulationQuantiles") or \
2763         selfA._toStore("JacobianMatrixAtOptimum") or \
2764         selfA._toStore("KalmanGainAtOptimum"):
2765         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2766         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2767     if selfA._toStore("APosterioriCovariance") or \
2768         selfA._toStore("SimulationQuantiles") or \
2769         selfA._toStore("KalmanGainAtOptimum"):
2770         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2771         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2772     if selfA._toStore("APosterioriCovariance") or \
2773         selfA._toStore("SimulationQuantiles"):
2774         HessienneI = []
2775         nb = Xa.size
2776         for i in range(nb):
2777             _ee    = numpy.matrix(numpy.zeros(nb)).T
2778             _ee[i] = 1.
2779             _HtEE  = numpy.dot(HtM,_ee)
2780             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2781             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2782         HessienneI = numpy.matrix( HessienneI )
2783         A = HessienneI.I
2784         if min(A.shape) != max(A.shape):
2785             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator; please check it."%(selfA._name,str(A.shape)))
2786         if (numpy.diag(A) < 0).any():
2787             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator; please check it."%(selfA._name,))
2788         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2789             try:
2790                 L = numpy.linalg.cholesky( A )
2791             except numpy.linalg.LinAlgError:
2792                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2793     if selfA._toStore("APosterioriCovariance"):
2794         selfA.StoredVariables["APosterioriCovariance"].store( A )
2795     if selfA._toStore("JacobianMatrixAtOptimum"):
2796         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2797     if selfA._toStore("KalmanGainAtOptimum"):
2798         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2799         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2800         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2801     #
2802     # Additional calculations and/or storage
2803     # --------------------------------------
2804     if selfA._toStore("Innovation") or \
2805         selfA._toStore("SigmaObs2") or \
2806         selfA._toStore("MahalanobisConsistency") or \
2807         selfA._toStore("OMB"):
2808         d  = Y - HXb
2809     if selfA._toStore("Innovation"):
2810         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2811     if selfA._toStore("BMA"):
2812         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2813     if selfA._toStore("OMA"):
2814         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2815     if selfA._toStore("OMB"):
2816         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2817     if selfA._toStore("SigmaObs2"):
2818         TraceR = R.trace(Y.size)
2819         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2820     if selfA._toStore("MahalanobisConsistency"):
2821         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2822     if selfA._toStore("SimulationQuantiles"):
2823         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2824     if selfA._toStore("SimulatedObservationAtBackground"):
2825         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2826     if selfA._toStore("SimulatedObservationAtOptimum"):
2827         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2828     #
2829     return 0
2830
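
# Editorial sketch (added for illustration, not called by any ADAO algorithm):
# for a linear observation operator, the 3DVAR minimum has the closed form
# Xa = Xb + (B^-1 + H.T R^-1 H)^-1 H.T R^-1 (Y - H Xb). This toy example, with
# arbitrary small matrices, shows the value the iterative minimization above
# converges to in that linear case.
def _sketch_linear_3dvar_analysis():
    "Closed-form 3DVAR analysis for a small linear problem"
    B  = numpy.diag([1., 2., 0.5])                       # background error covariance
    R  = numpy.diag([0.1, 0.1])                          # observation error covariance
    H  = numpy.array([[1., 1., 0.],[0., 0., 1.]])        # linear observation operator
    Xb = numpy.array([0., 0., 0.])                       # background state
    Y  = numpy.array([1., 2.])                           # observations
    BI, RI  = numpy.linalg.inv(B), numpy.linalg.inv(R)
    Hessian = BI + H.T @ RI @ H                          # Hessian of the 3DVAR cost
    Xa = Xb + numpy.linalg.solve(Hessian, H.T @ RI @ (Y - H @ Xb))
    return Xa
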
2831 # ==============================================================================
2832 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2833     """
2834     4DVAR
2835     """
2836     #
2837     # Initialisations
2838     # ---------------
2839     #
2840     # Operators
2841     Hm = HO["Direct"].appliedControledFormTo
2842     Mm = EM["Direct"].appliedControledFormTo
2843     #
2844     if CM is not None and "Tangent" in CM and U is not None:
2845         Cm = CM["Tangent"].asMatrix(Xb)
2846     else:
2847         Cm = None
2848     #
2849     def Un(_step):
2850         if U is not None:
2851             if hasattr(U,"store") and 1<=_step<len(U) :
2852                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2853             elif hasattr(U,"store") and len(U)==1:
2854                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2855             else:
2856                 _Un = numpy.asmatrix(numpy.ravel( U )).T
2857         else:
2858             _Un = None
2859         return _Un
2860     def CmUn(_xn,_un):
2861         if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is counted twice!
2862             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2863             _CmUn = _Cm * _un
2864         else:
2865             _CmUn = 0.
2866         return _CmUn
2867     #
2868     # Note: the observations are used starting from time step number 1,
2869     # and are stored in Yo indexed according to these step numbers.
2870     # Step 0 is therefore not used, since the first stage begins
2871     # with the observation of step 1.
2872     #
2873     # Number of steps identical to the number of observation steps
2874     if hasattr(Y,"stepnumber"):
2875         duration = Y.stepnumber()
2876     else:
2877         duration = 2
2878     #
2879     # Precomputation of the inverses of B and R
2880     BI = B.getI()
2881     RI = R.getI()
2882     #
2883     # Starting point of the optimization
2884     Xini = selfA._parameters["InitializationPoint"]
2885     #
2886     # Definition of the cost function
2887     # -------------------------------
2888     selfA.DirectCalculation = [None,] # Step 0 is not observed
2889     selfA.DirectInnovation  = [None,] # Step 0 is not observed
2890     def CostFunction(x):
2891         _X  = numpy.asmatrix(numpy.ravel( x )).T
2892         if selfA._parameters["StoreInternalVariables"] or \
2893             selfA._toStore("CurrentState") or \
2894             selfA._toStore("CurrentOptimum"):
2895             selfA.StoredVariables["CurrentState"].store( _X )
2896         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2897         selfA.DirectCalculation = [None,]
2898         selfA.DirectInnovation  = [None,]
2899         Jo  = 0.
2900         _Xn = _X
2901         for step in range(0,duration-1):
2902             if hasattr(Y,"store"):
2903                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2904             else:
2905                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2906             _Un = Un(step)
2907             #
2908             # Evolution step
2909             if selfA._parameters["EstimationOf"] == "State":
2910                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2911             elif selfA._parameters["EstimationOf"] == "Parameters":
2912                 pass
2913             #
2914             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2915                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2916                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2917             #
2918             # Difference with the observations
2919             if selfA._parameters["EstimationOf"] == "State":
2920                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2921             elif selfA._parameters["EstimationOf"] == "Parameters":
2922                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2923             #
2924             # Storage of the state
2925             selfA.DirectCalculation.append( _Xn )
2926             selfA.DirectInnovation.append( _YmHMX )
2927             #
2928             # Accumulation into the observation functional
2929             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2930         J = Jb + Jo
2931         #
2932         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2933         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2934         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2935         selfA.StoredVariables["CostFunctionJ" ].store( J )
2936         if selfA._toStore("IndexOfOptimum") or \
2937             selfA._toStore("CurrentOptimum") or \
2938             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2939             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2940             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2941             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2942         if selfA._toStore("IndexOfOptimum"):
2943             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2944         if selfA._toStore("CurrentOptimum"):
2945             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2946         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2947             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2948         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2949             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2950         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2951             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2952         return J
2953     #
2954     def GradientOfCostFunction(x):
2955         _X      = numpy.asmatrix(numpy.ravel( x )).T
2956         GradJb  = BI * (_X - Xb)
2957         GradJo  = 0.
2958         for step in range(duration-1,0,-1):
2959             # Retrieve the last stored evolution state
2960             _Xn = selfA.DirectCalculation.pop()
2961             # Retrieve the last stored innovation
2962             _YmHMX = selfA.DirectInnovation.pop()
2963             # Computation of the adjoint operators
2964             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2965             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2966             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2967             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2968             # Gradient computation by adjoint state
2969             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
2970             GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
2971         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2972         return GradJ
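    #
    # Editorial note (added): CostFunction integrates the model forward over
    # the assimilation window and accumulates Jo from the stored innovations,
    # while GradientOfCostFunction sweeps backward through the stored states,
    # accumulating the adjoint contributions
    #     GradJo <- Ma ( GradJo + Ha R^-1 (Y_k - H(X_k)) )
    # with Ha and Ma the adjoint (transposed) observation and model operators;
    # the total gradient is then GradJb - GradJo, the classical adjoint-state
    # computation of the 4DVAR gradient.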
2973     #
2974     # Minimization of the functional
2975     # ------------------------------
2976     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2977     #
2978     if selfA._parameters["Minimizer"] == "LBFGSB":
2979         if "0.19" <= scipy.version.version <= "1.1.0":
2980             import lbfgsbhlt as optimiseur
2981         else:
2982             import scipy.optimize as optimiseur
2983         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2984             func        = CostFunction,
2985             x0          = Xini,
2986             fprime      = GradientOfCostFunction,
2987             args        = (),
2988             bounds      = selfA._parameters["Bounds"],
2989             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2990             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2991             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2992             iprint      = selfA._parameters["optiprint"],
2993             )
2994         nfeval = Informations['funcalls']
2995         rc     = Informations['warnflag']
2996     elif selfA._parameters["Minimizer"] == "TNC":
2997         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2998             func        = CostFunction,
2999             x0          = Xini,
3000             fprime      = GradientOfCostFunction,
3001             args        = (),
3002             bounds      = selfA._parameters["Bounds"],
3003             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3004             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3005             ftol        = selfA._parameters["CostDecrementTolerance"],
3006             messages    = selfA._parameters["optmessages"],
3007             )
3008     elif selfA._parameters["Minimizer"] == "CG":
3009         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3010             f           = CostFunction,
3011             x0          = Xini,
3012             fprime      = GradientOfCostFunction,
3013             args        = (),
3014             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3015             gtol        = selfA._parameters["GradientNormTolerance"],
3016             disp        = selfA._parameters["optdisp"],
3017             full_output = True,
3018             )
3019     elif selfA._parameters["Minimizer"] == "NCG":
3020         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3021             f           = CostFunction,
3022             x0          = Xini,
3023             fprime      = GradientOfCostFunction,
3024             args        = (),
3025             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3026             avextol     = selfA._parameters["CostDecrementTolerance"],
3027             disp        = selfA._parameters["optdisp"],
3028             full_output = True,
3029             )
3030     elif selfA._parameters["Minimizer"] == "BFGS":
3031         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3032             f           = CostFunction,
3033             x0          = Xini,
3034             fprime      = GradientOfCostFunction,
3035             args        = (),
3036             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3037             gtol        = selfA._parameters["GradientNormTolerance"],
3038             disp        = selfA._parameters["optdisp"],
3039             full_output = True,
3040             )
3041     else:
3042         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3043     #
3044     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3045     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3046     #
3047     # Workaround for a TNC bug on the returned Minimum
3048     # ------------------------------------------------
3049     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3050         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3051     #
3052     # Retrieval of the analysis
3053     # -------------------------
3054     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3055     #
3056     selfA.StoredVariables["Analysis"].store( Xa )
3057     #
3058     # Additional calculations and/or storage
3059     # --------------------------------------
3060     if selfA._toStore("BMA"):
3061         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3062     #
3063     return 0
3064
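
# Editorial sketch (added for illustration, not called by any ADAO algorithm):
# the backward adjoint recursion used above for the 4DVAR gradient, on a toy
# linear problem with two observation times. All matrices are arbitrary
# assumptions; the point is only the order of the backward accumulation.
def _sketch_adjoint_4dvar_gradient():
    "Adjoint-state gradient of a small linear 4DVAR observation term"
    M  = numpy.array([[1.0, 0.1],[0.0, 1.0]])            # linear evolution model
    H  = numpy.array([[1.0, 0.0]])                       # linear observation operator
    RI = numpy.array([[10.0]])                           # inverse observation covariance
    X0 = numpy.array([[0.5],[0.2]])                      # initial state (column vector)
    Yo = [numpy.array([[0.7]]), numpy.array([[0.9]])]    # observations at steps 1 and 2
    # Forward sweep: propagate and store the states and the innovations
    # (a nonlinear model would also need the stored states to linearize)
    states, innovations, X = [], [], X0
    for Y in Yo:
        X = M @ X
        states.append(X)
        innovations.append(Y - H @ X)
    # Backward sweep: accumulate the adjoint contributions, newest step first
    GradJo = numpy.zeros((2, 1))
    for d in reversed(innovations):
        GradJo = GradJo + H.T @ RI @ d
        GradJo = M.T @ GradJo
    return -numpy.ravel(GradJo)                          # gradient of Jo with respect to X0
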
3065 # ==============================================================================
3066 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3067     """
3068     3DVAR variational analysis with no inversion of B
3069     """
3070     #
3071     # Initialisations
3072     # ---------------
3073     #
3074     # Operators
3075     Hm = HO["Direct"].appliedTo
3076     Ha = HO["Adjoint"].appliedInXTo
3077     #
3078     # Precomputation of the transpose of B and the inverse of R
3079     BT = B.getT()
3080     RI = R.getI()
3081     #
3082     # Starting point of the optimization
3083     Xini = numpy.zeros(Xb.shape)
3084     #
3085     # Definition of the cost function
3086     # -------------------------------
3087     def CostFunction(v):
3088         _V = numpy.asmatrix(numpy.ravel( v )).T
3089         _X = Xb + B * _V
3090         if selfA._parameters["StoreInternalVariables"] or \
3091             selfA._toStore("CurrentState") or \
3092             selfA._toStore("CurrentOptimum"):
3093             selfA.StoredVariables["CurrentState"].store( _X )
3094         _HX = Hm( _X )
3095         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3096         _Innovation = Y - _HX
3097         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3098             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3099             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3100         if selfA._toStore("InnovationAtCurrentState"):
3101             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3102         #
3103         Jb  = float( 0.5 * _V.T * BT * _V )
3104         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3105         J   = Jb + Jo
3106         #
3107         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3108         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3109         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3110         selfA.StoredVariables["CostFunctionJ" ].store( J )
3111         if selfA._toStore("IndexOfOptimum") or \
3112             selfA._toStore("CurrentOptimum") or \
3113             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3114             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3115             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3116             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3117             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3118         if selfA._toStore("IndexOfOptimum"):
3119             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3120         if selfA._toStore("CurrentOptimum"):
3121             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3122         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3123             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3124         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3125             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3126         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3127             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3128         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3129             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3130         return J
3131     #
3132     def GradientOfCostFunction(v):
3133         _V = numpy.asmatrix(numpy.ravel( v )).T
3134         _X = Xb + B * _V
3135         _HX     = Hm( _X )
3136         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3137         GradJb  = BT * _V
3138         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3139         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3140         return GradJ
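    #
    # Editorial note (added): this variant works with the control variable v
    # defined by x = Xb + B v, so the background term Jb = 0.5 v.T B.T v only
    # needs products by B (and its transpose), never B^-1. The observation
    # term and its gradient expression are the same as in the standard 3DVAR
    # above, evaluated at x = Xb + B v.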
3141     #
3142     # Minimization of the functional
3143     # ------------------------------
3144     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3145     #
3146     if selfA._parameters["Minimizer"] == "LBFGSB":
3147         if "0.19" <= scipy.version.version <= "1.1.0":
3148             import lbfgsbhlt as optimiseur
3149         else:
3150             import scipy.optimize as optimiseur
3151         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3152             func        = CostFunction,
3153             x0          = Xini,
3154             fprime      = GradientOfCostFunction,
3155             args        = (),
3156             bounds      = selfA._parameters["Bounds"],
3157             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3158             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3159             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3160             iprint      = selfA._parameters["optiprint"],
3161             )
3162         nfeval = Informations['funcalls']
3163         rc     = Informations['warnflag']
3164     elif selfA._parameters["Minimizer"] == "TNC":
3165         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3166             func        = CostFunction,
3167             x0          = Xini,
3168             fprime      = GradientOfCostFunction,
3169             args        = (),
3170             bounds      = selfA._parameters["Bounds"],
3171             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3172             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3173             ftol        = selfA._parameters["CostDecrementTolerance"],
3174             messages    = selfA._parameters["optmessages"],
3175             )
3176     elif selfA._parameters["Minimizer"] == "CG":
3177         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3178             f           = CostFunction,
3179             x0          = Xini,
3180             fprime      = GradientOfCostFunction,
3181             args        = (),
3182             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3183             gtol        = selfA._parameters["GradientNormTolerance"],
3184             disp        = selfA._parameters["optdisp"],
3185             full_output = True,
3186             )
3187     elif selfA._parameters["Minimizer"] == "NCG":
3188         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3189             f           = CostFunction,
3190             x0          = Xini,
3191             fprime      = GradientOfCostFunction,
3192             args        = (),
3193             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3194             avextol     = selfA._parameters["CostDecrementTolerance"],
3195             disp        = selfA._parameters["optdisp"],
3196             full_output = True,
3197             )
3198     elif selfA._parameters["Minimizer"] == "BFGS":
3199         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3200             f           = CostFunction,
3201             x0          = Xini,
3202             fprime      = GradientOfCostFunction,
3203             args        = (),
3204             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3205             gtol        = selfA._parameters["GradientNormTolerance"],
3206             disp        = selfA._parameters["optdisp"],
3207             full_output = True,
3208             )
3209     else:
3210         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3211     #
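    # Recover the iterate with the lowest cost among the values stored during this minimization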
3212     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3213     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3214     #
3215     # Correction to work around a TNC bug on the returned Minimum
3216     # ------------------------------------------------------------
3217     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3218         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3219         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3220     else:
3221         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3222     #
3223     # Retrieving the analysis
3224     # -----------------------
3225     Xa = Minimum
3226     #
3227     selfA.StoredVariables["Analysis"].store( Xa )
3228     #
3229     if selfA._toStore("OMA") or \
3230         selfA._toStore("SigmaObs2") or \
3231         selfA._toStore("SimulationQuantiles") or \
3232         selfA._toStore("SimulatedObservationAtOptimum"):
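        # Reuse an already stored simulation at the optimum when available,
        # otherwise evaluate the observation operator once more at Xa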
3233         if selfA._toStore("SimulatedObservationAtCurrentState"):
3234             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3235         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3236             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3237         else:
3238             HXa = Hm( Xa )
3239     #
3240     # Computation of the analysis covariance
3241     # --------------------------------------
3242     if selfA._toStore("APosterioriCovariance") or \
3243         selfA._toStore("SimulationQuantiles") or \
3244         selfA._toStore("JacobianMatrixAtOptimum") or \
3245         selfA._toStore("KalmanGainAtOptimum"):
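        # Tangent (linearized) observation operator at the optimum, stored as a Y.size x Xa.size matrix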
3246         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3247         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3248     if selfA._toStore("APosterioriCovariance") or \
3249         selfA._toStore("SimulationQuantiles") or \
3250         selfA._toStore("KalmanGainAtOptimum"):
3251         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3252         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3253     if selfA._toStore("APosterioriCovariance") or \
3254         selfA._toStore("SimulationQuantiles"):
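        # The a posteriori covariance A is the inverse of the cost function Hessian,
        # assembled column by column: for each canonical vector e_i, the column is
        # B^{-1} e_i + H^T R^{-1} (H e_i)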
3255         BI = B.getI()
3256         HessienneI = []
3257         nb = Xa.size
3258         for i in range(nb):
3259             _ee    = numpy.matrix(numpy.zeros(nb)).T
3260             _ee[i] = 1.
3261             _HtEE  = numpy.dot(HtM,_ee)
3262             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3263             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3264         HessienneI = numpy.matrix( HessienneI )
3265         A = HessienneI.I
3266         if min(A.shape) != max(A.shape):
3267             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3268         if (numpy.diag(A) < 0).any():
3269             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3270         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
3271             try:
3272                 numpy.linalg.cholesky( A ) # Only the success of the factorization matters here
3273             except numpy.linalg.LinAlgError:
3274                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3275     if selfA._toStore("APosterioriCovariance"):
3276         selfA.StoredVariables["APosterioriCovariance"].store( A )
3277     if selfA._toStore("JacobianMatrixAtOptimum"):
3278         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3279     if selfA._toStore("KalmanGainAtOptimum"):
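        # Two algebraically equivalent gain expressions, chosen according to the
        # relative sizes of the observation and state spaces:
        #   K = B H^T (R + H B H^T)^{-1}                 if Y.size <= Xb.size
        #   K = (B^{-1} + H^T R^{-1} H)^{-1} H^T R^{-1}  otherwise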
3280         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3281         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3282         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3283     #
3284     # Additional calculations and/or storage
3285     # --------------------------------------
3286     if selfA._toStore("Innovation") or \
3287         selfA._toStore("SigmaObs2") or \
3288         selfA._toStore("MahalanobisConsistency") or \
3289         selfA._toStore("OMB"):
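        # Innovation vector d = Y - H(Xb), shared by several diagnostics below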
3290         d  = Y - HXb
3291     if selfA._toStore("Innovation"):
3292         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3293     if selfA._toStore("BMA"):
3294         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3295     if selfA._toStore("OMA"):
3296         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3297     if selfA._toStore("OMB"):
3298         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3299     if selfA._toStore("SigmaObs2"):
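        # Observation error variance consistency indicator (Y-HXb)^T (Y-HXa) / Tr(R)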
3300         TraceR = R.trace(Y.size)
3301         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3302     if selfA._toStore("MahalanobisConsistency"):
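        # Consistency indicator 2*J(Xa)/p, where p is the number of observations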
3303         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3304     if selfA._toStore("SimulationQuantiles"):
3305         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3306     if selfA._toStore("SimulatedObservationAtBackground"):
3307         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3308     if selfA._toStore("SimulatedObservationAtOptimum"):
3309         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3310     #
3311     return 0
3312
3313 # ==============================================================================
3314 if __name__ == "__main__":
3315     print('\n AUTODIAGNOSTIC\n')