[modules/adao.git] / src / daComposant / daCore / NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
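
# Editorial sketch (not part of the original module): one way of assembling the
# triplet expected by ExecuteFunction. The path, module and function names below
# ("/some/user/path", "mymodule", "myfunc") are hypothetical placeholders.
def _example_ExecuteFunction_triplet():
    funcrepr = {
        "__userFunction__path" : "/some/user/path",  # directory containing the module (hypothetical)
        "__userFunction__modl" : "mymodule",          # module file name without extension (hypothetical)
        "__userFunction__name" : "myfunc",            # function defined inside that module (hypothetical)
        }
    X = numpy.array([1., 2., 3.])
    # ExecuteFunction imports the function, applies it to the column vector built
    # from X (passing xArgs as keyword arguments when it is a dict), and returns
    # the flattened result.
    return ExecuteFunction( (X, None, funcrepr) )
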
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     This class provides an interface to define approximated operators. When an
55     object is created by supplying a function "Function", one obtains an object
56     that exposes the 3 methods "DirectOperator", "TangentOperator" and
57     "AdjointOperator". The FD approximation is controlled through the
58     multiplicative increment "increment", with a default value of 1%, or through
59     the fixed increment "dX", which is then multiplied by "increment" (hence in %),
60     and centered FD are used when the boolean "centeredDF" is true.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Computation of the direct operator using the supplied function.
171
172         NB: the extraArgs are there to preserve call compatibility, but they must
173         not be passed here to the user function.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Computation of the tangent operator as the Jacobian by finite differences,
188         that is, the gradient of H at X. Directional finite differences are used
189         around the point X. X is a numpy.matrix.
190
191         Centered finite differences (second-order approximation):
192         1/ For each component i of X, the perturbation dX[i] is added to and
193            subtracted from the component X[i] to build X_plus_dXi and X_moins_dXi,
194            and the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi =
195            H( X_moins_dXi ) are computed
196         2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided by
197            the step 2*dXi
198         3/ Each result, component by component, becomes a column of the Jacobian
199
200         Non-centered finite differences (first-order approximation):
201         1/ For each component i of X, the perturbation dX[i] is added to the
202            component X[i] to build X_plus_dXi, and the response
203            HX_plus_dXi = H( X_plus_dXi ) is computed
204         2/ The central value HX = H(X) is computed
205         3/ The differences (HX_plus_dXi-HX) are formed and divided by
206            the step dXi
207         4/ Each result, component by component, becomes a column of the Jacobian
208
209         """
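        # Editorial illustration (added note): for the scalar operator H(x) = x**2 at
        # X = [2.] with dX = [0.01], the centered difference gives
        #     ( H(2.01) - H(1.99) ) / (2*0.01) = (4.0401 - 3.9601) / 0.02 = 4.0,
        # exact here because H is quadratic, while the non-centered difference gives
        #     ( H(2.01) - H(2.0) ) / 0.01 = (4.0401 - 4.0) / 0.01 = 4.01,
        # with the expected first-order error. Each such ratio fills one column of
        # the Jacobian computed below.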
210         logging.debug("FDA Début du calcul de la Jacobienne")
211         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Calcul Jacobienne (explicite)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                     #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                     #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
377         #
378         logging.debug("FDA Fin du calcul de la Jacobienne")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Computation of the tangent operator using the Jacobian.
386
387         NB: the extraArgs are there to preserve call compatibility, but they must
388         not be passed here to the user function.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect length of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Computation of the matrix form when the second argument is None
402             # -------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Computation of the linearized value of H at X applied to dX
408             # ------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Computation of the adjoint operator using the Jacobian.
418
419         NB: the extraArgs are there to preserve call compatibility, but they must
420         not be passed here to the user function.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect length of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Computation of the matrix form when the second argument is None
434             # -------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Computation of the adjoint value at X applied to Y
440             # --------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
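
# Editorial usage sketch (not part of the original module): a finite-difference
# approximation of a simple quadratic operator, evaluated in its direct, tangent
# and adjoint forms. The operator "H" below is purely illustrative.
def _example_FDApproximation_usage():
    def H( x ):
        return numpy.ravel( x )**2
    FDA = FDApproximation( Function = H, increment = 0.01, centeredDF = True )
    X   = numpy.array([1., 2., 3.])
    HX  = FDA.DirectOperator( X )           # H(X)
    TX  = FDA.TangentOperator( (X, X) )     # Jacobian at X applied to dX = X
    AX  = FDA.AdjointOperator( (X, HX) )    # transposed Jacobian applied to Y = HX
    return HX, TX, AX
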
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
449     #
450     _bgcenter = numpy.ravel(_bgcenter)[:,None]
451     if _nbmembers < 1:
452         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
453     #
454     if _bgcovariance is None:
455         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
456     else:
457         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
459     #
460     return BackgroundEnsemble
461
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
465     def __CenteredRandomAnomalies(Zr, N):
466         """
467         Generates a matrix of N random anomalies centered on Zr, following the
468         handwritten notes of MB and consistent with the code of PS with eps = -1
469         """
470         eps = -1
471         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
474         Q = numpy.dot(Q,R)
475         Zr = numpy.dot(Q,Zr)
476         return Zr.T
477     #
478     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
479     if _nbmembers < 1:
480         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
481     if _bgcovariance is None:
482         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
483     else:
484         if _withSVD:
485             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486             _nbctl = _bgcenter.size
487             if _nbmembers > _nbctl:
488                 _Z = numpy.concatenate((numpy.dot(
489                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
491             else:
492                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494             BackgroundEnsemble = _bgcenter + _Zca
495         else:
496             if max(abs(_bgcovariance.flatten())) > 0:
497                 _nbctl = _bgcenter.size
498                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500                 BackgroundEnsemble = _bgcenter + _Zca
501             else:
502                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
503     #
504     return BackgroundEnsemble
505
506 # ==============================================================================
507 def EnsembleMean( __Ensemble ):
508     "Renvoie la moyenne empirique d'un ensemble"
509     return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
510
511 # ==============================================================================
512 def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
513     "Renvoie les anomalies centrées à partir d'un ensemble"
514     if __OptMean is None:
515         __Em = EnsembleMean( __Ensemble )
516     else:
517         __Em = numpy.ravel( __OptMean ).reshape((-1,1))
518     #
519     return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
520
521 # ==============================================================================
522 def EnsembleErrorCovariance( __Ensemble, __quick = False ):
523     "Renvoie l'estimation empirique de la covariance d'ensemble"
524     if __quick:
525         # Covariance rapide mais rarement définie positive
526         __Covariance = numpy.cov( __Ensemble )
527     else:
528         # Résultat souvent identique à numpy.cov, mais plus robuste
529         __n, __m = numpy.asarray( __Ensemble ).shape
530         __Anomalies = EnsembleOfAnomalies( __Ensemble )
531         # Estimation empirique
532         __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
533         # Assure la symétrie
534         __Covariance = ( __Covariance + __Covariance.T ) * 0.5
535         # Assure la positivité
536         __epsilon    = mpr*numpy.trace( __Covariance )
537         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
538     #
539     return __Covariance
540
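
# Editorial consistency sketch (not part of the original module): the hand-made
# estimate built from the centered anomalies agrees with both branches of
# EnsembleErrorCovariance, up to the tiny diagonal regularization added above.
def _example_EnsembleErrorCovariance():
    E = numpy.random.normal(size=(3, 50))   # 3 state variables, 50 members
    A = EnsembleOfAnomalies( E )             # members minus the ensemble mean
    C = ( A @ A.T ) / (E.shape[1] - 1)       # empirical estimate A A^T / (m-1)
    return numpy.allclose( C, EnsembleErrorCovariance( E ), atol = 1.e-8 ) \
       and numpy.allclose( C, EnsembleErrorCovariance( E, True ), atol = 1.e-8 )
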
541 # ==============================================================================
542 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
543     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
544     if hasattr(__Covariance,"assparsematrix"):
545         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
546             # Traitement d'une covariance nulle ou presque
547             return __Ensemble
548         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
549             # Traitement d'une covariance nulle ou presque
550             return __Ensemble
551     else:
552         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
553             # Traitement d'une covariance nulle ou presque
554             return __Ensemble
555         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
556             # Traitement d'une covariance nulle ou presque
557             return __Ensemble
558     #
559     __n, __m = __Ensemble.shape
560     if __Seed is not None: numpy.random.seed(__Seed)
561     #
562     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
563         # Traitement d'une covariance multiple de l'identité
564         __zero = 0.
565         __std  = numpy.sqrt(__Covariance.assparsematrix())
566         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
567     #
568     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
569         # Traitement d'une covariance diagonale avec variances non identiques
570         __zero = numpy.zeros(__n)
571         __std  = numpy.sqrt(__Covariance.assparsematrix())
572         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
573     #
574     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
575         # Traitement d'une covariance pleine
576         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
577     #
578     elif isinstance(__Covariance, numpy.ndarray):
579         # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
580         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
581     #
582     else:
583         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
584     #
585     return __Ensemble
586
587 # ==============================================================================
588 def CovarianceInflation(
589         InputCovOrEns,
590         InflationType   = None,
591         InflationFactor = None,
592         BackgroundCov   = None,
593         ):
594     """
595     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
596
597     Summary: Hunt 2007, section 2.3.5
598     """
599     if InflationFactor is None:
600         return InputCovOrEns
601     else:
602         InflationFactor = float(InflationFactor)
603     #
604     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
605         if InflationFactor < 1.:
606             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
607         if InflationFactor < 1.+mpr:
608             return InputCovOrEns
609         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
610     #
611     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
612         if InflationFactor < 1.:
613             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
614         if InflationFactor < 1.+mpr:
615             return InputCovOrEns
616         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
617         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
618             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
619     #
620     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
621         if InflationFactor < 0.:
622             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
623         if InflationFactor < mpr:
624             return InputCovOrEns
625         __n, __m = numpy.asarray(InputCovOrEns).shape
626         if __n != __m:
627             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
628         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
629     #
630     elif InflationType == "HybridOnBackgroundCovariance":
631         if InflationFactor < 0.:
632             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
633         if InflationFactor < mpr:
634             return InputCovOrEns
635         __n, __m = numpy.asarray(InputCovOrEns).shape
636         if __n != __m:
637             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
638         if BackgroundCov is None:
639             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
640         if InputCovOrEns.shape != BackgroundCov.shape:
641             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
642         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
643     #
644     elif InflationType == "Relaxation":
645         raise NotImplementedError("InflationType Relaxation")
646     #
647     else:
648         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
649     #
650     return OutputCovOrEns
651
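
# Editorial sketch (not part of the original module): multiplicative inflation of
# analysis anomalies stretches the spread around the ensemble mean while leaving
# the mean itself unchanged.
def _example_CovarianceInflation():
    E  = numpy.random.normal(size=(3, 10))
    Ei = CovarianceInflation( E, "MultiplicativeOnAnalysisAnomalies", 1.5 )
    # Same mean, anomalies multiplied by 1.5:
    return numpy.allclose( E.mean(axis=1), Ei.mean(axis=1) )
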
652 # ==============================================================================
653 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
654     "Estimation des quantiles a posteriori (selfA est modifié)"
655     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
656     #
657     # Traitement des bornes
658     if "StateBoundsForQuantiles" in selfA._parameters:
659         LBounds = selfA._parameters["StateBoundsForQuantiles"] # Prioritaire
660     elif "Bounds" in selfA._parameters:
661         LBounds = selfA._parameters["Bounds"]  # Défaut raisonnable
662     else:
663         LBounds = None
664     if LBounds is not None:
665         LBounds = numpy.asmatrix(ForceNumericBounds( LBounds ))
666     #
667     # Échantillonnage des états
668     YfQ  = None
669     EXr  = None
670     if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
671         HXa  = numpy.matrix(numpy.ravel( HXa )).T
672     for i in range(nbsamples):
673         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
674             dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
675             if LBounds is not None: # "EstimateProjection" par défaut
676                 dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0]) - Xa),axis=1)
677                 dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1]) - Xa),axis=1)
678             dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
679             Yr = HXa + dYr
680             if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
681         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
682             Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
683             if LBounds is not None: # "EstimateProjection" par défaut
684                 Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
685                 Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
686             Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
687         else:
688             raise ValueError("Quantile simulation has to be either Linear or NonLinear.")
689         #
690         if YfQ is None:
691             YfQ = Yr
692             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
693         else:
694             YfQ = numpy.hstack((YfQ,Yr))
695             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
696     #
697     # Extraction des quantiles
698     YfQ.sort(axis=-1)
699     YQ = None
700     for quantile in selfA._parameters["Quantiles"]:
701         if not (0. <= float(quantile) <= 1.): continue
702         indice = int(nbsamples * float(quantile) - 1./nbsamples)
703         if YQ is None: YQ = YfQ[:,indice]
704         else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
705     selfA.StoredVariables["SimulationQuantiles"].store( YQ )
706     if selfA._toStore("SampledStateForQuantiles"):
707         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
708     #
709     return 0
710
711 # ==============================================================================
712 def ForceNumericBounds( __Bounds ):
713     "Force les bornes à être des valeurs numériques, sauf si None"
714     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
715     if __Bounds is None: return None
716     # Converti toutes les bornes individuelles None à +/- l'infini
717     __Bounds = numpy.asarray( __Bounds, dtype=float )
718     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0:
719         raise ValueError("Incorrectly defined bounds data")
720     __Bounds[numpy.isnan(__Bounds)[:,0],0] = -sys.float_info.max
721     __Bounds[numpy.isnan(__Bounds)[:,1],1] =  sys.float_info.max
722     return __Bounds
723
724 # ==============================================================================
725 def RecentredBounds( __Bounds, __Center):
726     # Keeps the default value None when there are no bounds
727     if __Bounds is None: return None
728     # Recenters the numerical bound values around the given center
729     return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
730
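
# Editorial sketch (not part of the original module): bounds containing None are
# forced to +/- the largest float, and RecentredBounds expresses them relative to
# a given center (here the hypothetical center [1., 1., 1.]).
def _example_Bounds_helpers():
    Bounds    = [[0., 10.], [None, 5.], [-5., None]]
    Numeric   = ForceNumericBounds( Bounds )             # None -> -/+ sys.float_info.max
    Recentred = RecentredBounds( Bounds, [1., 1., 1.] )  # numeric bounds minus the center
    return Numeric, Recentred
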
731 # ==============================================================================
732 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
733     """
734     EnKS
735     """
736     #
737     # Opérateurs
738     H = HO["Direct"].appliedControledFormTo
739     #
740     if selfA._parameters["EstimationOf"] == "State":
741         M = EM["Direct"].appliedControledFormTo
742     #
743     if CM is not None and "Tangent" in CM and U is not None:
744         Cm = CM["Tangent"].asMatrix(Xb)
745     else:
746         Cm = None
747     #
748     # Précalcul des inversions de B et R
749     RIdemi = R.sqrtmI()
750     #
751     # Durée d'observation et tailles
752     LagL = selfA._parameters["SmootherLagL"]
753     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
754         raise ValueError("Fixed-lag smoother requires a series of observations")
755     if Y.stepnumber() < LagL:
756         raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
757     duration = Y.stepnumber()
758     __p = numpy.cumprod(Y.shape())[-1]
759     __n = Xb.size
760     __m = selfA._parameters["NumberOfMembers"]
761     #
762     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
763         selfA.StoredVariables["Analysis"].store( Xb )
764         if selfA._toStore("APosterioriCovariance"):
765             if hasattr(B,"asfullmatrix"):
766                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
767             else:
768                 selfA.StoredVariables["APosterioriCovariance"].store( B )
769     #
770     # Calcul direct initial (on privilégie la mémorisation au recalcul)
771     __seed = numpy.random.get_state()
772     selfB = copy.deepcopy(selfA)
773     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
774     if VariantM == "EnKS16-KalmanFilterFormula":
775         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
776     else:
777         raise ValueError("VariantM has to be chosen in the authorized methods list.")
778     if LagL > 0:
779         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
780     else:
781         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
782     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
783     #
784     for step in range(LagL,duration-1):
785         #
786         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
787         sEL.append(None)
788         #
789         if hasattr(Y,"store"):
790             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
791         else:
792             Ynpu = numpy.ravel( Y ).reshape((__p,1))
793         #
794         if U is not None:
795             if hasattr(U,"store") and len(U)>1:
796                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
797             elif hasattr(U,"store") and len(U)==1:
798                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
799             else:
800                 Un = numpy.asmatrix(numpy.ravel( U )).T
801         else:
802             Un = None
803         #
804         #--------------------------
805         if VariantM == "EnKS16-KalmanFilterFormula":
806             if selfA._parameters["EstimationOf"] == "State": # Forecast
807                 EL = M( [(EL[:,i], Un) for i in range(__m)],
808                     argsAsSerie = True,
809                     returnSerieAsArrayMatrix = True )
810                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
811                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
812                     argsAsSerie = True,
813                     returnSerieAsArrayMatrix = True )
814                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
815                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
816                     EZ = EZ + Cm * Un
817             elif selfA._parameters["EstimationOf"] == "Parameters":
818                 # --- > Par principe, M = Id, Q = 0
819                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
820                     argsAsSerie = True,
821                     returnSerieAsArrayMatrix = True )
822             #
823             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
824             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
825             #
826             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
827             mS    = mS.reshape((-1,__m)) # Pour dimension 1
828             delta = RIdemi @ ( Ynpu - vZm )
829             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
830             vw    = mT @ mS.T @ delta
831             #
832             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
833             mU    = numpy.identity(__m)
834             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
835             #
836             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
837             EL    = vEm + EX @ wTU
838             #
839             sEL[LagL] = EL
840             for irl in range(LagL): # Lissage des L précédentes analysis
841                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
842                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
843                 sEL[irl] = vEm + EX @ wTU
844             #
845             # Conservation de l'analyse retrospective d'ordre 0 avant rotation
846             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
847             if selfA._toStore("APosterioriCovariance"):
848                 EXn = sEL[0]
849             #
850             for irl in range(LagL):
851                 sEL[irl] = sEL[irl+1]
852             sEL[LagL] = None
853         #--------------------------
854         else:
855             raise ValueError("VariantM has to be chosen in the authorized methods list.")
856         #
857         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
858         # ---> avec analysis
859         selfA.StoredVariables["Analysis"].store( Xa )
860         if selfA._toStore("APosterioriCovariance"):
861             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
862     #
863     # Stockage des dernières analyses incomplètement remises à jour
864     for irl in range(LagL):
865         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
866         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
867         selfA.StoredVariables["Analysis"].store( Xa )
868     #
869     return 0
870
871 # ==============================================================================
872 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
873     """
874     Ensemble-Transform EnKF
875     """
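    # Editorial note (added): in the "KalmanFilterFormula" variant below, with the
    # normalized observation anomalies S = R^{-1/2} EaHX / sqrt(m-1) and the
    # normalized innovation d = R^{-1/2} (y - Hfm), the analysis weights solve
    #     w = (I + S^T S)^{-1} S^T d,
    # and the analysis ensemble is rebuilt, schematically, as
    #     Xa = Xfm + (EaX / sqrt(m-1)) @ ( w 1^T + sqrt(m-1) (I + S^T S)^{-1/2} ),
    # which is the usual deterministic ensemble-transform update.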
876     if selfA._parameters["EstimationOf"] == "Parameters":
877         selfA._parameters["StoreInternalVariables"] = True
878     #
879     # Opérateurs
880     H = HO["Direct"].appliedControledFormTo
881     #
882     if selfA._parameters["EstimationOf"] == "State":
883         M = EM["Direct"].appliedControledFormTo
884     #
885     if CM is not None and "Tangent" in CM and U is not None:
886         Cm = CM["Tangent"].asMatrix(Xb)
887     else:
888         Cm = None
889     #
890     # Durée d'observation et tailles
891     if hasattr(Y,"stepnumber"):
892         duration = Y.stepnumber()
893         __p = numpy.cumprod(Y.shape())[-1]
894     else:
895         duration = 2
896         __p = numpy.array(Y).size
897     #
898     # Précalcul des inversions de B et R
899     if selfA._parameters["StoreInternalVariables"] \
900         or selfA._toStore("CostFunctionJ") \
901         or selfA._toStore("CostFunctionJb") \
902         or selfA._toStore("CostFunctionJo") \
903         or selfA._toStore("CurrentOptimum") \
904         or selfA._toStore("APosterioriCovariance"):
905         BI = B.getI()
906         RI = R.getI()
907     elif VariantM != "KalmanFilterFormula":
908         RI = R.getI()
909     if VariantM == "KalmanFilterFormula":
910         RIdemi = R.sqrtmI()
911     #
912     __n = Xb.size
913     __m = selfA._parameters["NumberOfMembers"]
914     #
915     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
916         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
917         selfA.StoredVariables["Analysis"].store( Xb )
918         if selfA._toStore("APosterioriCovariance"):
919             if hasattr(B,"asfullmatrix"):
920                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
921             else:
922                 selfA.StoredVariables["APosterioriCovariance"].store( B )
923         selfA._setInternalState("seed", numpy.random.get_state())
924     elif selfA._parameters["nextStep"]:
925         Xn = selfA._getInternalState("Xn")
926     #
927     previousJMinimum = numpy.finfo(float).max
928     #
929     for step in range(duration-1):
930         numpy.random.set_state(selfA._getInternalState("seed"))
931         if hasattr(Y,"store"):
932             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
933         else:
934             Ynpu = numpy.ravel( Y ).reshape((__p,1))
935         #
936         if U is not None:
937             if hasattr(U,"store") and len(U)>1:
938                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
939             elif hasattr(U,"store") and len(U)==1:
940                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
941             else:
942                 Un = numpy.asmatrix(numpy.ravel( U )).T
943         else:
944             Un = None
945         #
946         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
947             Xn = CovarianceInflation( Xn,
948                 selfA._parameters["InflationType"],
949                 selfA._parameters["InflationFactor"],
950                 )
951         #
952         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
953             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
954                 argsAsSerie = True,
955                 returnSerieAsArrayMatrix = True )
956             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
957             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
958                 argsAsSerie = True,
959                 returnSerieAsArrayMatrix = True )
960             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
961                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
962                 Xn_predicted = Xn_predicted + Cm * Un
963         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
964             # --- > Par principe, M = Id, Q = 0
965             Xn_predicted = EMX = Xn
966             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
967                 argsAsSerie = True,
968                 returnSerieAsArrayMatrix = True )
969         #
970         # Mean of forecast and observation of forecast
971         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
972         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
973         #
974         # Anomalies
975         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
976         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
977         #
978         #--------------------------
979         if VariantM == "KalmanFilterFormula":
980             mS    = RIdemi * EaHX / math.sqrt(__m-1)
981             mS    = mS.reshape((-1,__m)) # Pour dimension 1
982             delta = RIdemi * ( Ynpu - Hfm )
983             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
984             vw    = mT @ mS.T @ delta
985             #
986             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
987             mU    = numpy.identity(__m)
988             #
989             EaX   = EaX / math.sqrt(__m-1)
990             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
991         #--------------------------
992         elif VariantM == "Variational":
993             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
994             def CostFunction(w):
995                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
996                 _Jo = 0.5 * _A.T @ (RI * _A)
997                 _Jb = 0.5 * (__m-1) * w.T @ w
998                 _J  = _Jo + _Jb
999                 return float(_J)
1000             def GradientOfCostFunction(w):
1001                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1002                 _GradJo = - EaHX.T @ (RI * _A)
1003                 _GradJb = (__m-1) * w.reshape((__m,1))
1004                 _GradJ  = _GradJo + _GradJb
1005                 return numpy.ravel(_GradJ)
1006             vw = scipy.optimize.fmin_cg(
1007                 f           = CostFunction,
1008                 x0          = numpy.zeros(__m),
1009                 fprime      = GradientOfCostFunction,
1010                 args        = (),
1011                 disp        = False,
1012                 )
1013             #
1014             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1015             Htb = (__m-1) * numpy.identity(__m)
1016             Hta = Hto + Htb
1017             #
1018             Pta = numpy.linalg.inv( Hta )
1019             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1020             #
1021             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1022         #--------------------------
1023         elif VariantM == "FiniteSize11": # Jauge Boc2011
1024             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1025             def CostFunction(w):
1026                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1027                 _Jo = 0.5 * _A.T @ (RI * _A)
1028                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1029                 _J  = _Jo + _Jb
1030                 return float(_J)
1031             def GradientOfCostFunction(w):
1032                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1033                 _GradJo = - EaHX.T @ (RI * _A)
1034                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1035                 _GradJ  = _GradJo + _GradJb
1036                 return numpy.ravel(_GradJ)
1037             vw = scipy.optimize.fmin_cg(
1038                 f           = CostFunction,
1039                 x0          = numpy.zeros(__m),
1040                 fprime      = GradientOfCostFunction,
1041                 args        = (),
1042                 disp        = False,
1043                 )
1044             #
1045             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1046             Htb = __m * \
1047                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1048                 / (1 + 1/__m + vw.T @ vw)**2
1049             Hta = Hto + Htb
1050             #
1051             Pta = numpy.linalg.inv( Hta )
1052             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1053             #
1054             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1055         #--------------------------
1056         elif VariantM == "FiniteSize15": # Jauge Boc2015
1057             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1058             def CostFunction(w):
1059                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1060                 _Jo = 0.5 * _A.T @ (RI * _A)
1061                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1062                 _J  = _Jo + _Jb
1063                 return float(_J)
1064             def GradientOfCostFunction(w):
1065                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1066                 _GradJo = - EaHX.T @ (RI * _A)
1067                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1068                 _GradJ  = _GradJo + _GradJb
1069                 return numpy.ravel(_GradJ)
1070             vw = scipy.optimize.fmin_cg(
1071                 f           = CostFunction,
1072                 x0          = numpy.zeros(__m),
1073                 fprime      = GradientOfCostFunction,
1074                 args        = (),
1075                 disp        = False,
1076                 )
1077             #
1078             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1079             Htb = (__m+1) * \
1080                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1081                 / (1 + 1/__m + vw.T @ vw)**2
1082             Hta = Hto + Htb
1083             #
1084             Pta = numpy.linalg.inv( Hta )
1085             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1086             #
1087             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1088         #--------------------------
1089         elif VariantM == "FiniteSize16": # Jauge Boc2016
1090             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1091             def CostFunction(w):
1092                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1093                 _Jo = 0.5 * _A.T @ (RI * _A)
1094                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1095                 _J  = _Jo + _Jb
1096                 return float(_J)
1097             def GradientOfCostFunction(w):
1098                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1099                 _GradJo = - EaHX.T @ (RI * _A)
1100                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1101                 _GradJ  = _GradJo + _GradJb
1102                 return numpy.ravel(_GradJ)
1103             vw = scipy.optimize.fmin_cg(
1104                 f           = CostFunction,
1105                 x0          = numpy.zeros(__m),
1106                 fprime      = GradientOfCostFunction,
1107                 args        = (),
1108                 disp        = False,
1109                 )
1110             #
1111             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1112             Htb = ((__m+1) / (__m-1)) * \
1113                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1114                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1115             Hta = Hto + Htb
1116             #
1117             Pta = numpy.linalg.inv( Hta )
1118             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1119             #
1120             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1121         #--------------------------
1122         else:
1123             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1124         #
1125         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1126             Xn = CovarianceInflation( Xn,
1127                 selfA._parameters["InflationType"],
1128                 selfA._parameters["InflationFactor"],
1129                 )
1130         #
1131         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1132         #--------------------------
1133         selfA._setInternalState("Xn", Xn)
1134         selfA._setInternalState("seed", numpy.random.get_state())
1135         #--------------------------
1136         #
1137         if selfA._parameters["StoreInternalVariables"] \
1138             or selfA._toStore("CostFunctionJ") \
1139             or selfA._toStore("CostFunctionJb") \
1140             or selfA._toStore("CostFunctionJo") \
1141             or selfA._toStore("APosterioriCovariance") \
1142             or selfA._toStore("InnovationAtCurrentAnalysis") \
1143             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1144             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1145             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1146             _Innovation = Ynpu - _HXa
1147         #
1148         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1149         # ---> with analysis
1150         selfA.StoredVariables["Analysis"].store( Xa )
1151         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1152             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1153         if selfA._toStore("InnovationAtCurrentAnalysis"):
1154             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1155         # ---> with current state
1156         if selfA._parameters["StoreInternalVariables"] \
1157             or selfA._toStore("CurrentState"):
1158             selfA.StoredVariables["CurrentState"].store( Xn )
1159         if selfA._toStore("ForecastState"):
1160             selfA.StoredVariables["ForecastState"].store( EMX )
1161         if selfA._toStore("ForecastCovariance"):
1162             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1163         if selfA._toStore("BMA"):
1164             selfA.StoredVariables["BMA"].store( EMX - Xa )
1165         if selfA._toStore("InnovationAtCurrentState"):
1166             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1167         if selfA._toStore("SimulatedObservationAtCurrentState") \
1168             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1169             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1170         # ---> others
1171         if selfA._parameters["StoreInternalVariables"] \
1172             or selfA._toStore("CostFunctionJ") \
1173             or selfA._toStore("CostFunctionJb") \
1174             or selfA._toStore("CostFunctionJo") \
1175             or selfA._toStore("CurrentOptimum") \
1176             or selfA._toStore("APosterioriCovariance"):
1177             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1178             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1179             J   = Jb + Jo
1180             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1181             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1182             selfA.StoredVariables["CostFunctionJ" ].store( J )
1183             #
1184             if selfA._toStore("IndexOfOptimum") \
1185                 or selfA._toStore("CurrentOptimum") \
1186                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1187                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1188                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1189                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1190                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1191             if selfA._toStore("IndexOfOptimum"):
1192                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1193             if selfA._toStore("CurrentOptimum"):
1194                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1195             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1196                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1197             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1198                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1199             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1200                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1201             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1202                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1203         if selfA._toStore("APosterioriCovariance"):
1204             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1205         if selfA._parameters["EstimationOf"] == "Parameters" \
1206             and J < previousJMinimum:
1207             previousJMinimum    = J
1208             XaMin               = Xa
1209             if selfA._toStore("APosterioriCovariance"):
1210                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1211         # ---> For the smoothers
1212         if selfA._toStore("CurrentEnsembleState"):
1213             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1214     #
1215     # Additional final storage of the optimum in parameter estimation
1216     # ----------------------------------------------------------------------
1217     if selfA._parameters["EstimationOf"] == "Parameters":
1218         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1219         selfA.StoredVariables["Analysis"].store( XaMin )
1220         if selfA._toStore("APosterioriCovariance"):
1221             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1222         if selfA._toStore("BMA"):
1223             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1224     #
1225     return 0
1226
1227 # ==============================================================================
1228 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1229     """
1230     Extended Kalman Filter
1231     """
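         #
         # The time loop below implements the classical EKF recursion: forecast of
         # the state and of its covariance with the tangent/adjoint of the evolution
         # model, then analysis update with the gain built from the tangent/adjoint
         # of the observation operator. A minimal, hypothetical call sketch (the
         # "algo" object is only an assumption standing for the calling Algorithm
         # instance of this module):
         #     exkf(algo, Xb, Y, U, HO, EM, CM, R, B, Q)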
1232     if selfA._parameters["EstimationOf"] == "Parameters":
1233         selfA._parameters["StoreInternalVariables"] = True
1234     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
1235     #
1236     # Operators
1237     H = HO["Direct"].appliedControledFormTo
1238     #
1239     if selfA._parameters["EstimationOf"] == "State":
1240         M = EM["Direct"].appliedControledFormTo
1241     #
1242     if CM is not None and "Tangent" in CM and U is not None:
1243         Cm = CM["Tangent"].asMatrix(Xb)
1244     else:
1245         Cm = None
1246     #
1247     # Observation duration and sizes
1248     if hasattr(Y,"stepnumber"):
1249         duration = Y.stepnumber()
1250         __p = numpy.cumprod(Y.shape())[-1]
1251     else:
1252         duration = 2
1253         __p = numpy.array(Y).size
1254     #
1255     # Precompute the inverses of B and R
1256     if selfA._parameters["StoreInternalVariables"] \
1257         or selfA._toStore("CostFunctionJ") \
1258         or selfA._toStore("CostFunctionJb") \
1259         or selfA._toStore("CostFunctionJo") \
1260         or selfA._toStore("CurrentOptimum") \
1261         or selfA._toStore("APosterioriCovariance"):
1262         BI = B.getI()
1263         RI = R.getI()
1264     #
1265     __n = Xb.size
1266     #
1267     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1268         Xn = Xb
1269         Pn = B
1270         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1271         selfA.StoredVariables["Analysis"].store( Xb )
1272         if selfA._toStore("APosterioriCovariance"):
1273             if hasattr(B,"asfullmatrix"):
1274                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1275             else:
1276                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1277         selfA._setInternalState("seed", numpy.random.get_state())
1278     elif selfA._parameters["nextStep"]:
1279         Xn = selfA._getInternalState("Xn")
1280         Pn = selfA._getInternalState("Pn")
1281     #
1282     if selfA._parameters["EstimationOf"] == "Parameters":
1283         XaMin            = Xn
1284         previousJMinimum = numpy.finfo(float).max
1285     #
1286     for step in range(duration-1):
1287         if hasattr(Y,"store"):
1288             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1289         else:
1290             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1291         #
1292         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1293         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1294         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1295         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1296         #
1297         if selfA._parameters["EstimationOf"] == "State":
1298             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1299             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1300             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1301             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1302         #
1303         if U is not None:
1304             if hasattr(U,"store") and len(U)>1:
1305                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1306             elif hasattr(U,"store") and len(U)==1:
1307                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1308             else:
1309                 Un = numpy.asmatrix(numpy.ravel( U )).T
1310         else:
1311             Un = None
1312         #
1313         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1314             Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
1315             Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
1316         #
1317         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1318             Xn_predicted = numpy.asmatrix(numpy.ravel( M( (Xn, Un) ) )).T
1319             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
1320                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1321                 Xn_predicted = Xn_predicted + Cm * Un
1322             Pn_predicted = Q + Mt * Pn * Ma
1323         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1324             # --- > By principle, M = Id, Q = 0
1325             Xn_predicted = Xn
1326             Pn_predicted = Pn
1327         #
1328         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1329             Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
1330             Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
1331         #
1332         if selfA._parameters["EstimationOf"] == "State":
1333             HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, None) ) )).T
1334             _Innovation  = Ynpu - HX_predicted
1335         elif selfA._parameters["EstimationOf"] == "Parameters":
1336             HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, Un) ) )).T
1337             _Innovation  = Ynpu - HX_predicted
1338             if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is applied twice!
1339                 _Innovation = _Innovation - Cm * Un
1340         #
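             # Analysis update: Kn = Pf Ha (R + Ht Pf Ha)^(-1), Xa = Xf + Kn d and
             # Pa = (I - Kn Ht) Pf, with d the innovation computed above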
1341         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1342         Xn = Xn_predicted + Kn * _Innovation
1343         Pn = Pn_predicted - Kn * Ht * Pn_predicted
1344         #
1345         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1346             Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
1347             Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
1348         #
1349         Xa = Xn # Pointer, not a copy
1350         #--------------------------
1351         selfA._setInternalState("Xn", Xn)
1352         selfA._setInternalState("Pn", Pn)
1353         #--------------------------
1354         #
1355         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1356         # ---> with analysis
1357         selfA.StoredVariables["Analysis"].store( Xa )
1358         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1359             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1360         if selfA._toStore("InnovationAtCurrentAnalysis"):
1361             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1362         # ---> with current state
1363         if selfA._parameters["StoreInternalVariables"] \
1364             or selfA._toStore("CurrentState"):
1365             selfA.StoredVariables["CurrentState"].store( Xn )
1366         if selfA._toStore("ForecastState"):
1367             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1368         if selfA._toStore("ForecastCovariance"):
1369             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1370         if selfA._toStore("BMA"):
1371             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1372         if selfA._toStore("InnovationAtCurrentState"):
1373             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1374         if selfA._toStore("SimulatedObservationAtCurrentState") \
1375             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1376             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1377         # ---> others
1378         if selfA._parameters["StoreInternalVariables"] \
1379             or selfA._toStore("CostFunctionJ") \
1380             or selfA._toStore("CostFunctionJb") \
1381             or selfA._toStore("CostFunctionJo") \
1382             or selfA._toStore("CurrentOptimum") \
1383             or selfA._toStore("APosterioriCovariance"):
1384             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1385             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1386             J   = Jb + Jo
1387             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1388             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1389             selfA.StoredVariables["CostFunctionJ" ].store( J )
1390             #
1391             if selfA._toStore("IndexOfOptimum") \
1392                 or selfA._toStore("CurrentOptimum") \
1393                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1394                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1395                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1396                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1397                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1398             if selfA._toStore("IndexOfOptimum"):
1399                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1400             if selfA._toStore("CurrentOptimum"):
1401                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1402             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1403                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1404             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1405                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1406             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1407                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1408             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1409                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1410         if selfA._toStore("APosterioriCovariance"):
1411             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1412         if selfA._parameters["EstimationOf"] == "Parameters" \
1413             and J < previousJMinimum:
1414             previousJMinimum    = J
1415             XaMin               = Xa
1416             if selfA._toStore("APosterioriCovariance"):
1417                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1418     #
1419     # Additional final storage of the optimum in parameter estimation
1420     # ----------------------------------------------------------------------
1421     if selfA._parameters["EstimationOf"] == "Parameters":
1422         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1423         selfA.StoredVariables["Analysis"].store( XaMin )
1424         if selfA._toStore("APosterioriCovariance"):
1425             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1426         if selfA._toStore("BMA"):
1427             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1428     #
1429     return 0
1430
1431 # ==============================================================================
1432 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1433     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1434     """
1435     Iterative EnKF
1436     """
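         #
         # The analysis is iterated in the space of the ensemble coefficients w, with
         # a Gauss-Newton loop bounded by _jmax iterations that stops when the norm
         # of the increment Deltaw falls below _e; with BnotT=True the "bundle"
         # variant is used, with a finite-difference step _epsilon instead of the
         # transform matrix Ta.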
1437     if selfA._parameters["EstimationOf"] == "Parameters":
1438         selfA._parameters["StoreInternalVariables"] = True
1439     #
1440     # Operators
1441     H = HO["Direct"].appliedControledFormTo
1442     #
1443     if selfA._parameters["EstimationOf"] == "State":
1444         M = EM["Direct"].appliedControledFormTo
1445     #
1446     if CM is not None and "Tangent" in CM and U is not None:
1447         Cm = CM["Tangent"].asMatrix(Xb)
1448     else:
1449         Cm = None
1450     #
1451     # Observation duration and sizes
1452     if hasattr(Y,"stepnumber"):
1453         duration = Y.stepnumber()
1454         __p = numpy.cumprod(Y.shape())[-1]
1455     else:
1456         duration = 2
1457         __p = numpy.array(Y).size
1458     #
1459     # Precompute the inverses of B and R
1460     if selfA._parameters["StoreInternalVariables"] \
1461         or selfA._toStore("CostFunctionJ") \
1462         or selfA._toStore("CostFunctionJb") \
1463         or selfA._toStore("CostFunctionJo") \
1464         or selfA._toStore("CurrentOptimum") \
1465         or selfA._toStore("APosterioriCovariance"):
1466         BI = B.getI()
1467     RI = R.getI()
1468     #
1469     __n = Xb.size
1470     __m = selfA._parameters["NumberOfMembers"]
1471     #
1472     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1473         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1474         else:                         Pn = B
1475         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1476         selfA.StoredVariables["Analysis"].store( Xb )
1477         if selfA._toStore("APosterioriCovariance"):
1478             if hasattr(B,"asfullmatrix"):
1479                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1480             else:
1481                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1482         selfA._setInternalState("seed", numpy.random.get_state())
1483     elif selfA._parameters["nextStep"]:
1484         Xn = selfA._getInternalState("Xn")
1485     #
1486     previousJMinimum = numpy.finfo(float).max
1487     #
1488     for step in range(duration-1):
1489         numpy.random.set_state(selfA._getInternalState("seed"))
1490         if hasattr(Y,"store"):
1491             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1492         else:
1493             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1494         #
1495         if U is not None:
1496             if hasattr(U,"store") and len(U)>1:
1497                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1498             elif hasattr(U,"store") and len(U)==1:
1499                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1500             else:
1501                 Un = numpy.asmatrix(numpy.ravel( U )).T
1502         else:
1503             Un = None
1504         #
1505         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1506             Xn = CovarianceInflation( Xn,
1507                 selfA._parameters["InflationType"],
1508                 selfA._parameters["InflationFactor"],
1509                 )
1510         #
1511         #--------------------------
1512         if VariantM == "IEnKF12":
1513             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1514             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1515             __j = 0
1516             Deltaw = 1
1517             if not BnotT:
1518                 Ta  = numpy.identity(__m)
1519             vw  = numpy.zeros(__m)
1520             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1521                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1522                 #
1523                 if BnotT:
1524                     E1 = vx1 + _epsilon * EaX
1525                 else:
1526                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1527                 #
1528                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1529                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1530                         argsAsSerie = True,
1531                         returnSerieAsArrayMatrix = True )
1532                 elif selfA._parameters["EstimationOf"] == "Parameters":
1533                     # --- > By principle, M = Id
1534                     E2 = Xn
1535                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1536                 vy1 = H((vx2, Un)).reshape((__p,1))
1537                 #
1538                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1539                     argsAsSerie = True,
1540                     returnSerieAsArrayMatrix = True )
1541                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1542                 #
1543                 if BnotT:
1544                     EaY = (HE2 - vy2) / _epsilon
1545                 else:
1546                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1547                 #
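                     # Gauss-Newton step in the ensemble-coefficient space: GradJ is
                     # the gradient of the cost function in w, mH = I + EaY' R^(-1) EaY
                     # its approximate Hessian, and Deltaw the resulting increment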
1548                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1549                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1550                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1551                 #
1552                 vw = vw + Deltaw
1553                 #
1554                 if not BnotT:
1555                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1556                 #
1557                 __j = __j + 1
1558             #
1559             A2 = EnsembleOfAnomalies( E2 )
1560             #
1561             if BnotT:
1562                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1563                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1564             #
1565             Xn = vx2 + A2
1566         #--------------------------
1567         else:
1568             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1569         #
1570         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1571             Xn = CovarianceInflation( Xn,
1572                 selfA._parameters["InflationType"],
1573                 selfA._parameters["InflationFactor"],
1574                 )
1575         #
1576         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1577         #--------------------------
1578         selfA._setInternalState("Xn", Xn)
1579         selfA._setInternalState("seed", numpy.random.get_state())
1580         #--------------------------
1581         #
1582         if selfA._parameters["StoreInternalVariables"] \
1583             or selfA._toStore("CostFunctionJ") \
1584             or selfA._toStore("CostFunctionJb") \
1585             or selfA._toStore("CostFunctionJo") \
1586             or selfA._toStore("APosterioriCovariance") \
1587             or selfA._toStore("InnovationAtCurrentAnalysis") \
1588             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1589             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1590             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1591             _Innovation = Ynpu - _HXa
1592         #
1593         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1594         # ---> with analysis
1595         selfA.StoredVariables["Analysis"].store( Xa )
1596         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1597             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1598         if selfA._toStore("InnovationAtCurrentAnalysis"):
1599             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1600         # ---> with current state
1601         if selfA._parameters["StoreInternalVariables"] \
1602             or selfA._toStore("CurrentState"):
1603             selfA.StoredVariables["CurrentState"].store( Xn )
1604         if selfA._toStore("ForecastState"):
1605             selfA.StoredVariables["ForecastState"].store( E2 )
1606         if selfA._toStore("ForecastCovariance"):
1607             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
1608         if selfA._toStore("BMA"):
1609             selfA.StoredVariables["BMA"].store( E2 - Xa )
1610         if selfA._toStore("InnovationAtCurrentState"):
1611             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1612         if selfA._toStore("SimulatedObservationAtCurrentState") \
1613             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1614             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1615         # ---> others
1616         if selfA._parameters["StoreInternalVariables"] \
1617             or selfA._toStore("CostFunctionJ") \
1618             or selfA._toStore("CostFunctionJb") \
1619             or selfA._toStore("CostFunctionJo") \
1620             or selfA._toStore("CurrentOptimum") \
1621             or selfA._toStore("APosterioriCovariance"):
1622             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1623             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1624             J   = Jb + Jo
1625             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1626             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1627             selfA.StoredVariables["CostFunctionJ" ].store( J )
1628             #
1629             if selfA._toStore("IndexOfOptimum") \
1630                 or selfA._toStore("CurrentOptimum") \
1631                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1632                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1633                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1634                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1635                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1636             if selfA._toStore("IndexOfOptimum"):
1637                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1638             if selfA._toStore("CurrentOptimum"):
1639                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1640             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1641                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1642             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1643                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1644             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1645                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1646             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1647                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1648         if selfA._toStore("APosterioriCovariance"):
1649             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1650         if selfA._parameters["EstimationOf"] == "Parameters" \
1651             and J < previousJMinimum:
1652             previousJMinimum    = J
1653             XaMin               = Xa
1654             if selfA._toStore("APosterioriCovariance"):
1655                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1656     #
1657     # Additional final storage of the optimum in parameter estimation
1658     # ----------------------------------------------------------------------
1659     if selfA._parameters["EstimationOf"] == "Parameters":
1660         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1661         selfA.StoredVariables["Analysis"].store( XaMin )
1662         if selfA._toStore("APosterioriCovariance"):
1663             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1664         if selfA._toStore("BMA"):
1665             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1666     #
1667     return 0
1668
1669 # ==============================================================================
1670 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1671     """
1672     Incremental 3DVAR
1673     """
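         # The outer loop relinearizes the observation operator around the current
         # state Xr; the inner loop minimizes the quadratic cost in the increment dx,
         #     J(dx) = 0.5 dx' B^(-1) dx + 0.5 (d - Ht dx)' R^(-1) (d - Ht dx)
         # with d = Y - H(Xb) the innovation, using the selected gradient-based
         # minimizer.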
1674     #
1675     # Initialisations
1676     # ---------------
1677     #
1678     # Non-linear operator for the outer loop
1679     Hm = HO["Direct"].appliedTo
1680     #
1681     # Precompute the inverses of B and R
1682     BI = B.getI()
1683     RI = R.getI()
1684     #
1685     # Starting point of the optimization
1686     Xini = selfA._parameters["InitializationPoint"]
1687     #
1688     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1689     Innovation = Y - HXb
1690     #
1691     # Outer Loop
1692     # ----------
1693     iOuter = 0
1694     J      = 1./mpr
1695     DeltaJ = 1./mpr
1696     Xr     = Xini.reshape((-1,1))
1697     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1698         #
1699         # Inner Loop
1700         # ----------
1701         Ht = HO["Tangent"].asMatrix(Xr)
1702         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1703         #
1704         # Definition of the cost function
1705         # ------------------------------
1706         def CostFunction(dx):
1707             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1708             if selfA._parameters["StoreInternalVariables"] or \
1709                 selfA._toStore("CurrentState") or \
1710                 selfA._toStore("CurrentOptimum"):
1711                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1712             _HdX = Ht * _dX
1713             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1714             _dInnovation = Innovation - _HdX
1715             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1716                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1717                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1718             if selfA._toStore("InnovationAtCurrentState"):
1719                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1720             #
1721             Jb  = float( 0.5 * _dX.T * BI * _dX )
1722             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1723             J   = Jb + Jo
1724             #
1725             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1726             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1727             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1728             selfA.StoredVariables["CostFunctionJ" ].store( J )
1729             if selfA._toStore("IndexOfOptimum") or \
1730                 selfA._toStore("CurrentOptimum") or \
1731                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1732                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1733                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1734                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1735                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1736             if selfA._toStore("IndexOfOptimum"):
1737                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1738             if selfA._toStore("CurrentOptimum"):
1739                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1740             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1741                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1742             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1743                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1744             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1745                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1746             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1747                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1748             return J
1749         #
1750         def GradientOfCostFunction(dx):
1751             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1752             _HdX         = Ht * _dX
1753             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1754             _dInnovation = Innovation - _HdX
1755             GradJb       = BI * _dX
1756             GradJo       = - Ht.T @ (RI * _dInnovation)
1757             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1758             return GradJ
1759         #
1760         # Minimization of the cost functional
1761         # --------------------------------
1762         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1763         #
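             # The minimization is carried out on the increment dx, starting from
             # zero, with the bounds recentred with respect to the background Xb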
1764         if selfA._parameters["Minimizer"] == "LBFGSB":
1765             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1766             if "0.19" <= scipy.version.version <= "1.1.0":
1767                 import lbfgsbhlt as optimiseur
1768             else:
1769                 import scipy.optimize as optimiseur
1770             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1771                 func        = CostFunction,
1772                 x0          = numpy.zeros(Xini.size),
1773                 fprime      = GradientOfCostFunction,
1774                 args        = (),
1775                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
1776                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1777                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1778                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1779                 iprint      = selfA._parameters["optiprint"],
1780                 )
1781             nfeval = Informations['funcalls']
1782             rc     = Informations['warnflag']
1783         elif selfA._parameters["Minimizer"] == "TNC":
1784             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1785                 func        = CostFunction,
1786                 x0          = numpy.zeros(Xini.size),
1787                 fprime      = GradientOfCostFunction,
1788                 args        = (),
1789                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
1790                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1791                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1792                 ftol        = selfA._parameters["CostDecrementTolerance"],
1793                 messages    = selfA._parameters["optmessages"],
1794                 )
1795         elif selfA._parameters["Minimizer"] == "CG":
1796             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1797                 f           = CostFunction,
1798                 x0          = numpy.zeros(Xini.size),
1799                 fprime      = GradientOfCostFunction,
1800                 args        = (),
1801                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1802                 gtol        = selfA._parameters["GradientNormTolerance"],
1803                 disp        = selfA._parameters["optdisp"],
1804                 full_output = True,
1805                 )
1806         elif selfA._parameters["Minimizer"] == "NCG":
1807             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1808                 f           = CostFunction,
1809                 x0          = numpy.zeros(Xini.size),
1810                 fprime      = GradientOfCostFunction,
1811                 args        = (),
1812                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1813                 avextol     = selfA._parameters["CostDecrementTolerance"],
1814                 disp        = selfA._parameters["optdisp"],
1815                 full_output = True,
1816                 )
1817         elif selfA._parameters["Minimizer"] == "BFGS":
1818             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1819                 f           = CostFunction,
1820                 x0          = numpy.zeros(Xini.size),
1821                 fprime      = GradientOfCostFunction,
1822                 args        = (),
1823                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1824                 gtol        = selfA._parameters["GradientNormTolerance"],
1825                 disp        = selfA._parameters["optdisp"],
1826                 full_output = True,
1827                 )
1828         else:
1829             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1830         #
1831         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1832         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1833         #
1834         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1835             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1836             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1837         else:
1838             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1839         #
1840         Xr     = Minimum
1841         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1842         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1843     #
1844     # Retrieving the analysis
1845     # ----------------------
1846     Xa = Xr
1847     #
1848     selfA.StoredVariables["Analysis"].store( Xa )
1849     #
1850     if selfA._toStore("OMA") or \
1851         selfA._toStore("SigmaObs2") or \
1852         selfA._toStore("SimulationQuantiles") or \
1853         selfA._toStore("SimulatedObservationAtOptimum"):
1854         if selfA._toStore("SimulatedObservationAtCurrentState"):
1855             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1856         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1857             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1858         else:
1859             HXa = Hm( Xa )
1860     #
1861     # Computation of the analysis covariance
1862     # ---------------------------------
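         # The a posteriori covariance is approximated by the inverse of the Hessian
         # of the cost function at the optimum, A ~ (B^(-1) + H' R^(-1) H)^(-1),
         # built column by column by applying the Hessian to the unit vectors _ee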
1863     if selfA._toStore("APosterioriCovariance") or \
1864         selfA._toStore("SimulationQuantiles") or \
1865         selfA._toStore("JacobianMatrixAtOptimum") or \
1866         selfA._toStore("KalmanGainAtOptimum"):
1867         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1868         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1869     if selfA._toStore("APosterioriCovariance") or \
1870         selfA._toStore("SimulationQuantiles") or \
1871         selfA._toStore("KalmanGainAtOptimum"):
1872         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1873         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1874     if selfA._toStore("APosterioriCovariance") or \
1875         selfA._toStore("SimulationQuantiles"):
1876         HessienneI = []
1877         nb = Xa.size
1878         for i in range(nb):
1879             _ee    = numpy.matrix(numpy.zeros(nb)).T
1880             _ee[i] = 1.
1881             _HtEE  = numpy.dot(HtM,_ee)
1882             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1883             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1884         HessienneI = numpy.matrix( HessienneI )
1885         A = HessienneI.I
1886         if min(A.shape) != max(A.shape):
1887             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1888         if (numpy.diag(A) < 0).any():
1889             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1890         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1891             try:
1892                 L = numpy.linalg.cholesky( A )
1893             except numpy.linalg.LinAlgError:
1894                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1895     if selfA._toStore("APosterioriCovariance"):
1896         selfA.StoredVariables["APosterioriCovariance"].store( A )
1897     if selfA._toStore("JacobianMatrixAtOptimum"):
1898         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1899     if selfA._toStore("KalmanGainAtOptimum"):
1900         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1901         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1902         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1903     #
1904     # Additional computations and/or storage
1905     # ---------------------------------------
1906     if selfA._toStore("Innovation") or \
1907         selfA._toStore("SigmaObs2") or \
1908         selfA._toStore("MahalanobisConsistency") or \
1909         selfA._toStore("OMB"):
1910         d  = Y - HXb
1911     if selfA._toStore("Innovation"):
1912         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1913     if selfA._toStore("BMA"):
1914         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1915     if selfA._toStore("OMA"):
1916         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1917     if selfA._toStore("OMB"):
1918         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1919     if selfA._toStore("SigmaObs2"):
1920         TraceR = R.trace(Y.size)
1921         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1922     if selfA._toStore("MahalanobisConsistency"):
1923         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1924     if selfA._toStore("SimulationQuantiles"):
1925         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
1926     if selfA._toStore("SimulatedObservationAtBackground"):
1927         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1928     if selfA._toStore("SimulatedObservationAtOptimum"):
1929         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1930     #
1931     return 0
1932
1933 # ==============================================================================
1934 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1935     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1936     """
1937     Maximum Likelihood Ensemble Filter
1938     """
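         #
         # As in the iterative EnKF above, the analysis is computed in the space of
         # the ensemble coefficients w by a Gauss-Newton loop bounded by _jmax
         # iterations and stopped when the increment Deltaw falls below _e, but here
         # only the observation operator is re-applied at each inner iteration, the
         # evolution model being applied once per time step before the loop.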
1939     if selfA._parameters["EstimationOf"] == "Parameters":
1940         selfA._parameters["StoreInternalVariables"] = True
1941     #
1942     # Operators
1943     H = HO["Direct"].appliedControledFormTo
1944     #
1945     if selfA._parameters["EstimationOf"] == "State":
1946         M = EM["Direct"].appliedControledFormTo
1947     #
1948     if CM is not None and "Tangent" in CM and U is not None:
1949         Cm = CM["Tangent"].asMatrix(Xb)
1950     else:
1951         Cm = None
1952     #
1953     # Observation duration and sizes
1954     if hasattr(Y,"stepnumber"):
1955         duration = Y.stepnumber()
1956         __p = numpy.cumprod(Y.shape())[-1]
1957     else:
1958         duration = 2
1959         __p = numpy.array(Y).size
1960     #
1961     # Precompute the inverses of B and R
1962     if selfA._parameters["StoreInternalVariables"] \
1963         or selfA._toStore("CostFunctionJ") \
1964         or selfA._toStore("CostFunctionJb") \
1965         or selfA._toStore("CostFunctionJo") \
1966         or selfA._toStore("CurrentOptimum") \
1967         or selfA._toStore("APosterioriCovariance"):
1968         BI = B.getI()
1969     RI = R.getI()
1970     #
1971     __n = Xb.size
1972     __m = selfA._parameters["NumberOfMembers"]
1973     #
1974     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1975         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1976         selfA.StoredVariables["Analysis"].store( Xb )
1977         if selfA._toStore("APosterioriCovariance"):
1978             if hasattr(B,"asfullmatrix"):
1979                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1980             else:
1981                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1982         selfA._setInternalState("seed", numpy.random.get_state())
1983     elif selfA._parameters["nextStep"]:
1984         Xn = selfA._getInternalState("Xn")
1985     #
1986     previousJMinimum = numpy.finfo(float).max
1987     #
1988     for step in range(duration-1):
1989         numpy.random.set_state(selfA._getInternalState("seed"))
1990         if hasattr(Y,"store"):
1991             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1992         else:
1993             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1994         #
1995         if U is not None:
1996             if hasattr(U,"store") and len(U)>1:
1997                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1998             elif hasattr(U,"store") and len(U)==1:
1999                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2000             else:
2001                 Un = numpy.asmatrix(numpy.ravel( U )).T
2002         else:
2003             Un = None
2004         #
2005         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2006             Xn = CovarianceInflation( Xn,
2007                 selfA._parameters["InflationType"],
2008                 selfA._parameters["InflationFactor"],
2009                 )
2010         #
2011         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2012             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2013                 argsAsSerie = True,
2014                 returnSerieAsArrayMatrix = True )
2015             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2016             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
2017                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2018                 Xn_predicted = Xn_predicted + Cm * Un
2019         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2020             # --- > By principle, M = Id, Q = 0
2021             Xn_predicted = EMX = Xn
2022         #
2023         #--------------------------
2024         if VariantM == "MLEF13":
2025             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2026             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2027             Ua  = numpy.identity(__m)
2028             __j = 0
2029             Deltaw = 1
2030             if not BnotT:
2031                 Ta  = numpy.identity(__m)
2032             vw  = numpy.zeros(__m)
2033             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2034                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2035                 #
2036                 if BnotT:
2037                     E1 = vx1 + _epsilon * EaX
2038                 else:
2039                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2040                 #
2041                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2042                     argsAsSerie = True,
2043                     returnSerieAsArrayMatrix = True )
2044                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2045                 #
2046                 if BnotT:
2047                     EaY = (HE2 - vy2) / _epsilon
2048                 else:
2049                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2050                 #
2051                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2052                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2053                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2054                 #
2055                 vw = vw + Deltaw
2056                 #
2057                 if not BnotT:
2058                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2059                 #
2060                 __j = __j + 1
2061             #
2062             if BnotT:
2063                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2064             #
2065             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2066         #--------------------------
2067         else:
2068             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2069         #
2070         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2071             Xn = CovarianceInflation( Xn,
2072                 selfA._parameters["InflationType"],
2073                 selfA._parameters["InflationFactor"],
2074                 )
2075         #
2076         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2077         #--------------------------
2078         selfA._setInternalState("Xn", Xn)
2079         selfA._setInternalState("seed", numpy.random.get_state())
2080         #--------------------------
2081         #
2082         if selfA._parameters["StoreInternalVariables"] \
2083             or selfA._toStore("CostFunctionJ") \
2084             or selfA._toStore("CostFunctionJb") \
2085             or selfA._toStore("CostFunctionJo") \
2086             or selfA._toStore("APosterioriCovariance") \
2087             or selfA._toStore("InnovationAtCurrentAnalysis") \
2088             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2089             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2090             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2091             _Innovation = Ynpu - _HXa
2092         #
2093         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2094         # ---> with analysis
2095         selfA.StoredVariables["Analysis"].store( Xa )
2096         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2097             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2098         if selfA._toStore("InnovationAtCurrentAnalysis"):
2099             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2100         # ---> with current state
2101         if selfA._parameters["StoreInternalVariables"] \
2102             or selfA._toStore("CurrentState"):
2103             selfA.StoredVariables["CurrentState"].store( Xn )
2104         if selfA._toStore("ForecastState"):
2105             selfA.StoredVariables["ForecastState"].store( EMX )
2106         if selfA._toStore("ForecastCovariance"):
2107             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2108         if selfA._toStore("BMA"):
2109             selfA.StoredVariables["BMA"].store( EMX - Xa )
2110         if selfA._toStore("InnovationAtCurrentState"):
2111             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2112         if selfA._toStore("SimulatedObservationAtCurrentState") \
2113             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2114             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2115         # ---> others
2116         if selfA._parameters["StoreInternalVariables"] \
2117             or selfA._toStore("CostFunctionJ") \
2118             or selfA._toStore("CostFunctionJb") \
2119             or selfA._toStore("CostFunctionJo") \
2120             or selfA._toStore("CurrentOptimum") \
2121             or selfA._toStore("APosterioriCovariance"):
2122             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2123             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2124             J   = Jb + Jo
2125             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2126             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2127             selfA.StoredVariables["CostFunctionJ" ].store( J )
2128             #
2129             if selfA._toStore("IndexOfOptimum") \
2130                 or selfA._toStore("CurrentOptimum") \
2131                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2132                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2133                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2134                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2135                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2136             if selfA._toStore("IndexOfOptimum"):
2137                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2138             if selfA._toStore("CurrentOptimum"):
2139                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2140             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2141                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2142             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2143                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2144             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2145                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2146             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2147                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2148         if selfA._toStore("APosterioriCovariance"):
2149             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2150         if selfA._parameters["EstimationOf"] == "Parameters" \
2151             and J < previousJMinimum:
2152             previousJMinimum    = J
2153             XaMin               = Xa
2154             if selfA._toStore("APosterioriCovariance"):
2155                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2156     #
2157     # Additional final storage of the optimum in parameter estimation
2158     # ----------------------------------------------------------------------
2159     if selfA._parameters["EstimationOf"] == "Parameters":
2160         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2161         selfA.StoredVariables["Analysis"].store( XaMin )
2162         if selfA._toStore("APosterioriCovariance"):
2163             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2164         if selfA._toStore("BMA"):
2165             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2166     #
2167     return 0
2168
2169 # ==============================================================================
2170 def mmqr(
2171         func     = None,
2172         x0       = None,
2173         fprime   = None,
2174         bounds   = None,
2175         quantile = 0.5,
2176         maxfun   = 15000,
2177         toler    = 1.e-06,
2178         y        = None,
2179         ):
2180     """
2181     Computer implementation of the MMQR algorithm, based on the publication:
2182     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2183     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2184     """
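    #
    # Reminder (illustrative sketch, notation local to this comment): for residuals
    # r = y - func(x), the quantile (check) loss that is minimized is
    #     L(x) = sum_i r_i * ( quantile - 1[r_i < 0] ),
    # and each MM iteration minimizes a smooth quadratic surrogate in which residual
    # i is weighted by 1/(epsilon + |r_i|), epsilon being derived from "toler" below.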
2185     #
2186     # Retrieval of the initial data and information
2187     # --------------------------------------------------
2188     variables = numpy.ravel( x0 )
2189     mesures   = numpy.ravel( y )
2190     increment = sys.float_info.max
2191     p         = variables.size
2192     n         = mesures.size
2193     quantile  = float(quantile)
2194     #
2195     # Computation of the MM parameters
2196     # ---------------------------
2197     tn      = float(toler) / n
2198     e0      = -tn / math.log(tn)
2199     epsilon = (e0-tn)/(1+math.log(e0))
2200     #
2201     # Initialization computations
2202     # ------------------------
2203     residus  = mesures - numpy.ravel( func( variables ) )
2204     poids    = 1./(epsilon+numpy.abs(residus))
2205     veps     = 1. - 2. * quantile - residus * poids
2206     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2207     iteration = 0
2208     #
2209     # Iterative search
2210     # -------------------
2211     while (increment > toler) and (iteration < maxfun) :
2212         iteration += 1
2213         #
2214         Derivees  = numpy.array(fprime(variables))
2215         Derivees  = Derivees.reshape(n,p) # Needed to restore the matrix shape if it goes through YACS pipes
2216         DeriveesT = Derivees.transpose()
2217         M         =   numpy.dot( DeriveesT , (poids.reshape((n,1)) * Derivees) )
2218         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
2219         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2220         #
2221         variables = variables + step
2222         if bounds is not None:
2223             # Caution: avoid an infinite loop if an interval is too small
2224             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2225                 step      = step/2.
2226                 variables = variables - step
2227         residus   = mesures - numpy.ravel( func(variables) )
2228         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2229         #
2230         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2231             step      = step/2.
2232             variables = variables - step
2233             residus   = mesures - numpy.ravel( func(variables) )
2234             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2235         #
2236         increment     = lastsurrogate-surrogate
2237         poids         = 1./(epsilon+numpy.abs(residus))
2238         veps          = 1. - 2. * quantile - residus * poids
2239         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2240     #
2241     # Discrepancy measure
2242     # --------------
2243     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2244     #
2245     return variables, Ecart, [n,p,iteration,increment,0]
2246
2247 # ==============================================================================
2248 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2249     """
2250     Multi-step and multi-method 3DVAR
2251     """
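    #
    # Sketch of one assimilation step (illustrative, mirroring the loop below):
    #     Xn_predicted = M(Xn)   if EstimationOf == "State"      (forecast)
    #     Xn_predicted = Xn      if EstimationOf == "Parameters" (M = Id, Q = 0)
    #     oneCycle(...)          performs one single-step variational analysis
    #     Xn = last stored "Analysis", used as background for the next step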
2252     #
2253     # Initialization
2254     if selfA._parameters["EstimationOf"] == "State":
2255         M = EM["Direct"].appliedTo
2256         #
2257         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2258             Xn = numpy.ravel(Xb).reshape((-1,1))
2259             selfA.StoredVariables["Analysis"].store( Xn )
2260             if selfA._toStore("APosterioriCovariance"):
2261                 if hasattr(B,"asfullmatrix"):
2262                     selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2263                 else:
2264                     selfA.StoredVariables["APosterioriCovariance"].store( B )
2265             if selfA._toStore("ForecastState"):
2266                 selfA.StoredVariables["ForecastState"].store( Xn )
2267         elif selfA._parameters["nextStep"]:
2268             Xn = selfA._getInternalState("Xn")
2269     else:
2270         Xn = numpy.ravel(Xb).reshape((-1,1))
2271     #
2272     if hasattr(Y,"stepnumber"):
2273         duration = Y.stepnumber()
2274     else:
2275         duration = 2
2276     #
2277     # Multi-step
2278     for step in range(duration-1):
2279         if hasattr(Y,"store"):
2280             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2281         else:
2282             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2283         #
2284         if selfA._parameters["EstimationOf"] == "State": # Forecast
2285             Xn_predicted = M( Xn )
2286             if selfA._toStore("ForecastState"):
2287                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2288         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2289             # --- > By design, M = Id, Q = 0
2290             Xn_predicted = Xn
2291         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2292         #
2293         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2294         #
2295         Xn = selfA.StoredVariables["Analysis"][-1]
2296         #--------------------------
2297         selfA._setInternalState("Xn", Xn)
2298     #
2299     return 0
2300
2301 # ==============================================================================
2302 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2303     """
2304     3DVAR PSAS
2305     """
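    #
    # Reminder (illustrative, notation local to this comment): PSAS works in
    # observation space. With the innovation d = Y - H(Xb), the dual functional
    # minimized below is
    #     J(w) = 1/2 * w^T (H B H^T + R) w - w^T d,
    # and the analysis is recovered in state space as  Xa = Xb + B H^T w.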
2306     #
2307     # Initializations
2308     # ---------------
2309     #
2310     # Operators
2311     Hm = HO["Direct"].appliedTo
2312     #
2313     # Possible use of a precomputed H(Xb) vector
2314     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2315         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2316     else:
2317         HXb = Hm( Xb )
2318     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2319     if Y.size != HXb.size:
2320         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2321     if max(Y.shape) != max(HXb.shape):
2322         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2323     #
2324     if selfA._toStore("JacobianMatrixAtBackground"):
2325         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2326         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2327         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2328     #
2329     Ht = HO["Tangent"].asMatrix(Xb)
2330     BHT = B * Ht.T
2331     HBHTpR = R + Ht * BHT
2332     Innovation = Y - HXb
2333     #
2334     # Starting point of the optimization
2335     Xini = numpy.zeros(Xb.shape)
2336     #
2337     # Definition of the cost function
2338     # ------------------------------
2339     def CostFunction(w):
2340         _W = numpy.asmatrix(numpy.ravel( w )).T
2341         if selfA._parameters["StoreInternalVariables"] or \
2342             selfA._toStore("CurrentState") or \
2343             selfA._toStore("CurrentOptimum"):
2344             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2345         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2346             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2347             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2348         if selfA._toStore("InnovationAtCurrentState"):
2349             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2350         #
2351         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2352         Jo  = float( - _W.T * Innovation )
2353         J   = Jb + Jo
2354         #
2355         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2356         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2357         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2358         selfA.StoredVariables["CostFunctionJ" ].store( J )
2359         if selfA._toStore("IndexOfOptimum") or \
2360             selfA._toStore("CurrentOptimum") or \
2361             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2362             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2363             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2364             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2365             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2366         if selfA._toStore("IndexOfOptimum"):
2367             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2368         if selfA._toStore("CurrentOptimum"):
2369             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2370         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2371             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2372         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2373             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2374         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2375             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2376         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2377             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2378         return J
2379     #
2380     def GradientOfCostFunction(w):
2381         _W = numpy.asmatrix(numpy.ravel( w )).T
2382         GradJb  = HBHTpR * _W
2383         GradJo  = - Innovation
2384         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2385         return GradJ
2386     #
2387     # Minimization of the functional
2388     # --------------------------------
2389     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2390     #
2391     if selfA._parameters["Minimizer"] == "LBFGSB":
2392         if "0.19" <= scipy.version.version <= "1.1.0":
2393             import lbfgsbhlt as optimiseur
2394         else:
2395             import scipy.optimize as optimiseur
2396         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2397             func        = CostFunction,
2398             x0          = Xini,
2399             fprime      = GradientOfCostFunction,
2400             args        = (),
2401             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2402             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2403             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2404             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2405             iprint      = selfA._parameters["optiprint"],
2406             )
2407         nfeval = Informations['funcalls']
2408         rc     = Informations['warnflag']
2409     elif selfA._parameters["Minimizer"] == "TNC":
2410         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2411             func        = CostFunction,
2412             x0          = Xini,
2413             fprime      = GradientOfCostFunction,
2414             args        = (),
2415             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2416             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2417             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2418             ftol        = selfA._parameters["CostDecrementTolerance"],
2419             messages    = selfA._parameters["optmessages"],
2420             )
2421     elif selfA._parameters["Minimizer"] == "CG":
2422         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2423             f           = CostFunction,
2424             x0          = Xini,
2425             fprime      = GradientOfCostFunction,
2426             args        = (),
2427             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2428             gtol        = selfA._parameters["GradientNormTolerance"],
2429             disp        = selfA._parameters["optdisp"],
2430             full_output = True,
2431             )
2432     elif selfA._parameters["Minimizer"] == "NCG":
2433         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2434             f           = CostFunction,
2435             x0          = Xini,
2436             fprime      = GradientOfCostFunction,
2437             args        = (),
2438             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2439             avextol     = selfA._parameters["CostDecrementTolerance"],
2440             disp        = selfA._parameters["optdisp"],
2441             full_output = True,
2442             )
2443     elif selfA._parameters["Minimizer"] == "BFGS":
2444         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2445             f           = CostFunction,
2446             x0          = Xini,
2447             fprime      = GradientOfCostFunction,
2448             args        = (),
2449             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2450             gtol        = selfA._parameters["GradientNormTolerance"],
2451             disp        = selfA._parameters["optdisp"],
2452             full_output = True,
2453             )
2454     else:
2455         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2456     #
2457     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2458     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2459     #
2460     # Correction to work around a TNC bug in the returned Minimum
2461     # ----------------------------------------------------------------
2462     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2463         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2464         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2465     else:
2466         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2467     #
2468     # Obtaining the analysis
2469     # ----------------------
2470     Xa = Minimum
2471     #
2472     selfA.StoredVariables["Analysis"].store( Xa )
2473     #
2474     if selfA._toStore("OMA") or \
2475         selfA._toStore("SigmaObs2") or \
2476         selfA._toStore("SimulationQuantiles") or \
2477         selfA._toStore("SimulatedObservationAtOptimum"):
2478         if selfA._toStore("SimulatedObservationAtCurrentState"):
2479             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2480         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2481             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2482         else:
2483             HXa = Hm( Xa )
2484     #
2485     # Computation of the analysis covariance
2486     # ---------------------------------
2487     if selfA._toStore("APosterioriCovariance") or \
2488         selfA._toStore("SimulationQuantiles") or \
2489         selfA._toStore("JacobianMatrixAtOptimum") or \
2490         selfA._toStore("KalmanGainAtOptimum"):
2491         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2492         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2493     if selfA._toStore("APosterioriCovariance") or \
2494         selfA._toStore("SimulationQuantiles") or \
2495         selfA._toStore("KalmanGainAtOptimum"):
2496         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2497         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2498     if selfA._toStore("APosterioriCovariance") or \
2499         selfA._toStore("SimulationQuantiles"):
2500         BI = B.getI()
2501         RI = R.getI()
2502         HessienneI = []
2503         nb = Xa.size
2504         for i in range(nb):
2505             _ee    = numpy.matrix(numpy.zeros(nb)).T
2506             _ee[i] = 1.
2507             _HtEE  = numpy.dot(HtM,_ee)
2508             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2509             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2510         HessienneI = numpy.matrix( HessienneI )
2511         A = HessienneI.I
2512         if min(A.shape) != max(A.shape):
2513             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2514         if (numpy.diag(A) < 0).any():
2515             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2516         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2517             try:
2518                 L = numpy.linalg.cholesky( A )
2519             except numpy.linalg.LinAlgError:
2520                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2521     if selfA._toStore("APosterioriCovariance"):
2522         selfA.StoredVariables["APosterioriCovariance"].store( A )
2523     if selfA._toStore("JacobianMatrixAtOptimum"):
2524         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2525     if selfA._toStore("KalmanGainAtOptimum"):
2526         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2527         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2528         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2529     #
2530     # Additional computations and/or storage
2531     # ---------------------------------------
2532     if selfA._toStore("Innovation") or \
2533         selfA._toStore("SigmaObs2") or \
2534         selfA._toStore("MahalanobisConsistency") or \
2535         selfA._toStore("OMB"):
2536         d  = Y - HXb
2537     if selfA._toStore("Innovation"):
2538         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2539     if selfA._toStore("BMA"):
2540         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2541     if selfA._toStore("OMA"):
2542         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2543     if selfA._toStore("OMB"):
2544         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2545     if selfA._toStore("SigmaObs2"):
2546         TraceR = R.trace(Y.size)
2547         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2548     if selfA._toStore("MahalanobisConsistency"):
2549         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2550     if selfA._toStore("SimulationQuantiles"):
2551         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2552     if selfA._toStore("SimulatedObservationAtBackground"):
2553         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2554     if selfA._toStore("SimulatedObservationAtOptimum"):
2555         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2556     #
2557     return 0
2558
2559 # ==============================================================================
2560 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
2561     """
2562     Stochastic EnKF
2563     """
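    #
    # Reminder (illustrative, notation local to this comment): each member i of the
    # forecast ensemble is updated with perturbed observations,
    #     Xa_i = Xf_i + K ( Y + eps_i - H(Xf_i) ),   eps_i ~ N(0, R),
    # K being built either from the ensemble covariances Pf H^T and H Pf H^T
    # ("KalmanFilterFormula05") or from the anomaly matrices ("KalmanFilterFormula16").
    #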
2564     if selfA._parameters["EstimationOf"] == "Parameters":
2565         selfA._parameters["StoreInternalVariables"] = True
2566     #
2567     # Operators
2568     H = HO["Direct"].appliedControledFormTo
2569     #
2570     if selfA._parameters["EstimationOf"] == "State":
2571         M = EM["Direct"].appliedControledFormTo
2572     #
2573     if CM is not None and "Tangent" in CM and U is not None:
2574         Cm = CM["Tangent"].asMatrix(Xb)
2575     else:
2576         Cm = None
2577     #
2578     # Observation duration and sizes
2579     if hasattr(Y,"stepnumber"):
2580         duration = Y.stepnumber()
2581         __p = numpy.cumprod(Y.shape())[-1]
2582     else:
2583         duration = 2
2584         __p = numpy.array(Y).size
2585     #
2586     # Precomputation of the inverses of B and R
2587     if selfA._parameters["StoreInternalVariables"] \
2588         or selfA._toStore("CostFunctionJ") \
2589         or selfA._toStore("CostFunctionJb") \
2590         or selfA._toStore("CostFunctionJo") \
2591         or selfA._toStore("CurrentOptimum") \
2592         or selfA._toStore("APosterioriCovariance"):
2593         BI = B.getI()
2594         RI = R.getI()
2595     #
2596     __n = Xb.size
2597     __m = selfA._parameters["NumberOfMembers"]
2598     #
2599     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2600     else:                         Rn = R
2601     #
2602     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2603         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2604         selfA.StoredVariables["Analysis"].store( Xb )
2605         if selfA._toStore("APosterioriCovariance"):
2606             if hasattr(B,"asfullmatrix"):
2607                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2608             else:
2609                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2610         selfA._setInternalState("seed", numpy.random.get_state())
2611     elif selfA._parameters["nextStep"]:
2612         Xn = selfA._getInternalState("Xn")
2613     #
2614     previousJMinimum = numpy.finfo(float).max
2615     #
2616     for step in range(duration-1):
2617         numpy.random.set_state(selfA._getInternalState("seed"))
2618         if hasattr(Y,"store"):
2619             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2620         else:
2621             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2622         #
2623         if U is not None:
2624             if hasattr(U,"store") and len(U)>1:
2625                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2626             elif hasattr(U,"store") and len(U)==1:
2627                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2628             else:
2629                 Un = numpy.asmatrix(numpy.ravel( U )).T
2630         else:
2631             Un = None
2632         #
2633         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2634             Xn = CovarianceInflation( Xn,
2635                 selfA._parameters["InflationType"],
2636                 selfA._parameters["InflationFactor"],
2637                 )
2638         #
2639         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2640             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2641                 argsAsSerie = True,
2642                 returnSerieAsArrayMatrix = True )
2643             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2644             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2645                 argsAsSerie = True,
2646                 returnSerieAsArrayMatrix = True )
2647             if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is applied twice!
2648                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2649                 Xn_predicted = Xn_predicted + Cm * Un
2650         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2651             # --- > By design, M = Id, Q = 0
2652             Xn_predicted = EMX = Xn
2653             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2654                 argsAsSerie = True,
2655                 returnSerieAsArrayMatrix = True )
2656         #
2657         # Mean of forecast and observation of forecast
2658         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2659         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2660         #
2661         #--------------------------
2662         if VariantM == "KalmanFilterFormula05":
2663             PfHT, HPfHT = 0., 0.
2664             for i in range(__m):
2665                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2666                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2667                 PfHT  += Exfi * Eyfi.T
2668                 HPfHT += Eyfi * Eyfi.T
2669             PfHT  = (1./(__m-1)) * PfHT
2670             HPfHT = (1./(__m-1)) * HPfHT
2671             Kn     = PfHT * ( R + HPfHT ).I
2672             del PfHT, HPfHT
2673             #
2674             for i in range(__m):
2675                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2676                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2677         #--------------------------
2678         elif VariantM == "KalmanFilterFormula16":
2679             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2680             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2681             #
2682             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2683             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2684             #
2685             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2686             #
2687             for i in range(__m):
2688                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2689         #--------------------------
2690         else:
2691             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2692         #
2693         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2694             Xn = CovarianceInflation( Xn,
2695                 selfA._parameters["InflationType"],
2696                 selfA._parameters["InflationFactor"],
2697                 )
2698         #
2699         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2700         #--------------------------
2701         selfA._setInternalState("Xn", Xn)
2702         selfA._setInternalState("seed", numpy.random.get_state())
2703         #--------------------------
2704         #
2705         if selfA._parameters["StoreInternalVariables"] \
2706             or selfA._toStore("CostFunctionJ") \
2707             or selfA._toStore("CostFunctionJb") \
2708             or selfA._toStore("CostFunctionJo") \
2709             or selfA._toStore("APosterioriCovariance") \
2710             or selfA._toStore("InnovationAtCurrentAnalysis") \
2711             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2712             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2713             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2714             _Innovation = Ynpu - _HXa
2715         #
2716         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2717         # ---> with analysis
2718         selfA.StoredVariables["Analysis"].store( Xa )
2719         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2720             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2721         if selfA._toStore("InnovationAtCurrentAnalysis"):
2722             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2723         # ---> with current state
2724         if selfA._parameters["StoreInternalVariables"] \
2725             or selfA._toStore("CurrentState"):
2726             selfA.StoredVariables["CurrentState"].store( Xn )
2727         if selfA._toStore("ForecastState"):
2728             selfA.StoredVariables["ForecastState"].store( EMX )
2729         if selfA._toStore("ForecastCovariance"):
2730             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2731         if selfA._toStore("BMA"):
2732             selfA.StoredVariables["BMA"].store( EMX - Xa )
2733         if selfA._toStore("InnovationAtCurrentState"):
2734             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2735         if selfA._toStore("SimulatedObservationAtCurrentState") \
2736             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2737             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2738         # ---> others
2739         if selfA._parameters["StoreInternalVariables"] \
2740             or selfA._toStore("CostFunctionJ") \
2741             or selfA._toStore("CostFunctionJb") \
2742             or selfA._toStore("CostFunctionJo") \
2743             or selfA._toStore("CurrentOptimum") \
2744             or selfA._toStore("APosterioriCovariance"):
2745             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2746             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2747             J   = Jb + Jo
2748             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2749             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2750             selfA.StoredVariables["CostFunctionJ" ].store( J )
2751             #
2752             if selfA._toStore("IndexOfOptimum") \
2753                 or selfA._toStore("CurrentOptimum") \
2754                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2755                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2756                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2757                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2758                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2759             if selfA._toStore("IndexOfOptimum"):
2760                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2761             if selfA._toStore("CurrentOptimum"):
2762                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2763             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2764                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2765             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2766                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2767             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2768                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2769             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2770                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2771         if selfA._toStore("APosterioriCovariance"):
2772             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2773         if selfA._parameters["EstimationOf"] == "Parameters" \
2774             and J < previousJMinimum:
2775             previousJMinimum    = J
2776             XaMin               = Xa
2777             if selfA._toStore("APosterioriCovariance"):
2778                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2779     #
2780     # Additional final storage of the optimum in parameter estimation
2781     # ----------------------------------------------------------------------
2782     if selfA._parameters["EstimationOf"] == "Parameters":
2783         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2784         selfA.StoredVariables["Analysis"].store( XaMin )
2785         if selfA._toStore("APosterioriCovariance"):
2786             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2787         if selfA._toStore("BMA"):
2788             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2789     #
2790     return 0
2791
2792 # ==============================================================================
2793 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2794     """
2795     Standard Kalman Filter
2796     """
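    #
    # Reminder (illustrative, notation local to this comment) of the linear Kalman
    # filter step implemented below:
    #     Forecast : Xf = M Xn (+ Cm Un),   Pf = M Pn M^T + Q
    #     Analysis : K  = Pf H^T ( H Pf H^T + R )^-1
    #                Xa = Xf + K ( Y - H Xf ),   Pa = (I - K H) Pf
    #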
2797     if selfA._parameters["EstimationOf"] == "Parameters":
2798         selfA._parameters["StoreInternalVariables"] = True
2799     #
2800     # Operators
2801     # ----------
2802     Ht = HO["Tangent"].asMatrix(Xb)
2803     Ha = HO["Adjoint"].asMatrix(Xb)
2804     #
2805     if selfA._parameters["EstimationOf"] == "State":
2806         Mt = EM["Tangent"].asMatrix(Xb)
2807         Ma = EM["Adjoint"].asMatrix(Xb)
2808     #
2809     if CM is not None and "Tangent" in CM and U is not None:
2810         Cm = CM["Tangent"].asMatrix(Xb)
2811     else:
2812         Cm = None
2813     #
2814     # Observation duration and sizes
2815     if hasattr(Y,"stepnumber"):
2816         duration = Y.stepnumber()
2817         __p = numpy.cumprod(Y.shape())[-1]
2818     else:
2819         duration = 2
2820         __p = numpy.array(Y).size
2821     #
2822     # Precomputation of the inverses of B and R
2823     if selfA._parameters["StoreInternalVariables"] \
2824         or selfA._toStore("CostFunctionJ") \
2825         or selfA._toStore("CostFunctionJb") \
2826         or selfA._toStore("CostFunctionJo") \
2827         or selfA._toStore("CurrentOptimum") \
2828         or selfA._toStore("APosterioriCovariance"):
2829         BI = B.getI()
2830         RI = R.getI()
2831     #
2832     __n = Xb.size
2833     #
2834     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2835         Xn = Xb
2836         Pn = B
2837         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2838         selfA.StoredVariables["Analysis"].store( Xb )
2839         if selfA._toStore("APosterioriCovariance"):
2840             if hasattr(B,"asfullmatrix"):
2841                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2842             else:
2843                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2844         selfA._setInternalState("seed", numpy.random.get_state())
2845     elif selfA._parameters["nextStep"]:
2846         Xn = selfA._getInternalState("Xn")
2847         Pn = selfA._getInternalState("Pn")
2848     #
2849     if selfA._parameters["EstimationOf"] == "Parameters":
2850         XaMin            = Xn
2851         previousJMinimum = numpy.finfo(float).max
2852     #
2853     for step in range(duration-1):
2854         if hasattr(Y,"store"):
2855             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2856         else:
2857             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2858         #
2859         if U is not None:
2860             if hasattr(U,"store") and len(U)>1:
2861                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2862             elif hasattr(U,"store") and len(U)==1:
2863                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2864             else:
2865                 Un = numpy.asmatrix(numpy.ravel( U )).T
2866         else:
2867             Un = None
2868         #
2869         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2870             Xn_predicted = Mt * Xn
2871             if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is applied twice!
2872                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2873                 Xn_predicted = Xn_predicted + Cm * Un
2874             Pn_predicted = Q + Mt * Pn * Ma
2875         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2876             # --- > By design, M = Id, Q = 0
2877             Xn_predicted = Xn
2878             Pn_predicted = Pn
2879         #
2880         if selfA._parameters["EstimationOf"] == "State":
2881             HX_predicted = Ht * Xn_predicted
2882             _Innovation  = Ynpu - HX_predicted
2883         elif selfA._parameters["EstimationOf"] == "Parameters":
2884             HX_predicted = Ht * Xn_predicted
2885             _Innovation  = Ynpu - HX_predicted
2886             if Cm is not None and Un is not None: # Caution: if Cm is also in H, it is applied twice!
2887                 _Innovation = _Innovation - Cm * Un
2888         #
2889         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
2890         Xn = Xn_predicted + Kn * _Innovation
2891         Pn = Pn_predicted - Kn * Ht * Pn_predicted
2892         #
2893         Xa = Xn # Pointer, not a copy
2894         #--------------------------
2895         selfA._setInternalState("Xn", Xn)
2896         selfA._setInternalState("Pn", Pn)
2897         #--------------------------
2898         #
2899         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2900         # ---> with analysis
2901         selfA.StoredVariables["Analysis"].store( Xa )
2902         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2903             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
2904         if selfA._toStore("InnovationAtCurrentAnalysis"):
2905             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2906         # ---> with current state
2907         if selfA._parameters["StoreInternalVariables"] \
2908             or selfA._toStore("CurrentState"):
2909             selfA.StoredVariables["CurrentState"].store( Xn )
2910         if selfA._toStore("ForecastState"):
2911             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2912         if selfA._toStore("ForecastCovariance"):
2913             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
2914         if selfA._toStore("BMA"):
2915             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
2916         if selfA._toStore("InnovationAtCurrentState"):
2917             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2918         if selfA._toStore("SimulatedObservationAtCurrentState") \
2919             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2920             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2921         # ---> others
2922         if selfA._parameters["StoreInternalVariables"] \
2923             or selfA._toStore("CostFunctionJ") \
2924             or selfA._toStore("CostFunctionJb") \
2925             or selfA._toStore("CostFunctionJo") \
2926             or selfA._toStore("CurrentOptimum") \
2927             or selfA._toStore("APosterioriCovariance"):
2928             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2929             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2930             J   = Jb + Jo
2931             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2932             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2933             selfA.StoredVariables["CostFunctionJ" ].store( J )
2934             #
2935             if selfA._toStore("IndexOfOptimum") \
2936                 or selfA._toStore("CurrentOptimum") \
2937                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2938                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2939                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2940                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2941                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2942             if selfA._toStore("IndexOfOptimum"):
2943                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2944             if selfA._toStore("CurrentOptimum"):
2945                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2946             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2947                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2948             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2949                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2950             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2951                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2952             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2953                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2954         if selfA._toStore("APosterioriCovariance"):
2955             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2956         if selfA._parameters["EstimationOf"] == "Parameters" \
2957             and J < previousJMinimum:
2958             previousJMinimum    = J
2959             XaMin               = Xa
2960             if selfA._toStore("APosterioriCovariance"):
2961                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2962     #
2963     # Additional final storage of the optimum in parameter estimation
2964     # ----------------------------------------------------------------------
2965     if selfA._parameters["EstimationOf"] == "Parameters":
2966         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2967         selfA.StoredVariables["Analysis"].store( XaMin )
2968         if selfA._toStore("APosterioriCovariance"):
2969             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2970         if selfA._toStore("BMA"):
2971             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2972     #
2973     return 0
2974
2975 # ==============================================================================
2976 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2977     """
2978     3DVAR
2979     """
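    #
    # Reminder (illustrative, notation local to this comment): the functional
    # minimized below is the classical
    #     J(x) = 1/2 (x - Xb)^T B^-1 (x - Xb) + 1/2 (Y - H(x))^T R^-1 (Y - H(x)),
    # with gradient  grad J(x) = B^-1 (x - Xb) - H'^T R^-1 (Y - H(x)).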
2980     #
2981     # Initializations
2982     # ---------------
2983     #
2984     # Operators
2985     Hm = HO["Direct"].appliedTo
2986     Ha = HO["Adjoint"].appliedInXTo
2987     #
2988     # Possible use of a precomputed H(Xb) vector
2989     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2990         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2991     else:
2992         HXb = Hm( Xb )
2993     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2994     if Y.size != HXb.size:
2995         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2996     if max(Y.shape) != max(HXb.shape):
2997         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2998     #
2999     if selfA._toStore("JacobianMatrixAtBackground"):
3000         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3001         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3002         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3003     #
3004     # Precomputation of the inverses of B and R
3005     BI = B.getI()
3006     RI = R.getI()
3007     #
3008     # Starting point of the optimization
3009     Xini = selfA._parameters["InitializationPoint"]
3010     #
3011     # Definition of the cost function
3012     # ------------------------------
3013     def CostFunction(x):
3014         _X  = numpy.asmatrix(numpy.ravel( x )).T
3015         if selfA._parameters["StoreInternalVariables"] or \
3016             selfA._toStore("CurrentState") or \
3017             selfA._toStore("CurrentOptimum"):
3018             selfA.StoredVariables["CurrentState"].store( _X )
3019         _HX = Hm( _X )
3020         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3021         _Innovation = Y - _HX
3022         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3023             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3024             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3025         if selfA._toStore("InnovationAtCurrentState"):
3026             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3027         #
3028         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3029         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3030         J   = Jb + Jo
3031         #
3032         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3033         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3034         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3035         selfA.StoredVariables["CostFunctionJ" ].store( J )
3036         if selfA._toStore("IndexOfOptimum") or \
3037             selfA._toStore("CurrentOptimum") or \
3038             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3039             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3040             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3041             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3042             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3043         if selfA._toStore("IndexOfOptimum"):
3044             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3045         if selfA._toStore("CurrentOptimum"):
3046             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3047         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3048             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3049         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3050             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3051         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3052             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3053         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3054             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3055         return J
3056     #
3057     def GradientOfCostFunction(x):
3058         _X      = numpy.asmatrix(numpy.ravel( x )).T
3059         _HX     = Hm( _X )
3060         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3061         GradJb  = BI * (_X - Xb)
3062         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3063         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3064         return GradJ
3065     #
3066     # Minimization of the functional
3067     # --------------------------------
3068     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3069     #
3070     if selfA._parameters["Minimizer"] == "LBFGSB":
3071         if "0.19" <= scipy.version.version <= "1.1.0":
3072             import lbfgsbhlt as optimiseur
3073         else:
3074             import scipy.optimize as optimiseur
3075         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3076             func        = CostFunction,
3077             x0          = Xini,
3078             fprime      = GradientOfCostFunction,
3079             args        = (),
3080             bounds      = selfA._parameters["Bounds"],
3081             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3082             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3083             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3084             iprint      = selfA._parameters["optiprint"],
3085             )
3086         nfeval = Informations['funcalls']
3087         rc     = Informations['warnflag']
3088     elif selfA._parameters["Minimizer"] == "TNC":
3089         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3090             func        = CostFunction,
3091             x0          = Xini,
3092             fprime      = GradientOfCostFunction,
3093             args        = (),
3094             bounds      = selfA._parameters["Bounds"],
3095             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3096             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3097             ftol        = selfA._parameters["CostDecrementTolerance"],
3098             messages    = selfA._parameters["optmessages"],
3099             )
3100     elif selfA._parameters["Minimizer"] == "CG":
3101         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3102             f           = CostFunction,
3103             x0          = Xini,
3104             fprime      = GradientOfCostFunction,
3105             args        = (),
3106             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3107             gtol        = selfA._parameters["GradientNormTolerance"],
3108             disp        = selfA._parameters["optdisp"],
3109             full_output = True,
3110             )
3111     elif selfA._parameters["Minimizer"] == "NCG":
3112         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3113             f           = CostFunction,
3114             x0          = Xini,
3115             fprime      = GradientOfCostFunction,
3116             args        = (),
3117             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3118             avextol     = selfA._parameters["CostDecrementTolerance"],
3119             disp        = selfA._parameters["optdisp"],
3120             full_output = True,
3121             )
3122     elif selfA._parameters["Minimizer"] == "BFGS":
3123         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3124             f           = CostFunction,
3125             x0          = Xini,
3126             fprime      = GradientOfCostFunction,
3127             args        = (),
3128             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3129             gtol        = selfA._parameters["GradientNormTolerance"],
3130             disp        = selfA._parameters["optdisp"],
3131             full_output = True,
3132             )
3133     else:
3134         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3135     #
3136     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3137     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3138     #
3139     # Correction to work around a TNC bug in the returned Minimum
3140     # ----------------------------------------------------------------
3141     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3142         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3143     #
3144     # Obtaining the analysis
3145     # ----------------------
3146     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3147     #
3148     selfA.StoredVariables["Analysis"].store( Xa )
3149     #
3150     if selfA._toStore("OMA") or \
3151         selfA._toStore("SigmaObs2") or \
3152         selfA._toStore("SimulationQuantiles") or \
3153         selfA._toStore("SimulatedObservationAtOptimum"):
3154         if selfA._toStore("SimulatedObservationAtCurrentState"):
3155             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3156         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3157             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3158         else:
3159             HXa = Hm( Xa )
3160     #
3161     # Computation of the analysis covariance
3162     # ---------------------------------
3163     if selfA._toStore("APosterioriCovariance") or \
3164         selfA._toStore("SimulationQuantiles") or \
3165         selfA._toStore("JacobianMatrixAtOptimum") or \
3166         selfA._toStore("KalmanGainAtOptimum"):
3167         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3168         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3169     if selfA._toStore("APosterioriCovariance") or \
3170         selfA._toStore("SimulationQuantiles") or \
3171         selfA._toStore("KalmanGainAtOptimum"):
3172         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3173         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3174     if selfA._toStore("APosterioriCovariance") or \
3175         selfA._toStore("SimulationQuantiles"):
3176         HessienneI = []
3177         nb = Xa.size
3178         for i in range(nb):
3179             _ee    = numpy.matrix(numpy.zeros(nb)).T
3180             _ee[i] = 1.
3181             _HtEE  = numpy.dot(HtM,_ee)
3182             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3183             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3184         HessienneI = numpy.matrix( HessienneI )
3185         A = HessienneI.I
3186         if min(A.shape) != max(A.shape):
3187             raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it must be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3188         if (numpy.diag(A) < 0).any():
3189             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3190         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
3191             try:
3192                 L = numpy.linalg.cholesky( A )
3193             except numpy.linalg.LinAlgError:
3194                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3195     if selfA._toStore("APosterioriCovariance"):
3196         selfA.StoredVariables["APosterioriCovariance"].store( A )
3197     if selfA._toStore("JacobianMatrixAtOptimum"):
3198         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
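    # Note: the two expressions of the gain below are algebraically
    # equivalent, K = B H^T (R + H B H^T)^-1 (primal form) and
    # K = (B^-1 + H^T R^-1 H)^-1 H^T R^-1 (dual form); the cheaper one is
    # selected according to the relative sizes of Y and Xb.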
3199     if selfA._toStore("KalmanGainAtOptimum"):
3200         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3201         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3202         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3203     #
3204     # Additional computations and/or storage
3205     # ---------------------------------------
3206     if selfA._toStore("Innovation") or \
3207         selfA._toStore("SigmaObs2") or \
3208         selfA._toStore("MahalanobisConsistency") or \
3209         selfA._toStore("OMB"):
3210         d  = Y - HXb
3211     if selfA._toStore("Innovation"):
3212         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3213     if selfA._toStore("BMA"):
3214         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3215     if selfA._toStore("OMA"):
3216         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3217     if selfA._toStore("OMB"):
3218         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
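    # Note: SigmaObs2 stores the scalar d^T (Y - H Xa) / Tr(R), an a
    # posteriori indicator of the observation error variance scaling, and
    # MahalanobisConsistency stores 2*J_min/p, commonly compared to 1 to
    # check the consistency of the error statistics.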
3219     if selfA._toStore("SigmaObs2"):
3220         TraceR = R.trace(Y.size)
3221         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3222     if selfA._toStore("MahalanobisConsistency"):
3223         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3224     if selfA._toStore("SimulationQuantiles"):
3225         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3226     if selfA._toStore("SimulatedObservationAtBackground"):
3227         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3228     if selfA._toStore("SimulatedObservationAtOptimum"):
3229         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3230     #
3231     return 0
3232
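# Illustrative sketch, not part of the ADAO API (all names below are
# hypothetical): for a linear observation operator H and dense NumPy arrays,
# the a posteriori covariance assembled above is A = (B^-1 + H^T R^-1 H)^-1,
# which the following minimal helper computes directly.
def _sketch_posterior_covariance(B, R, H):
    "Return A = (B^-1 + H^T R^-1 H)^-1 for dense, square B and R."
    import numpy
    Hessienne = numpy.linalg.inv(B) + H.T @ numpy.linalg.inv(R) @ H
    return numpy.linalg.inv(Hessienne)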
3233 # ==============================================================================
3234 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3235     """
3236     4DVAR (four-dimensional variational data assimilation)
3237     """
3238     #
3239     # Initializations
3240     # ---------------
3241     #
3242     # Operators
3243     Hm = HO["Direct"].appliedControledFormTo
3244     Mm = EM["Direct"].appliedControledFormTo
3245     #
3246     if CM is not None and "Tangent" in CM and U is not None:
3247         Cm = CM["Tangent"].asMatrix(Xb)
3248     else:
3249         Cm = None
3250     #
3251     def Un(_step):
3252         if U is not None:
3253             if hasattr(U,"store") and 1<=_step<len(U):
3254                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
3255             elif hasattr(U,"store") and len(U)==1:
3256                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3257             else:
3258                 _Un = numpy.asmatrix(numpy.ravel( U )).T
3259         else:
3260             _Un = None
3261         return _Un
3262     def CmUn(_xn,_un):
3263         if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is counted twice!
3264             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3265             _CmUn = _Cm * _un
3266         else:
3267             _CmUn = 0.
3268         return _CmUn
3269     #
3270     # Note: the observations are used starting from time step number 1,
3271     # and are stored in Yo indexed accordingly. Step 0 is therefore not
3272     # used, since the first stage starts with the observation
3273     # of step 1.
3274     #
3275     # Number of steps equal to the number of observation steps
3276     if hasattr(Y,"stepnumber"):
3277         duration = Y.stepnumber()
3278     else:
3279         duration = 2
3280     #
3281     # Precomputation of the inverses of B and R
3282     BI = B.getI()
3283     RI = R.getI()
3284     #
3285     # Starting point of the optimization
3286     Xini = selfA._parameters["InitializationPoint"]
3287     #
3288     # Definition of the cost function
3289     # ------------------------------
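    # Note: for state estimation, the cost function minimized below is
    #   J(x0) = 0.5*(x0-xb)^T B^-1 (x0-xb)
    #         + 0.5*sum_k (y_k - H(M_0->k(x0)))^T R^-1 (y_k - H(M_0->k(x0))),
    # the forward states and innovations being stored at each step so that
    # the gradient can then be evaluated by a backward adjoint sweep.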
3290     selfA.DirectCalculation = [None,] # Step 0 is not observed
3291     selfA.DirectInnovation  = [None,] # Step 0 is not observed
3292     def CostFunction(x):
3293         _X  = numpy.asmatrix(numpy.ravel( x )).T
3294         if selfA._parameters["StoreInternalVariables"] or \
3295             selfA._toStore("CurrentState") or \
3296             selfA._toStore("CurrentOptimum"):
3297             selfA.StoredVariables["CurrentState"].store( _X )
3298         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3299         selfA.DirectCalculation = [None,]
3300         selfA.DirectInnovation  = [None,]
3301         Jo  = 0.
3302         _Xn = _X
3303         for step in range(0,duration-1):
3304             if hasattr(Y,"store"):
3305                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
3306             else:
3307                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
3308             _Un = Un(step)
3309             #
3310             # Evolution step
3311             if selfA._parameters["EstimationOf"] == "State":
3312                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
3313             elif selfA._parameters["EstimationOf"] == "Parameters":
3314                 pass
3315             #
3316             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3317                 __Bounds = ForceNumericBounds( selfA._parameters["Bounds"] )
3318                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(__Bounds)[:,0])),axis=1)
3319                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(__Bounds)[:,1])),axis=1)
3320             #
3321             # Difference-to-observations step
3322             if selfA._parameters["EstimationOf"] == "State":
3323                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
3324             elif selfA._parameters["EstimationOf"] == "Parameters":
3325                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
3326             #
3327             # Storage of the state
3328             selfA.DirectCalculation.append( _Xn )
3329             selfA.DirectInnovation.append( _YmHMX )
3330             #
3331             # Contribution to the observation term of the cost function
3332             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
3333         J = Jb + Jo
3334         #
3335         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3336         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3337         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3338         selfA.StoredVariables["CostFunctionJ" ].store( J )
3339         if selfA._toStore("IndexOfOptimum") or \
3340             selfA._toStore("CurrentOptimum") or \
3341             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3342             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3343             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3344             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3345         if selfA._toStore("IndexOfOptimum"):
3346             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3347         if selfA._toStore("CurrentOptimum"):
3348             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3349         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3350             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3351         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3352             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3353         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3354             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3355         return J
3356     #
3357     def GradientOfCostFunction(x):
3358         _X      = numpy.asmatrix(numpy.ravel( x )).T
3359         GradJb  = BI * (_X - Xb)
3360         GradJo  = 0.
3361         for step in range(duration-1,0,-1):
3362             # Retrieve the last stored evolution state
3363             _Xn = selfA.DirectCalculation.pop()
3364             # Retrieve the last stored innovation
3365             _YmHMX = selfA.DirectInnovation.pop()
3366             # Computation of the adjoint operators
3367             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3368             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3369             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3370             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3371             # Gradient computation by the adjoint state
3372             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
3373             GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
3374         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3375         return GradJ
3376     #
3377     # Minimization of the cost functional
3378     # --------------------------------
3379     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3380     #
3381     if selfA._parameters["Minimizer"] == "LBFGSB":
3382         if "0.19" <= scipy.version.version <= "1.1.0":
3383             import lbfgsbhlt as optimiseur
3384         else:
3385             import scipy.optimize as optimiseur
3386         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3387             func        = CostFunction,
3388             x0          = Xini,
3389             fprime      = GradientOfCostFunction,
3390             args        = (),
3391             bounds      = selfA._parameters["Bounds"],
3392             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3393             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3394             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3395             iprint      = selfA._parameters["optiprint"],
3396             )
3397         nfeval = Informations['funcalls']
3398         rc     = Informations['warnflag']
3399     elif selfA._parameters["Minimizer"] == "TNC":
3400         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3401             func        = CostFunction,
3402             x0          = Xini,
3403             fprime      = GradientOfCostFunction,
3404             args        = (),
3405             bounds      = selfA._parameters["Bounds"],
3406             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3407             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3408             ftol        = selfA._parameters["CostDecrementTolerance"],
3409             messages    = selfA._parameters["optmessages"],
3410             )
3411     elif selfA._parameters["Minimizer"] == "CG":
3412         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3413             f           = CostFunction,
3414             x0          = Xini,
3415             fprime      = GradientOfCostFunction,
3416             args        = (),
3417             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3418             gtol        = selfA._parameters["GradientNormTolerance"],
3419             disp        = selfA._parameters["optdisp"],
3420             full_output = True,
3421             )
3422     elif selfA._parameters["Minimizer"] == "NCG":
3423         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3424             f           = CostFunction,
3425             x0          = Xini,
3426             fprime      = GradientOfCostFunction,
3427             args        = (),
3428             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3429             avextol     = selfA._parameters["CostDecrementTolerance"],
3430             disp        = selfA._parameters["optdisp"],
3431             full_output = True,
3432             )
3433     elif selfA._parameters["Minimizer"] == "BFGS":
3434         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3435             f           = CostFunction,
3436             x0          = Xini,
3437             fprime      = GradientOfCostFunction,
3438             args        = (),
3439             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3440             gtol        = selfA._parameters["GradientNormTolerance"],
3441             disp        = selfA._parameters["optdisp"],
3442             full_output = True,
3443             )
3444     else:
3445         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3446     #
3447     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3448     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3449     #
3450     # Workaround for a TNC bug affecting the returned Minimum
3451     # ----------------------------------------------------------------
3452     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3453         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3454     #
3455     # Retrieval of the analysis
3456     # ----------------------
3457     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3458     #
3459     selfA.StoredVariables["Analysis"].store( Xa )
3460     #
3461     # Additional computations and/or storage
3462     # ---------------------------------------
3463     if selfA._toStore("BMA"):
3464         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3465     #
3466     return 0
3467
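# Illustrative sketch, not part of the ADAO API (all names below are
# hypothetical): for time-invariant linear operators M (model) and H
# (observation) given as NumPy matrices, the backward loop of
# GradientOfCostFunction above reduces to the adjoint recursion
# g <- M^T (H^T R^-1 d_k + g) over the stored innovations d_k, the gradient
# of Jo with respect to the initial state being -g.
def _sketch_adjoint_gradient_of_Jo(M, H, RI, innovations):
    "Gradient of Jo = 0.5 * sum_k d_k^T R^-1 d_k with respect to the initial state."
    import numpy
    g = numpy.zeros(M.shape[0])
    for d in reversed(innovations):
        g = M.T @ (H.T @ (RI @ numpy.ravel(d)) + g)
    return -g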
3468 # ==============================================================================
3469 def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3470     """
3471     Unscented Kalman Filter
3472     """
3473     if selfA._parameters["EstimationOf"] == "Parameters":
3474         selfA._parameters["StoreInternalVariables"] = True
3475     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
3476     #
3477     L     = Xb.size
3478     Alpha = selfA._parameters["Alpha"]
3479     Beta  = selfA._parameters["Beta"]
3480     if selfA._parameters["Kappa"] == 0:
3481         if selfA._parameters["EstimationOf"] == "State":
3482             Kappa = 0
3483         elif selfA._parameters["EstimationOf"] == "Parameters":
3484             Kappa = 3 - L
3485     else:
3486         Kappa = selfA._parameters["Kappa"]
3487     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3488     Gamma  = math.sqrt( L + Lambda )
3489     #
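    # Note: scaled unscented weights. Every sigma point receives the weight
    # 1/(2*(L+Lambda)); the central point is then corrected, with
    # Wm[0] = Lambda/(L+Lambda) for the mean and an additional
    # (1 - Alpha^2 + Beta) term for the covariance weight Wc[0].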
3490     Ww = []
3491     Ww.append( 0. )
3492     for i in range(2*L):
3493         Ww.append( 1. / (2.*(L + Lambda)) )
3494     #
3495     Wm = numpy.array( Ww )
3496     Wm[0] = Lambda / (L + Lambda)
3497     Wc = numpy.array( Ww )
3498     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
3499     #
3500     # Operators
3501     Hm = HO["Direct"].appliedControledFormTo
3502     #
3503     if selfA._parameters["EstimationOf"] == "State":
3504         Mm = EM["Direct"].appliedControledFormTo
3505     #
3506     if CM is not None and "Tangent" in CM and U is not None:
3507         Cm = CM["Tangent"].asMatrix(Xb)
3508     else:
3509         Cm = None
3510     #
3511     # Observation duration and sizes
3512     if hasattr(Y,"stepnumber"):
3513         duration = Y.stepnumber()
3514         __p = numpy.cumprod(Y.shape())[-1]
3515     else:
3516         duration = 2
3517         __p = numpy.array(Y).size
3518     #
3519     # Precomputation of the inverses of B and R
3520     if selfA._parameters["StoreInternalVariables"] \
3521         or selfA._toStore("CostFunctionJ") \
3522         or selfA._toStore("CostFunctionJb") \
3523         or selfA._toStore("CostFunctionJo"):
3524         BI = B.getI()
3525         RI = R.getI()
3526     #
3527     __n = Xb.size
3528     #
3529     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3530         Xn = Xb
3531         if hasattr(B,"asfullmatrix"):
3532             Pn = B.asfullmatrix(__n)
3533         else:
3534             Pn = B
3535         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3536         selfA.StoredVariables["Analysis"].store( Xb )
3537         if selfA._toStore("APosterioriCovariance"):
3538             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3539     elif selfA._parameters["nextStep"]:
3540         Xn = selfA._getInternalState("Xn")
3541         Pn = selfA._getInternalState("Pn")
3542     #
3543     if selfA._parameters["EstimationOf"] == "Parameters":
3544         XaMin            = Xn
3545         previousJMinimum = numpy.finfo(float).max
3546     #
3547     for step in range(duration-1):
3548         if hasattr(Y,"store"):
3549             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3550         else:
3551             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3552         #
3553         if U is not None:
3554             if hasattr(U,"store") and len(U)>1:
3555                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3556             elif hasattr(U,"store") and len(U)==1:
3557                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3558             else:
3559                 Un = numpy.asmatrix(numpy.ravel( U )).T
3560         else:
3561             Un = None
3562         #
3563         Pndemi = numpy.linalg.cholesky(Pn)
3564         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
3565         nbSpts = 2*Xn.size+1
3566         #
3567         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3568             for point in range(nbSpts):
3569                 Xnp[:,point] = numpy.max(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3570                 Xnp[:,point] = numpy.min(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3571         #
3572         XEtnnp = []
3573         for point in range(nbSpts):
3574             if selfA._parameters["EstimationOf"] == "State":
3575                 XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
3576                 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
3577                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
3578                     XEtnnpi = XEtnnpi + Cm * Un
3579                 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3580                     XEtnnpi = numpy.max(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3581                     XEtnnpi = numpy.min(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3582             elif selfA._parameters["EstimationOf"] == "Parameters":
3583                 # --- > By construction, M = Id and Q = 0
3584                 XEtnnpi = Xnp[:,point]
3585             XEtnnp.append( XEtnnpi )
3586         XEtnnp = numpy.hstack( XEtnnp )
3587         #
3588         Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
3589         #
3590         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3591             Xncm = numpy.max(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3592             Xncm = numpy.min(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3593         #
3594         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
3595         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
3596         for point in range(nbSpts):
3597             Pnm += Wc[point] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
3598         #
3599         if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
3600             Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
3601         else:
3602             Pnmdemi = numpy.linalg.cholesky(Pnm)
3603         #
3604         Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
3605         #
3606         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3607             for point in range(nbSpts):
3608                 Xnnp[:,point] = numpy.max(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3609                 Xnnp[:,point] = numpy.min(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3610         #
3611         Ynnp = []
3612         for point in range(nbSpts):
3613             if selfA._parameters["EstimationOf"] == "State":
3614                 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
3615             elif selfA._parameters["EstimationOf"] == "Parameters":
3616                 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
3617             Ynnp.append( Ynnpi )
3618         Ynnp = numpy.hstack( Ynnp )
3619         #
3620         Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
3621         #
3622         Pyyn = R
3623         Pxyn = 0.
3624         for point in range(nbSpts):
3625             Pyyn += Wc[point] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
3626             Pxyn += Wc[point] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
3627         #
3628         _Innovation  = Ynpu - Yncm
3629         if selfA._parameters["EstimationOf"] == "Parameters":
3630             if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
3631                 _Innovation = _Innovation - Cm * Un
3632         #
3633         Kn = Pxyn * Pyyn.I
3634         Xn = Xncm + Kn * _Innovation
3635         Pn = Pnm - Kn * Pyyn * Kn.T
3636         #
3637         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3638             Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3639             Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3640         #
3641         Xa = Xn # Reference, not a copy
3642         #--------------------------
3643         selfA._setInternalState("Xn", Xn)
3644         selfA._setInternalState("Pn", Pn)
3645         #--------------------------
3646         #
3647         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3648         # ---> with the analysis
3649         selfA.StoredVariables["Analysis"].store( Xa )
3650         # ---> with the current state
3651         if selfA._parameters["StoreInternalVariables"] \
3652             or selfA._toStore("CurrentState"):
3653             selfA.StoredVariables["CurrentState"].store( Xn )
3654         if selfA._toStore("InnovationAtCurrentState"):
3655             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3656         if selfA._parameters["StoreInternalVariables"] \
3657             or selfA._toStore("CostFunctionJ") \
3658             or selfA._toStore("CostFunctionJb") \
3659             or selfA._toStore("CostFunctionJo"):
3660             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3661             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3662             J   = Jb + Jo
3663             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3664             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3665             selfA.StoredVariables["CostFunctionJ" ].store( J )
3666         if selfA._toStore("APosterioriCovariance"):
3667             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3668         if selfA._parameters["EstimationOf"] == "Parameters" \
3669             and J < previousJMinimum:
3670             previousJMinimum    = J
3671             XaMin               = Xa
3672             if selfA._toStore("APosterioriCovariance"):
3673                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3674     #
3675     # Additional final storage of the optimum in parameter estimation
3676     # ----------------------------------------------------------------------
3677     if selfA._parameters["EstimationOf"] == "Parameters":
3678         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3679         selfA.StoredVariables["Analysis"].store( XaMin )
3680         if selfA._toStore("APosterioriCovariance"):
3681             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3682         if selfA._toStore("BMA"):
3683             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3684     #
3685     return 0
3686
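# Illustrative sketch, not part of the ADAO API (all names below are
# hypothetical): the scaled unscented transform used in uckf above, with the
# sigma points X, X +/- Gamma*sqrt(P) and the weights Wm (mean) and Wc
# (covariance), propagated through a function f given as a NumPy callable.
def _sketch_unscented_transform(f, x, P, alpha=1., beta=2., kappa=0.):
    "Return the weighted mean and covariance of f over the sigma points of (x, P)."
    import numpy
    n = x.size
    lam = alpha**2 * (n + kappa) - n
    gamma = numpy.sqrt(n + lam)
    S = numpy.linalg.cholesky(P)
    X = numpy.hstack([x.reshape((n,1)), x.reshape((n,1)) + gamma*S, x.reshape((n,1)) - gamma*S])
    Wm = numpy.full(2*n + 1, 1. / (2.*(n + lam))) ; Wm[0] = lam / (n + lam)
    Wc = Wm.copy() ; Wc[0] += 1. - alpha**2 + beta
    Y = numpy.column_stack([numpy.ravel(f(X[:,i])) for i in range(2*n + 1)])
    ym = Y @ Wm
    Pyy = sum(Wc[i] * numpy.outer(Y[:,i] - ym, Y[:,i] - ym) for i in range(2*n + 1))
    return ym, Pyy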
3687 # ==============================================================================
3688 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3689     """
3690     3DVAR variational analysis with no inversion of B
3691     """
3692     #
3693     # Initializations
3694     # ---------------
3695     #
3696     # Operators
3697     Hm = HO["Direct"].appliedTo
3698     Ha = HO["Adjoint"].appliedInXTo
3699     #
3700     # Precomputation of the transpose of B and of the inverse of R
3701     BT = B.getT()
3702     RI = R.getI()
3703     #
3704     # Starting point of the optimization
3705     Xini = numpy.zeros(Xb.shape)
3706     #
3707     # Definition of the cost function
3708     # ------------------------------
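    # Note: the minimization is carried out on the control variable v, the
    # state being recovered as x = Xb + B v; for a symmetric B the background
    # term then reads 0.5 * v^T B v, which avoids any inversion of B.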
3709     def CostFunction(v):
3710         _V = numpy.asmatrix(numpy.ravel( v )).T
3711         _X = Xb + B * _V
3712         if selfA._parameters["StoreInternalVariables"] or \
3713             selfA._toStore("CurrentState") or \
3714             selfA._toStore("CurrentOptimum"):
3715             selfA.StoredVariables["CurrentState"].store( _X )
3716         _HX = Hm( _X )
3717         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3718         _Innovation = Y - _HX
3719         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3720             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3721             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3722         if selfA._toStore("InnovationAtCurrentState"):
3723             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3724         #
3725         Jb  = float( 0.5 * _V.T * BT * _V )
3726         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3727         J   = Jb + Jo
3728         #
3729         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3730         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3731         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3732         selfA.StoredVariables["CostFunctionJ" ].store( J )
3733         if selfA._toStore("IndexOfOptimum") or \
3734             selfA._toStore("CurrentOptimum") or \
3735             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3736             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3737             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3738             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3739             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3740         if selfA._toStore("IndexOfOptimum"):
3741             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3742         if selfA._toStore("CurrentOptimum"):
3743             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3744         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3745             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3746         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3747             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3748         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3749             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3750         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3751             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3752         return J
3753     #
3754     def GradientOfCostFunction(v):
3755         _V = numpy.asmatrix(numpy.ravel( v )).T
3756         _X = Xb + B * _V
3757         _HX     = Hm( _X )
3758         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3759         GradJb  = BT * _V
3760         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3761         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3762         return GradJ
3763     #
3764     # Minimization of the cost functional
3765     # --------------------------------
3766     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3767     #
3768     if selfA._parameters["Minimizer"] == "LBFGSB":
3769         if "0.19" <= scipy.version.version <= "1.1.0":
3770             import lbfgsbhlt as optimiseur
3771         else:
3772             import scipy.optimize as optimiseur
3773         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3774             func        = CostFunction,
3775             x0          = Xini,
3776             fprime      = GradientOfCostFunction,
3777             args        = (),
3778             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
3779             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3780             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3781             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3782             iprint      = selfA._parameters["optiprint"],
3783             )
3784         nfeval = Informations['funcalls']
3785         rc     = Informations['warnflag']
3786     elif selfA._parameters["Minimizer"] == "TNC":
3787         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3788             func        = CostFunction,
3789             x0          = Xini,
3790             fprime      = GradientOfCostFunction,
3791             args        = (),
3792             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
3793             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3794             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3795             ftol        = selfA._parameters["CostDecrementTolerance"],
3796             messages    = selfA._parameters["optmessages"],
3797             )
3798     elif selfA._parameters["Minimizer"] == "CG":
3799         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3800             f           = CostFunction,
3801             x0          = Xini,
3802             fprime      = GradientOfCostFunction,
3803             args        = (),
3804             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3805             gtol        = selfA._parameters["GradientNormTolerance"],
3806             disp        = selfA._parameters["optdisp"],
3807             full_output = True,
3808             )
3809     elif selfA._parameters["Minimizer"] == "NCG":
3810         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3811             f           = CostFunction,
3812             x0          = Xini,
3813             fprime      = GradientOfCostFunction,
3814             args        = (),
3815             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3816             avextol     = selfA._parameters["CostDecrementTolerance"],
3817             disp        = selfA._parameters["optdisp"],
3818             full_output = True,
3819             )
3820     elif selfA._parameters["Minimizer"] == "BFGS":
3821         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3822             f           = CostFunction,
3823             x0          = Xini,
3824             fprime      = GradientOfCostFunction,
3825             args        = (),
3826             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3827             gtol        = selfA._parameters["GradientNormTolerance"],
3828             disp        = selfA._parameters["optdisp"],
3829             full_output = True,
3830             )
3831     else:
3832         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3833     #
3834     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3835     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3836     #
3837     # Workaround for a TNC bug affecting the returned Minimum
3838     # ----------------------------------------------------------------
3839     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3840         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3841         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3842     else:
3843         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3844     #
3845     # Retrieval of the analysis
3846     # ----------------------
3847     Xa = Minimum
3848     #
3849     selfA.StoredVariables["Analysis"].store( Xa )
3850     #
3851     if selfA._toStore("OMA") or \
3852         selfA._toStore("SigmaObs2") or \
3853         selfA._toStore("SimulationQuantiles") or \
3854         selfA._toStore("SimulatedObservationAtOptimum"):
3855         if selfA._toStore("SimulatedObservationAtCurrentState"):
3856             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3857         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3858             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3859         else:
3860             HXa = Hm( Xa )
3861     #
3862     # Computation of the analysis error covariance
3863     # ---------------------------------
3864     if selfA._toStore("APosterioriCovariance") or \
3865         selfA._toStore("SimulationQuantiles") or \
3866         selfA._toStore("JacobianMatrixAtOptimum") or \
3867         selfA._toStore("KalmanGainAtOptimum"):
3868         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3869         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3870     if selfA._toStore("APosterioriCovariance") or \
3871         selfA._toStore("SimulationQuantiles") or \
3872         selfA._toStore("KalmanGainAtOptimum"):
3873         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3874         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3875     if selfA._toStore("APosterioriCovariance") or \
3876         selfA._toStore("SimulationQuantiles"):
3877         BI = B.getI()
3878         HessienneI = []
3879         nb = Xa.size
3880         for i in range(nb):
3881             _ee    = numpy.matrix(numpy.zeros(nb)).T
3882             _ee[i] = 1.
3883             _HtEE  = numpy.dot(HtM,_ee)
3884             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3885             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3886         HessienneI = numpy.matrix( HessienneI )
3887         A = HessienneI.I
3888         if min(A.shape) != max(A.shape):
3889             raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it must be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3890         if (numpy.diag(A) < 0).any():
3891             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3892         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
3893             try:
3894                 L = numpy.linalg.cholesky( A )
3895             except numpy.linalg.LinAlgError:
3896                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3897     if selfA._toStore("APosterioriCovariance"):
3898         selfA.StoredVariables["APosterioriCovariance"].store( A )
3899     if selfA._toStore("JacobianMatrixAtOptimum"):
3900         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3901     if selfA._toStore("KalmanGainAtOptimum"):
3902         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3903         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3904         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3905     #
3906     # Additional computations and/or storage
3907     # ---------------------------------------
3908     if selfA._toStore("Innovation") or \
3909         selfA._toStore("SigmaObs2") or \
3910         selfA._toStore("MahalanobisConsistency") or \
3911         selfA._toStore("OMB"):
        HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T # HXb is not computed earlier in this function
3912         d  = Y - HXb
3913     if selfA._toStore("Innovation"):
3914         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3915     if selfA._toStore("BMA"):
3916         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3917     if selfA._toStore("OMA"):
3918         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3919     if selfA._toStore("OMB"):
3920         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3921     if selfA._toStore("SigmaObs2"):
3922         TraceR = R.trace(Y.size)
3923         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3924     if selfA._toStore("MahalanobisConsistency"):
3925         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3926     if selfA._toStore("SimulationQuantiles"):
3927         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3928     if selfA._toStore("SimulatedObservationAtBackground"):
3929         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel( Hm( Xb ) ) )
3930     if selfA._toStore("SimulatedObservationAtOptimum"):
3931         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3932     #
3933     return 0
3934
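# Illustrative sketch, not part of the ADAO API (all names below are
# hypothetical): the change of variables behind van3dvar. With x = xb + B v
# and a symmetric B, the background term becomes 0.5 * v^T B v, so the cost
# function is evaluated without inverting B (here with a linear observation
# operator H given as a NumPy matrix).
def _sketch_van3dvar_cost(v, xb, B, H, y, RI):
    "Cost 0.5*v^T B v + 0.5*(y - H x)^T R^-1 (y - H x) with x = xb + B v."
    import numpy
    v, xb, y = numpy.ravel(v), numpy.ravel(xb), numpy.ravel(y)
    x = xb + B @ v
    d = y - H @ x
    return 0.5 * float(v @ (B @ v)) + 0.5 * float(d @ (RI @ d))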
3935 # ==============================================================================
3936 if __name__ == "__main__":
3937     print('\n AUTODIAGNOSTIC\n')