Minor improvements for efficiency on internal variables
[modules/adao.git] / src / daComposant / daCore / NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
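# Illustrative sketch (comments only, not executed): ExecuteFunction above is
# meant to be mapped over a list of triplets by a multiprocessing pool, as done
# in FDApproximation.TangentMatrix. Assuming a hypothetical module "mymodel.py"
# located in "/some/path" and defining a function "H", a single call could be:
#
#   funcrepr = {
#       "__userFunction__path" : "/some/path",  # hypothetical location
#       "__userFunction__modl" : "mymodel",     # hypothetical module name
#       "__userFunction__name" : "H",           # hypothetical function name
#       }
#   HX = ExecuteFunction( (numpy.ones(3), None, funcrepr) )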
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     This class is an interface to define approximated operators. When an
55     object is created from a user function "Function", it provides the three
56     methods "DirectOperator", "TangentOperator" and "AdjointOperator". The
57     finite difference (FD) approximation is controlled by the multiplicative
58     increment "increment" (1% by default), or by the fixed increment "dX",
59     which is multiplied by "increment" (hence in %). Centered FD are used when
60     the boolean "centeredDF" is True (a usage sketch follows the class below).
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Direct computation using the user supplied function.
171
172         NB: the extraArgs are present to preserve the calling signature, but
173         they must not be passed here to the user function.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Computation of the tangent operator as the Jacobian by finite
188         differences, that is the gradient of H at X. Directional finite
189         differences are used around the point X. X is a numpy.matrix.
190
191         Centered finite differences (second order approximation):
192         1/ For each component i of X, the perturbation dX[i] is added to and
193            subtracted from the component X[i], building X_plus_dXi and
194            X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
195            HX_moins_dXi = H( X_moins_dXi ) are computed
196         2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
197            by the step 2*dXi
198         3/ Each result, component by component, becomes a column of the Jacobian
199
200         Non centered finite differences (first order approximation):
201         1/ For each component i of X, the perturbation dX[i] is added to the
202            component X[i], building X_plus_dXi, and the response
203            HX_plus_dXi = H( X_plus_dXi ) is computed
204         2/ The central value HX = H(X) is computed
205         3/ The differences (HX_plus_dXi-HX) are computed and divided by
206            the step dXi
207         4/ Each result, component by component, becomes a column of the Jacobian
208         (a small worked sketch is given in a comment after this method)
209         """
210         logging.debug("FDA Début du calcul de la Jacobienne")
211         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Calcul Jacobienne (explicite)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                     #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                     #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
377         #
378         logging.debug("FDA Fin du calcul de la Jacobienne")
379         #
380         return _Jacobienne
381
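    # Illustrative sketch (comments only): column i of the Jacobian built above
    # is, for the non centered scheme,
    #     J[:,i] ~= ( H(X + dX[i]*e_i) - H(X) ) / dX[i]
    # and, for the centered scheme,
    #     J[:,i] ~= ( H(X + dX[i]*e_i) - H(X - dX[i]*e_i) ) / (2*dX[i])
    # where e_i is the i-th canonical basis vector. For instance, with the
    # hypothetical function H(X) = X**2 (componentwise), X = (1, 2) and
    # dX = 0.01*X, the first centered column is exactly (2., 0.), which is the
    # first column of the exact Jacobian diag(2*X).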
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Tangent operator computation using the Jacobian matrix.
386
387         NB: the extraArgs are present to preserve the calling signature, but
388         they must not be passed here to the user function.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect length of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Computation of the matrix form if the second argument is None
402             # --------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Computation of the linearized value of H at X applied to dX
408             # ------------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Adjoint operator computation using the Jacobian matrix.
418
419         NB: the extraArgs are present to preserve the calling signature, but
420         they must not be passed here to the user function.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect length of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Computation of the matrix form if the second argument is None
434             # --------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Computation of the adjoint value at X applied to Y
440             # ---------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
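# Usage sketch (comments only, assuming a user supplied function "myfunc" and
# vectors "X", "dX", "Y" that are not defined in this module):
#
#   FDA = FDApproximation( Function = myfunc, increment = 0.01, centeredDF = False )
#   HX  = FDA.DirectOperator( X )           # H(X)
#   J   = FDA.TangentMatrix( X )            # finite-difference Jacobian at X
#   dY  = FDA.TangentOperator( (X, dX) )    # linearized H applied to dX
#   Xa  = FDA.AdjointOperator( (X, Y) )     # transposed Jacobian applied to Y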
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448     "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
449     #
450     _bgcenter = numpy.ravel(_bgcenter)[:,None]
451     if _nbmembers < 1:
452         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
453     #
454     if _bgcovariance is None:
455         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
456     else:
457         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
459     #
460     return BackgroundEnsemble
461
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464     "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
465     def __CenteredRandomAnomalies(Zr, N):
466         """
467         Generates a matrix of N random anomalies centered on Zr, following the
468         handwritten notes of MB and consistent with the code of PS with eps = -1
469         """
470         eps = -1
471         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
474         Q = numpy.dot(Q,R)
475         Zr = numpy.dot(Q,Zr)
476         return Zr.T
477     #
478     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
479     if _nbmembers < 1:
480         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
481     if _bgcovariance is None:
482         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
483     else:
484         if _withSVD:
485             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486             _nbctl = _bgcenter.size
487             if _nbmembers > _nbctl:
488                 _Z = numpy.concatenate((numpy.dot(
489                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
491             else:
492                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494             BackgroundEnsemble = _bgcenter + _Zca
495         else:
496             if max(abs(_bgcovariance.flatten())) > 0:
497                 _nbctl = _bgcenter.size
498                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500                 BackgroundEnsemble = _bgcenter + _Zca
501             else:
502                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
503     #
504     return BackgroundEnsemble
505
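# Illustrative sketch (comments only): both generators above return an ensemble
# stored as a (state size) x (_nbmembers) array whose columns are the members.
# For instance, with a hypothetical 2D state:
#
#   E = EnsembleOfBackgroundPerturbations( [0., 0.], numpy.eye(2), 10 )
#   E.shape          # (2, 10)
#   E.mean(axis=1)   # equal to the prescribed center, up to rounding errors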
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
508     "Returns the centered anomalies from an ensemble of size StateSize*NbMembers"
509     if OptMean is None:
510         __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
511     else:
512         __Em = numpy.ravel(OptMean).reshape((-1,1))
513     #
514     return Normalisation * (numpy.asarray(Ensemble) - __Em)
515
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble, __quick = False ):
518     "Returns the empirical estimate of the ensemble error covariance"
519     if __quick:
520         # Fast covariance estimate, but rarely positive definite
521         __Covariance = numpy.cov(Ensemble)
522     else:
523         # Result often identical to numpy.cov, but more robust
524         __n, __m = numpy.asarray(Ensemble).shape
525         __Anomalies = EnsembleOfAnomalies( Ensemble )
526         # Empirical estimate
527         __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
528         # Enforce symmetry
529         __Covariance = (__Covariance + __Covariance.T) * 0.5
530         # Enforce positivity
531         __epsilon    = mpr*numpy.trace(__Covariance)
532         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
533     #
534     return __Covariance
535
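# Illustrative sketch (comments only): with A = EnsembleOfAnomalies( E ) of
# shape (n, m), the robust branch above computes the classical unbiased
# estimate
#     P ~= A @ A.T / (m-1)
# then symmetrizes it and adds mpr*trace(P) on the diagonal to keep it
# numerically positive definite.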
536 # ==============================================================================
537 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
538     "Add a perturbation to each member of an ensemble according to a prescribed covariance"
539     if hasattr(__Covariance,"assparsematrix"):
540         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
541             # Handling of a null or almost null covariance
542             return __Ensemble
543         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
544             # Handling of a null or almost null covariance
545             return __Ensemble
546     else:
547         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
548             # Handling of a null or almost null covariance
549             return __Ensemble
550         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
551             # Handling of a null or almost null covariance
552             return __Ensemble
553     #
554     __n, __m = __Ensemble.shape
555     if __Seed is not None: numpy.random.seed(__Seed)
556     #
557     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
558         # Handling of a covariance that is a multiple of the identity
559         __zero = 0.
560         __std  = numpy.sqrt(__Covariance.assparsematrix())
561         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
562     #
563     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
564         # Handling of a diagonal covariance with non identical variances
565         __zero = numpy.zeros(__n)
566         __std  = numpy.sqrt(__Covariance.assparsematrix())
567         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
568     #
569     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
570         # Handling of a full covariance
571         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
572     #
573     elif isinstance(__Covariance, numpy.ndarray):
574         # Handling of a full numpy covariance, knowing that this case is reached last
575         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
576     #
577     else:
578         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
579     #
580     return __Ensemble
581
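# Illustrative sketch (comments only): for a plain numpy covariance C of shape
# (n, n) and an ensemble E of shape (n, m), the last branch above amounts to
#
#   E += numpy.random.multivariate_normal( numpy.zeros(n), C, size=m ).T
#
# that is, one independent N(0, C) draw is added to each member (column) of E.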
582 # ==============================================================================
583 def CovarianceInflation(
584         InputCovOrEns,
585         InflationType   = None,
586         InflationFactor = None,
587         BackgroundCov   = None,
588         ):
589     """
590     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
591
592     Summary: Hunt 2007, section 2.3.5
593     """
594     if InflationFactor is None:
595         return InputCovOrEns
596     else:
597         InflationFactor = float(InflationFactor)
598     #
599     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
600         if InflationFactor < 1.:
601             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
602         if InflationFactor < 1.+mpr:
603             return InputCovOrEns
604         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
605     #
606     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
607         if InflationFactor < 1.:
608             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
609         if InflationFactor < 1.+mpr:
610             return InputCovOrEns
611         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
612         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
613             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
614     #
615     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
616         if InflationFactor < 0.:
617             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
618         if InflationFactor < mpr:
619             return InputCovOrEns
620         __n, __m = numpy.asarray(InputCovOrEns).shape
621         if __n != __m:
622             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
623         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
624     #
625     elif InflationType == "HybridOnBackgroundCovariance":
626         if InflationFactor < 0.:
627             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
628         if InflationFactor < mpr:
629             return InputCovOrEns
630         __n, __m = numpy.asarray(InputCovOrEns).shape
631         if __n != __m:
632             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
633         if BackgroundCov is None:
634             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
635         if InputCovOrEns.shape != BackgroundCov.shape:
636             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
637         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
638     #
639     elif InflationType == "Relaxation":
640         raise NotImplementedError("InflationType Relaxation")
641     #
642     else:
643         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
644     #
645     return OutputCovOrEns
646
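# Illustrative sketch (comments only): for an ensemble E with member mean Em,
# the multiplicative inflation on anomalies used above is
#     E_inflated = Em + InflationFactor * (E - Em)
# which leaves the ensemble mean unchanged and multiplies the ensemble
# covariance by InflationFactor**2.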
647 # ==============================================================================
648 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
649     """
650     EnKS
651     """
652     #
653     # Operators
654     H = HO["Direct"].appliedControledFormTo
655     #
656     if selfA._parameters["EstimationOf"] == "State":
657         M = EM["Direct"].appliedControledFormTo
658     #
659     if CM is not None and "Tangent" in CM and U is not None:
660         Cm = CM["Tangent"].asMatrix(Xb)
661     else:
662         Cm = None
663     #
664     # Precomputation of the inverses of B and R
665     RIdemi = R.sqrtmI()
666     #
667     # Observation duration and sizes
668     LagL = selfA._parameters["SmootherLagL"]
669     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
670         raise ValueError("Fixed-lag smoother requires a series of observations")
671     if Y.stepnumber() < LagL:
672         raise ValueError("Fixed-lag smoother requires an observation series longer than the lag L")
673     duration = Y.stepnumber()
674     __p = numpy.cumprod(Y.shape())[-1]
675     __n = Xb.size
676     __m = selfA._parameters["NumberOfMembers"]
677     #
678     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
679     else:                         Pn = B
680     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
681         selfA.StoredVariables["Analysis"].store( Xb )
682         if selfA._toStore("APosterioriCovariance"):
683             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
684             covarianceXa = Pn
685     #
686     # Initial direct computation (storage is preferred over recomputation)
687     __seed = numpy.random.get_state()
688     selfB = copy.deepcopy(selfA)
689     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
690     if VariantM == "EnKS16-KalmanFilterFormula":
691         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
692     else:
693         raise ValueError("VariantM has to be chosen in the authorized methods list.")
694     if LagL > 0:
695         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
696     else:
697         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
698     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
699     #
700     for step in range(LagL,duration-1):
701         #
702         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
703         sEL.append(None)
704         #
705         if hasattr(Y,"store"):
706             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
707         else:
708             Ynpu = numpy.ravel( Y ).reshape((__p,1))
709         #
710         if U is not None:
711             if hasattr(U,"store") and len(U)>1:
712                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
713             elif hasattr(U,"store") and len(U)==1:
714                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
715             else:
716                 Un = numpy.asmatrix(numpy.ravel( U )).T
717         else:
718             Un = None
719         #
720         #--------------------------
721         if VariantM == "EnKS16-KalmanFilterFormula":
722             if selfA._parameters["EstimationOf"] == "State": # Forecast
723                 EL = M( [(EL[:,i], Un) for i in range(__m)],
724                     argsAsSerie = True,
725                     returnSerieAsArrayMatrix = True )
726                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
727                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
728                     argsAsSerie = True,
729                     returnSerieAsArrayMatrix = True )
730                 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
731                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
732                     EZ = EZ + Cm * Un
733             elif selfA._parameters["EstimationOf"] == "Parameters":
734                 # ---> By principle, M = Id, Q = 0
735                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
736                     argsAsSerie = True,
737                     returnSerieAsArrayMatrix = True )
738             #
739             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
740             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
741             #
742             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
743             delta = RIdemi @ ( Ynpu - vZm )
744             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
745             vw    = mT @ mS.T @ delta
746             #
747             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
748             mU    = numpy.identity(__m)
749             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
750             #
751             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
752             EL    = vEm + EX @ wTU
753             #
754             sEL[LagL] = EL
755             for irl in range(LagL): # Smoothing of the L previous analyses
756                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
757                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
758                 sEL[irl] = vEm + EX @ wTU
759             #
760             # Keep the order-0 retrospective analysis before rotation
761             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
762             if selfA._toStore("APosterioriCovariance"):
763                 EXn = sEL[0]
764             #
765             for irl in range(LagL):
766                 sEL[irl] = sEL[irl+1]
767             sEL[LagL] = None
768         #--------------------------
769         else:
770             raise ValueError("VariantM has to be chosen in the authorized methods list.")
771         #
772         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
773         # ---> with analysis
774         selfA.StoredVariables["Analysis"].store( Xa )
775         if selfA._toStore("APosterioriCovariance"):
776             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
777     #
778     # Storage of the last, incompletely updated analyses
779     for irl in range(LagL):
780         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
781         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
782         selfA.StoredVariables["Analysis"].store( Xa )
783     #
784     return 0
785
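# Note (comments only): in the EnKS above, the ensemble transform computed at
# the current time,
#     wTU = vw + sqrt(m-1) * T^(1/2),
# is applied both to the current ensemble EL and, through the buffer sEL, to
# the L previously stored ensembles, which is what produces the fixed-lag
# smoothing of the past analyses.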
786 # ==============================================================================
787 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
788     """
789     Ensemble-Transform EnKF
790     """
791     if selfA._parameters["EstimationOf"] == "Parameters":
792         selfA._parameters["StoreInternalVariables"] = True
793     #
794     # Operators
795     # ---------
796     H = HO["Direct"].appliedControledFormTo
797     #
798     if selfA._parameters["EstimationOf"] == "State":
799         M = EM["Direct"].appliedControledFormTo
800     #
801     if CM is not None and "Tangent" in CM and U is not None:
802         Cm = CM["Tangent"].asMatrix(Xb)
803     else:
804         Cm = None
805     #
806     # Number of steps identical to the number of observation steps
807     # -------------------------------------------------------------
808     if hasattr(Y,"stepnumber"):
809         duration = Y.stepnumber()
810         __p = numpy.cumprod(Y.shape())[-1]
811     else:
812         duration = 2
813         __p = numpy.array(Y).size
814     #
815     # Precomputation of the inverses of B and R
816     # ------------------------------------------
817     if selfA._parameters["StoreInternalVariables"] \
818         or selfA._toStore("CostFunctionJ") \
819         or selfA._toStore("CostFunctionJb") \
820         or selfA._toStore("CostFunctionJo") \
821         or selfA._toStore("CurrentOptimum") \
822         or selfA._toStore("APosterioriCovariance"):
823         BI = B.getI()
824         RI = R.getI()
825     elif VariantM != "KalmanFilterFormula":
826         RI = R.getI()
827     if VariantM == "KalmanFilterFormula":
828         RIdemi = R.sqrtmI()
829     #
830     # Initialization
831     # --------------
832     __n = Xb.size
833     __m = selfA._parameters["NumberOfMembers"]
834     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
835     else:                         Pn = B
836     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
837     #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
838     #
839     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
840         selfA.StoredVariables["Analysis"].store( Xb )
841         if selfA._toStore("APosterioriCovariance"):
842             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
843             covarianceXa = Pn
844     #
845     previousJMinimum = numpy.finfo(float).max
        nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Needed below for the optimum index
846     #
847     for step in range(duration-1):
848         if hasattr(Y,"store"):
849             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
850         else:
851             Ynpu = numpy.ravel( Y ).reshape((__p,1))
852         #
853         if U is not None:
854             if hasattr(U,"store") and len(U)>1:
855                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
856             elif hasattr(U,"store") and len(U)==1:
857                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
858             else:
859                 Un = numpy.asmatrix(numpy.ravel( U )).T
860         else:
861             Un = None
862         #
863         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
864             Xn = CovarianceInflation( Xn,
865                 selfA._parameters["InflationType"],
866                 selfA._parameters["InflationFactor"],
867                 )
868         #
869         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
870             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
871                 argsAsSerie = True,
872                 returnSerieAsArrayMatrix = True )
873             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
874             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
875                 argsAsSerie = True,
876                 returnSerieAsArrayMatrix = True )
877             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
878                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
879                 Xn_predicted = Xn_predicted + Cm * Un
880         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
881             # ---> By principle, M = Id, Q = 0
882             Xn_predicted = Xn
883             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
884                 argsAsSerie = True,
885                 returnSerieAsArrayMatrix = True )
886         #
887         # Mean of forecast and observation of forecast
888         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
889         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
890         #
891         # Anomalies
892         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
893         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
894         #
895         #--------------------------
896         if VariantM == "KalmanFilterFormula":
897             mS    = RIdemi * EaHX / math.sqrt(__m-1)
898             delta = RIdemi * ( Ynpu - Hfm )
899             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
900             vw    = mT @ mS.T @ delta
901             #
902             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
903             mU    = numpy.identity(__m)
904             #
905             EaX   = EaX / math.sqrt(__m-1)
906             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
907         #--------------------------
908         elif VariantM == "Variational":
909             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
910             def CostFunction(w):
911                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
912                 _Jo = 0.5 * _A.T @ (RI * _A)
913                 _Jb = 0.5 * (__m-1) * w.T @ w
914                 _J  = _Jo + _Jb
915                 return float(_J)
916             def GradientOfCostFunction(w):
917                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
918                 _GradJo = - EaHX.T @ (RI * _A)
919                 _GradJb = (__m-1) * w.reshape((__m,1))
920                 _GradJ  = _GradJo + _GradJb
921                 return numpy.ravel(_GradJ)
922             vw = scipy.optimize.fmin_cg(
923                 f           = CostFunction,
924                 x0          = numpy.zeros(__m),
925                 fprime      = GradientOfCostFunction,
926                 args        = (),
927                 disp        = False,
928                 )
929             #
930             Hto = EaHX.T @ (RI * EaHX)
931             Htb = (__m-1) * numpy.identity(__m)
932             Hta = Hto + Htb
933             #
934             Pta = numpy.linalg.inv( Hta )
935             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
936             #
937             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
938         #--------------------------
939         elif VariantM == "FiniteSize11": # Gauge Boc2011
940             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
941             def CostFunction(w):
942                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
943                 _Jo = 0.5 * _A.T @ (RI * _A)
944                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
945                 _J  = _Jo + _Jb
946                 return float(_J)
947             def GradientOfCostFunction(w):
948                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
949                 _GradJo = - EaHX.T @ (RI * _A)
950                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
951                 _GradJ  = _GradJo + _GradJb
952                 return numpy.ravel(_GradJ)
953             vw = scipy.optimize.fmin_cg(
954                 f           = CostFunction,
955                 x0          = numpy.zeros(__m),
956                 fprime      = GradientOfCostFunction,
957                 args        = (),
958                 disp        = False,
959                 )
960             #
961             Hto = EaHX.T @ (RI * EaHX)
962             Htb = __m * \
963                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
964                 / (1 + 1/__m + vw.T @ vw)**2
965             Hta = Hto + Htb
966             #
967             Pta = numpy.linalg.inv( Hta )
968             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
969             #
970             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
971         #--------------------------
972         elif VariantM == "FiniteSize15": # Gauge Boc2015
973             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
974             def CostFunction(w):
975                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
976                 _Jo = 0.5 * _A.T @ (RI * _A)
977                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
978                 _J  = _Jo + _Jb
979                 return float(_J)
980             def GradientOfCostFunction(w):
981                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
982                 _GradJo = - EaHX.T @ (RI * _A)
983                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
984                 _GradJ  = _GradJo + _GradJb
985                 return numpy.ravel(_GradJ)
986             vw = scipy.optimize.fmin_cg(
987                 f           = CostFunction,
988                 x0          = numpy.zeros(__m),
989                 fprime      = GradientOfCostFunction,
990                 args        = (),
991                 disp        = False,
992                 )
993             #
994             Hto = EaHX.T @ (RI * EaHX)
995             Htb = (__m+1) * \
996                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
997                 / (1 + 1/__m + vw.T @ vw)**2
998             Hta = Hto + Htb
999             #
1000             Pta = numpy.linalg.inv( Hta )
1001             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1002             #
1003             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1004         #--------------------------
1005         elif VariantM == "FiniteSize16": # Gauge Boc2016
1006             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1007             def CostFunction(w):
1008                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1009                 _Jo = 0.5 * _A.T @ (RI * _A)
1010                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1011                 _J  = _Jo + _Jb
1012                 return float(_J)
1013             def GradientOfCostFunction(w):
1014                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1015                 _GradJo = - EaHX.T @ (RI * _A)
1016                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1017                 _GradJ  = _GradJo + _GradJb
1018                 return numpy.ravel(_GradJ)
1019             vw = scipy.optimize.fmin_cg(
1020                 f           = CostFunction,
1021                 x0          = numpy.zeros(__m),
1022                 fprime      = GradientOfCostFunction,
1023                 args        = (),
1024                 disp        = False,
1025                 )
1026             #
1027             Hto = EaHX.T @ (RI * EaHX)
1028             Htb = ((__m+1) / (__m-1)) * \
1029                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1030                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1031             Hta = Hto + Htb
1032             #
1033             Pta = numpy.linalg.inv( Hta )
1034             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1035             #
1036             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1037         #--------------------------
1038         else:
1039             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1040         #
1041         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1042             Xn = CovarianceInflation( Xn,
1043                 selfA._parameters["InflationType"],
1044                 selfA._parameters["InflationFactor"],
1045                 )
1046         #
1047         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1048         #--------------------------
1049         #
1050         if selfA._parameters["StoreInternalVariables"] \
1051             or selfA._toStore("CostFunctionJ") \
1052             or selfA._toStore("CostFunctionJb") \
1053             or selfA._toStore("CostFunctionJo") \
1054             or selfA._toStore("APosterioriCovariance") \
1055             or selfA._toStore("InnovationAtCurrentAnalysis") \
1056             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1057             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1058             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1059             _Innovation = Ynpu - _HXa
1060         #
1061         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1062         # ---> with analysis
1063         selfA.StoredVariables["Analysis"].store( Xa )
1064         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1065             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1066         if selfA._toStore("InnovationAtCurrentAnalysis"):
1067             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1068         # ---> with current state
1069         if selfA._parameters["StoreInternalVariables"] \
1070             or selfA._toStore("CurrentState"):
1071             selfA.StoredVariables["CurrentState"].store( Xn )
1072         if selfA._toStore("ForecastState"):
1073             selfA.StoredVariables["ForecastState"].store( EMX )
1074         if selfA._toStore("BMA"):
1075             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1076         if selfA._toStore("InnovationAtCurrentState"):
1077             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1078         if selfA._toStore("SimulatedObservationAtCurrentState") \
1079             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1080             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1081         # ---> others
1082         if selfA._parameters["StoreInternalVariables"] \
1083             or selfA._toStore("CostFunctionJ") \
1084             or selfA._toStore("CostFunctionJb") \
1085             or selfA._toStore("CostFunctionJo") \
1086             or selfA._toStore("CurrentOptimum") \
1087             or selfA._toStore("APosterioriCovariance"):
1088             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1089             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1090             J   = Jb + Jo
1091             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1092             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1093             selfA.StoredVariables["CostFunctionJ" ].store( J )
1094             #
1095             if selfA._toStore("IndexOfOptimum") \
1096                 or selfA._toStore("CurrentOptimum") \
1097                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1098                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1099                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1100                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1101                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1102             if selfA._toStore("IndexOfOptimum"):
1103                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1104             if selfA._toStore("CurrentOptimum"):
1105                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1106             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1107                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1108             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1109                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1110             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1111                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1112             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1113                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1114         if selfA._toStore("APosterioriCovariance"):
1115             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1116         if selfA._parameters["EstimationOf"] == "Parameters" \
1117             and J < previousJMinimum:
1118             previousJMinimum    = J
1119             XaMin               = Xa
1120             if selfA._toStore("APosterioriCovariance"):
1121                 covarianceXaMin = Pn
1122         # ---> For the smoothers
1123         if selfA._toStore("CurrentEnsembleState"):
1124             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1125     #
1126     # Final additional storage of the optimum in parameter estimation
1127     # ----------------------------------------------------------------------
1128     if selfA._parameters["EstimationOf"] == "Parameters":
1129         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1130         selfA.StoredVariables["Analysis"].store( XaMin )
1131         if selfA._toStore("APosterioriCovariance"):
1132             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1133         if selfA._toStore("BMA"):
1134             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1135     #
1136     return 0
1137
1138 # ==============================================================================
1139 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1140     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1141     """
1142     Iterative EnKF
1143     """
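    # Sketch of the iteration implemented below (descriptive comment, schematic
    # notation): with Xfm the forecast ensemble mean, EaX the normalized
    # anomalies and w the weight vector, the state is parameterized as
    #     x(w) = Xfm + EaX @ w
    # and the inner loop approximately minimizes, by Gauss-Newton steps,
    #     J(w) = 0.5 * w.T @ w + 0.5 * (y - H(M(x(w)))).T @ RI @ (y - H(M(x(w))))
    # until ||Deltaw|| < _e or _jmax iterations are reached. When BnotT is True,
    # a finite-difference ("bundle") linearization with step _epsilon replaces
    # the ensemble transform matrix Ta.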
1144     if selfA._parameters["EstimationOf"] == "Parameters":
1145         selfA._parameters["StoreInternalVariables"] = True
1146     #
1147     # Operators
1148     # ----------
1149     H = HO["Direct"].appliedControledFormTo
1150     #
1151     if selfA._parameters["EstimationOf"] == "State":
1152         M = EM["Direct"].appliedControledFormTo
1153     #
1154     if CM is not None and "Tangent" in CM and U is not None:
1155         Cm = CM["Tangent"].asMatrix(Xb)
1156     else:
1157         Cm = None
1158     #
1159     # Number of steps identical to the number of observation steps
1160     # -------------------------------------------------------
1161     if hasattr(Y,"stepnumber"):
1162         duration = Y.stepnumber()
1163         __p = numpy.cumprod(Y.shape())[-1]
1164     else:
1165         duration = 2
1166         __p = numpy.array(Y).size
1167     #
1168     # Precomputation of the inverses of B and R
1169     # ----------------------------------
1170     if selfA._parameters["StoreInternalVariables"] \
1171         or selfA._toStore("CostFunctionJ") \
1172         or selfA._toStore("CostFunctionJb") \
1173         or selfA._toStore("CostFunctionJo") \
1174         or selfA._toStore("CurrentOptimum") \
1175         or selfA._toStore("APosterioriCovariance"):
1176         BI = B.getI()
1177     RI = R.getI()
1178     #
1179     # Initialization
1180     # --------------
1181     __n = Xb.size
1182     __m = selfA._parameters["NumberOfMembers"]
1183     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1184     else:                         Pn = B
1185     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1186     else:                         Rn = R
1187     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1188     else:                         Qn = Q
1189     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1190     #
1191     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1192         selfA.StoredVariables["Analysis"].store( Xb )
1193         if selfA._toStore("APosterioriCovariance"):
1194             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1195             covarianceXa = Pn
1196     #
1197     previousJMinimum = numpy.finfo(float).max
1198     #
1199     for step in range(duration-1):
1200         if hasattr(Y,"store"):
1201             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1202         else:
1203             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1204         #
1205         if U is not None:
1206             if hasattr(U,"store") and len(U)>1:
1207                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1208             elif hasattr(U,"store") and len(U)==1:
1209                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1210             else:
1211                 Un = numpy.asmatrix(numpy.ravel( U )).T
1212         else:
1213             Un = None
1214         #
1215         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1216             Xn = CovarianceInflation( Xn,
1217                 selfA._parameters["InflationType"],
1218                 selfA._parameters["InflationFactor"],
1219                 )
1220         #
1221         #--------------------------
1222         if VariantM == "IEnKF12":
1223             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1224             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1225             __j = 0
1226             Deltaw = 1
1227             if not BnotT:
1228                 Ta  = numpy.identity(__m)
1229             vw  = numpy.zeros(__m)
1230             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1231                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1232                 #
1233                 if BnotT:
1234                     E1 = vx1 + _epsilon * EaX
1235                 else:
1236                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1237                 #
1238                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1239                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1240                         argsAsSerie = True,
1241                         returnSerieAsArrayMatrix = True )
1242                 elif selfA._parameters["EstimationOf"] == "Parameters":
1243                     # --- > By principle, M = Id
1244                     E2 = Xn
1245                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1246                 vy1 = H((vx2, Un)).reshape((__p,1))
1247                 #
1248                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1249                     argsAsSerie = True,
1250                     returnSerieAsArrayMatrix = True )
1251                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1252                 #
1253                 if BnotT:
1254                     EaY = (HE2 - vy2) / _epsilon
1255                 else:
1256                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1257                 #
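                # GradJ and mH below are, respectively, the gradient and the
                # Gauss-Newton approximation of the Hessian of the weight-space
                # cost, so that Deltaw solves mH @ Deltaw = -GradJ (one
                # Gauss-Newton update of the weights).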
1258                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1259                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1260                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1261                 #
1262                 vw = vw + Deltaw
1263                 #
1264                 if not BnotT:
1265                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1266                 #
1267                 __j = __j + 1
1268             #
1269             A2 = EnsembleOfAnomalies( E2 )
1270             #
1271             if BnotT:
1272                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1273                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1274             #
1275             Xn = vx2 + A2
1276         #--------------------------
1277         else:
1278             raise ValueError("VariantM has to be chosen from the authorized methods list.")
1279         #
1280         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1281             Xn = CovarianceInflation( Xn,
1282                 selfA._parameters["InflationType"],
1283                 selfA._parameters["InflationFactor"],
1284                 )
1285         #
1286         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1287         #--------------------------
1288         #
1289         if selfA._parameters["StoreInternalVariables"] \
1290             or selfA._toStore("CostFunctionJ") \
1291             or selfA._toStore("CostFunctionJb") \
1292             or selfA._toStore("CostFunctionJo") \
1293             or selfA._toStore("APosterioriCovariance") \
1294             or selfA._toStore("InnovationAtCurrentAnalysis") \
1295             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1296             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1297             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1298             _Innovation = Ynpu - _HXa
1299         #
1300         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1301         # ---> with analysis
1302         selfA.StoredVariables["Analysis"].store( Xa )
1303         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1304             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1305         if selfA._toStore("InnovationAtCurrentAnalysis"):
1306             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1307         # ---> with current state
1308         if selfA._parameters["StoreInternalVariables"] \
1309             or selfA._toStore("CurrentState"):
1310             selfA.StoredVariables["CurrentState"].store( Xn )
1311         if selfA._toStore("ForecastState"):
1312             selfA.StoredVariables["ForecastState"].store( E2 )
1313         if selfA._toStore("BMA"):
1314             selfA.StoredVariables["BMA"].store( E2 - Xa )
1315         if selfA._toStore("InnovationAtCurrentState"):
1316             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1317         if selfA._toStore("SimulatedObservationAtCurrentState") \
1318             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1319             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1320         # ---> others
1321         if selfA._parameters["StoreInternalVariables"] \
1322             or selfA._toStore("CostFunctionJ") \
1323             or selfA._toStore("CostFunctionJb") \
1324             or selfA._toStore("CostFunctionJo") \
1325             or selfA._toStore("CurrentOptimum") \
1326             or selfA._toStore("APosterioriCovariance"):
1327             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1328             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1329             J   = Jb + Jo
1330             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1331             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1332             selfA.StoredVariables["CostFunctionJ" ].store( J )
1333             #
1334             if selfA._toStore("IndexOfOptimum") \
1335                 or selfA._toStore("CurrentOptimum") \
1336                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1337                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1338                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1339                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1340                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1341             if selfA._toStore("IndexOfOptimum"):
1342                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1343             if selfA._toStore("CurrentOptimum"):
1344                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1345             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1346                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1347             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1348                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1349             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1350                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1351             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1352                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1353         if selfA._toStore("APosterioriCovariance"):
1354             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1355         if selfA._parameters["EstimationOf"] == "Parameters" \
1356             and J < previousJMinimum:
1357             previousJMinimum    = J
1358             XaMin               = Xa
1359             if selfA._toStore("APosterioriCovariance"):
1360                 covarianceXaMin = Pn
1361     #
1362     # Final additional storage of the optimum in parameter estimation
1363     # ----------------------------------------------------------------------
1364     if selfA._parameters["EstimationOf"] == "Parameters":
1365         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1366         selfA.StoredVariables["Analysis"].store( XaMin )
1367         if selfA._toStore("APosterioriCovariance"):
1368             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1369         if selfA._toStore("BMA"):
1370             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1371     #
1372     return 0
1373
1374 # ==============================================================================
1375 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1376     """
1377     Incremental 3DVAR
1378     """
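    # Sketch of the incremental formulation implemented below (descriptive
    # comment, schematic notation): an outer loop re-evaluates the tangent
    # linear observation operator Ht at the current iterate Xr, and an inner
    # loop minimizes the quadratic cost in the increment dx around Xb,
    #     J(dx) = 0.5 * dx.T @ BI @ dx + 0.5 * (d - Ht @ dx).T @ RI @ (d - Ht @ dx)
    # with d = Y - H(Xb), until the cost decrement or the maximum number of
    # steps stops the outer loop.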
1379     #
1380     # Initializations
1381     # ---------------
1382     #
1383     # Non-linear operator for the outer loop
1384     Hm = HO["Direct"].appliedTo
1385     #
1386     # Precomputation of the inverses of B and R
1387     BI = B.getI()
1388     RI = R.getI()
1389     #
1390     # Starting point of the optimization
1391     Xini = selfA._parameters["InitializationPoint"]
1392     #
1393     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1394     Innovation = Y - HXb
1395     #
1396     # Outer Loop
1397     # ----------
1398     iOuter = 0
1399     J      = 1./mpr
1400     DeltaJ = 1./mpr
1401     Xr     = Xini.reshape((-1,1))
1402     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1403         #
1404         # Inner Loop
1405         # ----------
1406         Ht = HO["Tangent"].asMatrix(Xr)
1407         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
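        # Note that only the tangent operator Ht is relinearized at Xr here;
        # the innovation d = Y - H(Xb) computed above stays evaluated at the
        # background throughout the outer loop.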
1408         #
1409         # Definition of the cost function
1410         # ------------------------------
1411         def CostFunction(dx):
1412             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1413             if selfA._parameters["StoreInternalVariables"] or \
1414                 selfA._toStore("CurrentState") or \
1415                 selfA._toStore("CurrentOptimum"):
1416                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1417             _HdX = Ht * _dX
1418             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1419             _dInnovation = Innovation - _HdX
1420             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1421                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1422                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1423             if selfA._toStore("InnovationAtCurrentState"):
1424                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1425             #
1426             Jb  = float( 0.5 * _dX.T * BI * _dX )
1427             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1428             J   = Jb + Jo
1429             #
1430             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1431             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1432             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1433             selfA.StoredVariables["CostFunctionJ" ].store( J )
1434             if selfA._toStore("IndexOfOptimum") or \
1435                 selfA._toStore("CurrentOptimum") or \
1436                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1437                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1438                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1439                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1440                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1441             if selfA._toStore("IndexOfOptimum"):
1442                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1443             if selfA._toStore("CurrentOptimum"):
1444                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1445             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1446                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1447             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1448                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1449             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1450                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1451             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1452                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1453             return J
1454         #
1455         def GradientOfCostFunction(dx):
1456             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1457             _HdX         = Ht * _dX
1458             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1459             _dInnovation = Innovation - _HdX
1460             GradJb       = BI * _dX
1461             GradJo       = - Ht.T @ (RI * _dInnovation)
1462             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1463             return GradJ
1464         #
1465         # Minimization of the functional
1466         # --------------------------------
1467         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1468         #
1469         if selfA._parameters["Minimizer"] == "LBFGSB":
1470             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1471             if "0.19" <= scipy.version.version <= "1.1.0":
1472                 import lbfgsbhlt as optimiseur
1473             else:
1474                 import scipy.optimize as optimiseur
1475             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1476                 func        = CostFunction,
1477                 x0          = numpy.zeros(Xini.size),
1478                 fprime      = GradientOfCostFunction,
1479                 args        = (),
1480                 bounds      = selfA._parameters["Bounds"],
1481                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1482                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1483                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1484                 iprint      = selfA._parameters["optiprint"],
1485                 )
1486             nfeval = Informations['funcalls']
1487             rc     = Informations['warnflag']
1488         elif selfA._parameters["Minimizer"] == "TNC":
1489             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1490                 func        = CostFunction,
1491                 x0          = numpy.zeros(Xini.size),
1492                 fprime      = GradientOfCostFunction,
1493                 args        = (),
1494                 bounds      = selfA._parameters["Bounds"],
1495                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1496                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1497                 ftol        = selfA._parameters["CostDecrementTolerance"],
1498                 messages    = selfA._parameters["optmessages"],
1499                 )
1500         elif selfA._parameters["Minimizer"] == "CG":
1501             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1502                 f           = CostFunction,
1503                 x0          = numpy.zeros(Xini.size),
1504                 fprime      = GradientOfCostFunction,
1505                 args        = (),
1506                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1507                 gtol        = selfA._parameters["GradientNormTolerance"],
1508                 disp        = selfA._parameters["optdisp"],
1509                 full_output = True,
1510                 )
1511         elif selfA._parameters["Minimizer"] == "NCG":
1512             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1513                 f           = CostFunction,
1514                 x0          = numpy.zeros(Xini.size),
1515                 fprime      = GradientOfCostFunction,
1516                 args        = (),
1517                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1518                 avextol     = selfA._parameters["CostDecrementTolerance"],
1519                 disp        = selfA._parameters["optdisp"],
1520                 full_output = True,
1521                 )
1522         elif selfA._parameters["Minimizer"] == "BFGS":
1523             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1524                 f           = CostFunction,
1525                 x0          = numpy.zeros(Xini.size),
1526                 fprime      = GradientOfCostFunction,
1527                 args        = (),
1528                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1529                 gtol        = selfA._parameters["GradientNormTolerance"],
1530                 disp        = selfA._parameters["optdisp"],
1531                 full_output = True,
1532                 )
1533         else:
1534             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1535         #
1536         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1537         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1538         #
1539         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1540             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1541             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1542         else:
1543             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1544         #
1545         Xr     = Minimum
1546         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J ; J = selfA.StoredVariables["CostFunctionJ" ][-1] # Update J, otherwise DeltaJ never shrinks and the outer decrement test is ineffective
1547         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1548     #
1549     # Computation of the analysis
1550     # ----------------------
1551     Xa = Xr
1552     #
1553     selfA.StoredVariables["Analysis"].store( Xa )
1554     #
1555     if selfA._toStore("OMA") or \
1556         selfA._toStore("SigmaObs2") or \
1557         selfA._toStore("SimulationQuantiles") or \
1558         selfA._toStore("SimulatedObservationAtOptimum"):
1559         if selfA._toStore("SimulatedObservationAtCurrentState"):
1560             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1561         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1562             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1563         else:
1564             HXa = Hm( Xa )
1565     #
1566     # Computation of the analysis covariance
1567     # ---------------------------------
1568     if selfA._toStore("APosterioriCovariance") or \
1569         selfA._toStore("SimulationQuantiles") or \
1570         selfA._toStore("JacobianMatrixAtOptimum") or \
1571         selfA._toStore("KalmanGainAtOptimum"):
1572         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1573         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1574     if selfA._toStore("APosterioriCovariance") or \
1575         selfA._toStore("SimulationQuantiles") or \
1576         selfA._toStore("KalmanGainAtOptimum"):
1577         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1578         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1579     if selfA._toStore("APosterioriCovariance") or \
1580         selfA._toStore("SimulationQuantiles"):
1581         HessienneI = []
1582         nb = Xa.size
1583         for i in range(nb):
1584             _ee    = numpy.matrix(numpy.zeros(nb)).T
1585             _ee[i] = 1.
1586             _HtEE  = numpy.dot(HtM,_ee)
1587             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1588             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1589         HessienneI = numpy.matrix( HessienneI )
1590         A = HessienneI.I
1591         if min(A.shape) != max(A.shape):
1592             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1593         if (numpy.diag(A) < 0).any():
1594             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1595         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1596             try:
1597                 L = numpy.linalg.cholesky( A )
1598             except Exception:
1599                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1600     if selfA._toStore("APosterioriCovariance"):
1601         selfA.StoredVariables["APosterioriCovariance"].store( A )
1602     if selfA._toStore("JacobianMatrixAtOptimum"):
1603         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1604     if selfA._toStore("KalmanGainAtOptimum"):
1605         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1606         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1607         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1608     #
1609     # Additional computations and/or storage
1610     # ---------------------------------------
1611     if selfA._toStore("Innovation") or \
1612         selfA._toStore("SigmaObs2") or \
1613         selfA._toStore("MahalanobisConsistency") or \
1614         selfA._toStore("OMB"):
1615         d  = Y - HXb
1616     if selfA._toStore("Innovation"):
1617         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1618     if selfA._toStore("BMA"):
1619         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1620     if selfA._toStore("OMA"):
1621         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1622     if selfA._toStore("OMB"):
1623         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1624     if selfA._toStore("SigmaObs2"):
1625         TraceR = R.trace(Y.size)
1626         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1627     if selfA._toStore("MahalanobisConsistency"):
1628         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1629     if selfA._toStore("SimulationQuantiles"):
1630         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1631         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1632         EXr  = None
1633         YfQ  = None
1634         for i in range(nech):
1635             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1636                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1637                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1638                 Yr = HXa + dYr
1639                 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
1640             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1641                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1642                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1643             if YfQ is None:
1644                 YfQ = Yr
1645                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
1646             else:
1647                 YfQ = numpy.hstack((YfQ,Yr))
1648                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
1649         YfQ.sort(axis=-1)
1650         YQ = None
1651         for quantile in selfA._parameters["Quantiles"]:
1652             if not (0. <= float(quantile) <= 1.): continue
1653             indice = int(nech * float(quantile) - 1./nech)
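            # After the sort along the sample axis, the column at position
            # "indice" is used as the empirical estimate of the requested
            # quantile over the nech simulated samples.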
1654             if YQ is None: YQ = YfQ[:,indice]
1655             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1656         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1657         if selfA._toStore("SampledStateForQuantiles"):
1658             selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
1659     if selfA._toStore("SimulatedObservationAtBackground"):
1660         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1661     if selfA._toStore("SimulatedObservationAtOptimum"):
1662         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1663     #
1664     return 0
1665
1666 # ==============================================================================
1667 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1668     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1669     """
1670     Maximum Likelihood Ensemble Filter
1671     """
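    # Sketch of the iteration implemented below (descriptive comment): the
    # "MLEF13" variant iterates in the ensemble-weight space exactly as in the
    # iterative EnKF, but only the observation operator H is re-applied inside
    # the loop (the forecast is computed once per time step); the analysis
    # ensemble is then rebuilt as Xn = vx1 + sqrt(m-1) * EaX @ Ta @ Ua.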
1672     if selfA._parameters["EstimationOf"] == "Parameters":
1673         selfA._parameters["StoreInternalVariables"] = True
1674     #
1675     # Operators
1676     # ----------
1677     H = HO["Direct"].appliedControledFormTo
1678     #
1679     if selfA._parameters["EstimationOf"] == "State":
1680         M = EM["Direct"].appliedControledFormTo
1681     #
1682     if CM is not None and "Tangent" in CM and U is not None:
1683         Cm = CM["Tangent"].asMatrix(Xb)
1684     else:
1685         Cm = None
1686     #
1687     # Number of steps identical to the number of observation steps
1688     # -------------------------------------------------------
1689     if hasattr(Y,"stepnumber"):
1690         duration = Y.stepnumber()
1691         __p = numpy.cumprod(Y.shape())[-1]
1692     else:
1693         duration = 2
1694         __p = numpy.array(Y).size
1695     #
1696     # Precomputation of the inverses of B and R
1697     # ----------------------------------
1698     if selfA._parameters["StoreInternalVariables"] \
1699         or selfA._toStore("CostFunctionJ") \
1700         or selfA._toStore("CostFunctionJb") \
1701         or selfA._toStore("CostFunctionJo") \
1702         or selfA._toStore("CurrentOptimum") \
1703         or selfA._toStore("APosterioriCovariance"):
1704         BI = B.getI()
1705     RI = R.getI()
1706     #
1707     # Initialization
1708     # --------------
1709     __n = Xb.size
1710     __m = selfA._parameters["NumberOfMembers"]
1711     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1712     else:                         Pn = B
1713     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1714     else:                         Rn = R
1715     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1716     #
1717     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1718         selfA.StoredVariables["Analysis"].store( Xb )
1719         if selfA._toStore("APosterioriCovariance"):
1720             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1721             covarianceXa = Pn
1722     #
1723     previousJMinimum = numpy.finfo(float).max
1724     #
1725     for step in range(duration-1):
1726         if hasattr(Y,"store"):
1727             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1728         else:
1729             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1730         #
1731         if U is not None:
1732             if hasattr(U,"store") and len(U)>1:
1733                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1734             elif hasattr(U,"store") and len(U)==1:
1735                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1736             else:
1737                 Un = numpy.asmatrix(numpy.ravel( U )).T
1738         else:
1739             Un = None
1740         #
1741         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1742             Xn = CovarianceInflation( Xn,
1743                 selfA._parameters["InflationType"],
1744                 selfA._parameters["InflationFactor"],
1745                 )
1746         #
1747         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1748             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1749                 argsAsSerie = True,
1750                 returnSerieAsArrayMatrix = True )
1751             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1752             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, this is a duplication!
1753                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1754                 Xn_predicted = Xn_predicted + Cm * Un
1755         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1756             # --- > By principle, M = Id, Q = 0
1757             Xn_predicted = Xn
1758         #
1759         #--------------------------
1760         if VariantM == "MLEF13":
1761             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1762             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1763             Ua  = numpy.identity(__m)
1764             __j = 0
1765             Deltaw = 1
1766             if not BnotT:
1767                 Ta  = numpy.identity(__m)
1768             vw  = numpy.zeros(__m)
1769             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1770                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1771                 #
1772                 if BnotT:
1773                     E1 = vx1 + _epsilon * EaX
1774                 else:
1775                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1776                 #
1777                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1778                     argsAsSerie = True,
1779                     returnSerieAsArrayMatrix = True )
1780                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1781                 #
1782                 if BnotT:
1783                     EaY = (HE2 - vy2) / _epsilon
1784                 else:
1785                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1786                 #
1787                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1788                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1789                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1790                 #
1791                 vw = vw + Deltaw
1792                 #
1793                 if not BnotT:
1794                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1795                 #
1796                 __j = __j + 1
1797             #
1798             if BnotT:
1799                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1800             #
1801             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1802         #--------------------------
1803         else:
1804             raise ValueError("VariantM has to be chosen from the authorized methods list.")
1805         #
1806         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1807             Xn = CovarianceInflation( Xn,
1808                 selfA._parameters["InflationType"],
1809                 selfA._parameters["InflationFactor"],
1810                 )
1811         #
1812         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1813         #--------------------------
1814         #
1815         if selfA._parameters["StoreInternalVariables"] \
1816             or selfA._toStore("CostFunctionJ") \
1817             or selfA._toStore("CostFunctionJb") \
1818             or selfA._toStore("CostFunctionJo") \
1819             or selfA._toStore("APosterioriCovariance") \
1820             or selfA._toStore("InnovationAtCurrentAnalysis") \
1821             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1822             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1823             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1824             _Innovation = Ynpu - _HXa
1825         #
1826         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1827         # ---> with analysis
1828         selfA.StoredVariables["Analysis"].store( Xa )
1829         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1830             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1831         if selfA._toStore("InnovationAtCurrentAnalysis"):
1832             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1833         # ---> with current state
1834         if selfA._parameters["StoreInternalVariables"] \
1835             or selfA._toStore("CurrentState"):
1836             selfA.StoredVariables["CurrentState"].store( Xn )
1837         if selfA._toStore("ForecastState"):
1838             selfA.StoredVariables["ForecastState"].store( EMX )
1839         if selfA._toStore("BMA"):
1840             selfA.StoredVariables["BMA"].store( EMX - Xa )
1841         if selfA._toStore("InnovationAtCurrentState"):
1842             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1843         if selfA._toStore("SimulatedObservationAtCurrentState") \
1844             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1845             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1846         # ---> others
1847         if selfA._parameters["StoreInternalVariables"] \
1848             or selfA._toStore("CostFunctionJ") \
1849             or selfA._toStore("CostFunctionJb") \
1850             or selfA._toStore("CostFunctionJo") \
1851             or selfA._toStore("CurrentOptimum") \
1852             or selfA._toStore("APosterioriCovariance"):
1853             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1854             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1855             J   = Jb + Jo
1856             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1857             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1858             selfA.StoredVariables["CostFunctionJ" ].store( J )
1859             #
1860             if selfA._toStore("IndexOfOptimum") \
1861                 or selfA._toStore("CurrentOptimum") \
1862                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1863                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1864                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1865                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1866                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1867             if selfA._toStore("IndexOfOptimum"):
1868                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1869             if selfA._toStore("CurrentOptimum"):
1870                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1871             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1872                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1873             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1874                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1875             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1876                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1877             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1878                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1879         if selfA._toStore("APosterioriCovariance"):
1880             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1881         if selfA._parameters["EstimationOf"] == "Parameters" \
1882             and J < previousJMinimum:
1883             previousJMinimum    = J
1884             XaMin               = Xa
1885             if selfA._toStore("APosterioriCovariance"):
1886                 covarianceXaMin = Pn
1887     #
1888     # Final additional storage of the optimum in parameter estimation
1889     # ----------------------------------------------------------------------
1890     if selfA._parameters["EstimationOf"] == "Parameters":
1891         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1892         selfA.StoredVariables["Analysis"].store( XaMin )
1893         if selfA._toStore("APosterioriCovariance"):
1894             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1895         if selfA._toStore("BMA"):
1896             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1897     #
1898     return 0
1899
1900 # ==============================================================================
1901 def mmqr(
1902         func     = None,
1903         x0       = None,
1904         fprime   = None,
1905         bounds   = None,
1906         quantile = 0.5,
1907         maxfun   = 15000,
1908         toler    = 1.e-06,
1909         y        = None,
1910         ):
1911     """
1912     Computer implementation of the MMQR algorithm, based on the publication:
1913     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1914     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1915     """
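    # Schematic description (descriptive comment): MMQR minimizes the quantile
    # ("pinball") loss sum_i rho_q(r_i), with rho_q(r) = r * (q - 1_{r<0}) and
    # r = y - func(x), by Majorization-Minimization: the loss is majorized by a
    # smooth quadratic surrogate built from the weights 1/(epsilon + |r|), and
    # the surrogate is decreased by damped (step-halving) weighted least-squares
    # steps. A hypothetical call, for a model "func" with Jacobian "fprime" and
    # observations "y", could look like:
    #     xOpt, Ecart, infos = mmqr(func=func, x0=x0, fprime=fprime, y=y, quantile=0.75)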
1916     #
1917     # Retrieval of the initial data and information
1918     # --------------------------------------------------
1919     variables = numpy.ravel( x0 )
1920     mesures   = numpy.ravel( y )
1921     increment = sys.float_info[0]
1922     p         = variables.size
1923     n         = mesures.size
1924     quantile  = float(quantile)
1925     #
1926     # Computation of the MM parameters
1927     # ---------------------------
1928     tn      = float(toler) / n
1929     e0      = -tn / math.log(tn)
1930     epsilon = (e0-tn)/(1+math.log(e0))
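    # tn, e0 and epsilon are the smoothing parameters of the perturbed quantile
    # loss; the recipe follows the Hunter & Lange reference cited above so that
    # the smoothing error stays of the order of the requested tolerance.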
1931     #
1932     # Initialization computations
1933     # ------------------------
1934     residus  = mesures - numpy.ravel( func( variables ) )
1935     poids    = 1./(epsilon+numpy.abs(residus))
1936     veps     = 1. - 2. * quantile - residus * poids
1937     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1938     iteration = 0
1939     #
1940     # Iterative search
1941     # -------------------
1942     while (increment > toler) and (iteration < maxfun) :
1943         iteration += 1
1944         #
1945         Derivees  = numpy.array(fprime(variables))
1946         Derivees  = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it has passed through YACS pipes
1947         DeriveesT = Derivees.transpose()
1948         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1949         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
1950         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1951         #
1952         variables = variables + step
1953         if bounds is not None:
1954             # Caution: avoid an infinite loop if an interval is too small
1955             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1956                 step      = step/2.
1957                 variables = variables - step
1958         residus   = mesures - numpy.ravel( func(variables) )
1959         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1960         #
1961         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1962             step      = step/2.
1963             variables = variables - step
1964             residus   = mesures - numpy.ravel( func(variables) )
1965             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1966         #
1967         increment     = lastsurrogate-surrogate
1968         poids         = 1./(epsilon+numpy.abs(residus))
1969         veps          = 1. - 2. * quantile - residus * poids
1970         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
1971     #
1972     # Discrepancy measure
1973     # --------------
1974     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
1975     #
1976     return variables, Ecart, [n,p,iteration,increment,0]
1977
1978 # ==============================================================================
1979 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
1980     """
1981     Multi-step and multi-method 3DVAR
1982     """
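    # Schematic description (descriptive comment): this driver chains one
    # elementary assimilation per observation step. In state estimation the
    # previous analysis is propagated by the evolution model M to obtain the
    # forecast Xn_predicted, and "oneCycle" (typically one of the 3DVAR variants
    # of this module) is then called with that forecast as background and Ynpu
    # as observation.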
1983     #
1984     # Initialization
1985     # --------------
1986     Xn = numpy.ravel(Xb).reshape((-1,1))
1987     #
1988     if selfA._parameters["EstimationOf"] == "State":
1989         M = EM["Direct"].appliedTo
1990         #
1991         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1992             selfA.StoredVariables["Analysis"].store( Xn )
1993             if selfA._toStore("APosterioriCovariance"):
1994                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
1995                 else:                         Pn = B
1996                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1997             if selfA._toStore("ForecastState"):
1998                 selfA.StoredVariables["ForecastState"].store( Xn )
1999     #
2000     if hasattr(Y,"stepnumber"):
2001         duration = Y.stepnumber()
2002     else:
2003         duration = 2
2004     #
2005     # Multi-step
2006     # ---------
2007     for step in range(duration-1):
2008         if hasattr(Y,"store"):
2009             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2010         else:
2011             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2012         #
2013         if selfA._parameters["EstimationOf"] == "State": # Forecast
2014             Xn = selfA.StoredVariables["Analysis"][-1]
2015             Xn_predicted = M( Xn )
2016             if selfA._toStore("ForecastState"):
2017                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2018         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2019             # --- > By principle, M = Id, Q = 0
2020             Xn_predicted = Xn
2021         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2022         #
2023         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2024     #
2025     return 0
2026
2027 # ==============================================================================
2028 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2029     """
2030     3DVAR PSAS
2031     """
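    # Schematic description (descriptive comment): PSAS solves the 3DVAR
    # problem in observation (dual) space. With d = Y - H(Xb), the minimized
    # functional in the dual variable w is
    #     J(w) = 0.5 * w.T @ (H @ B @ H.T + R) @ w - w.T @ d
    # and the analysis is recovered in state space as Xa = Xb + B @ H.T @ w.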
2032     #
2033     # Initializations
2034     # ---------------
2035     #
2036     # Operators
2037     Hm = HO["Direct"].appliedTo
2038     #
2039     # Possible use of a precomputed H(Xb) vector
2040     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2041         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2042     else:
2043         HXb = Hm( Xb )
2044     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2045     if Y.size != HXb.size:
2046         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2047     if max(Y.shape) != max(HXb.shape):
2048         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2049     #
2050     if selfA._toStore("JacobianMatrixAtBackground"):
2051         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2052         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2053         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2054     #
2055     Ht = HO["Tangent"].asMatrix(Xb)
2056     BHT = B * Ht.T
2057     HBHTpR = R + Ht * BHT
2058     Innovation = Y - HXb
2059     #
2060     # Starting point of the optimization
2061     Xini = numpy.zeros(Xb.shape)
2062     #
2063     # Definition of the cost function
2064     # ------------------------------
2065     def CostFunction(w):
2066         _W = numpy.asmatrix(numpy.ravel( w )).T
2067         if selfA._parameters["StoreInternalVariables"] or \
2068             selfA._toStore("CurrentState") or \
2069             selfA._toStore("CurrentOptimum"):
2070             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2071         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2072             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2073             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2074         if selfA._toStore("InnovationAtCurrentState"):
2075             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2076         #
2077         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2078         Jo  = float( - _W.T * Innovation )
2079         J   = Jb + Jo
2080         #
2081         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2082         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2083         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2084         selfA.StoredVariables["CostFunctionJ" ].store( J )
2085         if selfA._toStore("IndexOfOptimum") or \
2086             selfA._toStore("CurrentOptimum") or \
2087             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2088             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2089             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2090             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2091             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2092         if selfA._toStore("IndexOfOptimum"):
2093             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2094         if selfA._toStore("CurrentOptimum"):
2095             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2096         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2097             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2098         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2099             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2100         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2101             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2102         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2103             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2104         return J
2105     #
2106     def GradientOfCostFunction(w):
2107         _W = numpy.asmatrix(numpy.ravel( w )).T
2108         GradJb  = HBHTpR * _W
2109         GradJo  = - Innovation
2110         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2111         return GradJ
2112     #
2113     # Minimization of the functional
2114     # --------------------------------
2115     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2116     #
2117     if selfA._parameters["Minimizer"] == "LBFGSB":
2118         if "0.19" <= scipy.version.version <= "1.1.0":
2119             import lbfgsbhlt as optimiseur
2120         else:
2121             import scipy.optimize as optimiseur
2122         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2123             func        = CostFunction,
2124             x0          = Xini,
2125             fprime      = GradientOfCostFunction,
2126             args        = (),
2127             bounds      = selfA._parameters["Bounds"],
2128             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2129             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2130             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2131             iprint      = selfA._parameters["optiprint"],
2132             )
2133         nfeval = Informations['funcalls']
2134         rc     = Informations['warnflag']
2135     elif selfA._parameters["Minimizer"] == "TNC":
2136         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2137             func        = CostFunction,
2138             x0          = Xini,
2139             fprime      = GradientOfCostFunction,
2140             args        = (),
2141             bounds      = selfA._parameters["Bounds"],
2142             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2143             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2144             ftol        = selfA._parameters["CostDecrementTolerance"],
2145             messages    = selfA._parameters["optmessages"],
2146             )
2147     elif selfA._parameters["Minimizer"] == "CG":
2148         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2149             f           = CostFunction,
2150             x0          = Xini,
2151             fprime      = GradientOfCostFunction,
2152             args        = (),
2153             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2154             gtol        = selfA._parameters["GradientNormTolerance"],
2155             disp        = selfA._parameters["optdisp"],
2156             full_output = True,
2157             )
2158     elif selfA._parameters["Minimizer"] == "NCG":
2159         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2160             f           = CostFunction,
2161             x0          = Xini,
2162             fprime      = GradientOfCostFunction,
2163             args        = (),
2164             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2165             avextol     = selfA._parameters["CostDecrementTolerance"],
2166             disp        = selfA._parameters["optdisp"],
2167             full_output = True,
2168             )
2169     elif selfA._parameters["Minimizer"] == "BFGS":
2170         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2171             f           = CostFunction,
2172             x0          = Xini,
2173             fprime      = GradientOfCostFunction,
2174             args        = (),
2175             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2176             gtol        = selfA._parameters["GradientNormTolerance"],
2177             disp        = selfA._parameters["optdisp"],
2178             full_output = True,
2179             )
2180     else:
2181         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2182     #
2183     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2184     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2185     #
2186     # Workaround for a TNC bug affecting the returned Minimum
2187     # ----------------------------------------------------------------
2188     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2189         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2190         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2191     else:
2192         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2193     #
2194     # Computation of the analysis
2195     # ----------------------
2196     Xa = Minimum
2197     #
2198     selfA.StoredVariables["Analysis"].store( Xa )
2199     #
2200     if selfA._toStore("OMA") or \
2201         selfA._toStore("SigmaObs2") or \
2202         selfA._toStore("SimulationQuantiles") or \
2203         selfA._toStore("SimulatedObservationAtOptimum"):
2204         if selfA._toStore("SimulatedObservationAtCurrentState"):
2205             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2206         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2207             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2208         else:
2209             HXa = Hm( Xa )
2210     #
2211     # Computation of the analysis error covariance
2212     # ---------------------------------
2213     if selfA._toStore("APosterioriCovariance") or \
2214         selfA._toStore("SimulationQuantiles") or \
2215         selfA._toStore("JacobianMatrixAtOptimum") or \
2216         selfA._toStore("KalmanGainAtOptimum"):
2217         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2218         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2219     if selfA._toStore("APosterioriCovariance") or \
2220         selfA._toStore("SimulationQuantiles") or \
2221         selfA._toStore("KalmanGainAtOptimum"):
2222         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2223         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2224     if selfA._toStore("APosterioriCovariance") or \
2225         selfA._toStore("SimulationQuantiles"):
2226         BI = B.getI()
2227         RI = R.getI()
2228         HessienneI = []
2229         nb = Xa.size
2230         for i in range(nb):
2231             _ee    = numpy.matrix(numpy.zeros(nb)).T
2232             _ee[i] = 1.
2233             _HtEE  = numpy.dot(HtM,_ee)
2234             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2235             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2236         HessienneI = numpy.matrix( HessienneI )
2237         A = HessienneI.I
2238         if min(A.shape) != max(A.shape):
2239             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2240         if (numpy.diag(A) < 0).any():
2241             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2242         if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
2243             try:
2244                 L = numpy.linalg.cholesky( A )
2245             except numpy.linalg.LinAlgError:
2246                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
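        # Note: the loop above applies the matrix (B^-1 + H.T R^-1 H), that is the
        # Hessian of the cost function for a linearized observation operator, to each
        # unit vector, so "HessienneI" holds this Hessian column by column; its
        # inverse A = (B^-1 + H.T R^-1 H)^-1 is then used below as the (Gaussian,
        # linearized) approximation of the a posteriori analysis error covariance.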
2247     if selfA._toStore("APosterioriCovariance"):
2248         selfA.StoredVariables["APosterioriCovariance"].store( A )
2249     if selfA._toStore("JacobianMatrixAtOptimum"):
2250         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2251     if selfA._toStore("KalmanGainAtOptimum"):
2252         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2253         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2254         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2255     #
2256     # Additional calculations and/or storage
2257     # ---------------------------------------
2258     if selfA._toStore("Innovation") or \
2259         selfA._toStore("SigmaObs2") or \
2260         selfA._toStore("MahalanobisConsistency") or \
2261         selfA._toStore("OMB"):
2262         d  = Y - HXb
2263     if selfA._toStore("Innovation"):
2264         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2265     if selfA._toStore("BMA"):
2266         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2267     if selfA._toStore("OMA"):
2268         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2269     if selfA._toStore("OMB"):
2270         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2271     if selfA._toStore("SigmaObs2"):
2272         TraceR = R.trace(Y.size)
2273         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2274     if selfA._toStore("MahalanobisConsistency"):
2275         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2276     if selfA._toStore("SimulationQuantiles"):
2277         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2278         HXa  = numpy.matrix(numpy.ravel( HXa )).T
2279         EXr  = None
2280         YfQ  = None
2281         for i in range(nech):
2282             if selfA._parameters["SimulationForQuantiles"] == "Linear":
2283                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2284                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2285                 Yr = HXa + dYr
2286                 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
2287             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2288                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2289                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2290             if YfQ is None:
2291                 YfQ = Yr
2292                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
2293             else:
2294                 YfQ = numpy.hstack((YfQ,Yr))
2295                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
2296         YfQ.sort(axis=-1)
2297         YQ = None
2298         for quantile in selfA._parameters["Quantiles"]:
2299             if not (0. <= float(quantile) <= 1.): continue
2300             indice = int(nech * float(quantile) - 1./nech)
2301             if YQ is None: YQ = YfQ[:,indice]
2302             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
2303         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2304         if selfA._toStore("SampledStateForQuantiles"):
2305             selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
2306     if selfA._toStore("SimulatedObservationAtBackground"):
2307         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2308     if selfA._toStore("SimulatedObservationAtOptimum"):
2309         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2310     #
2311     return 0
2312
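# The routine ending above minimizes its functional in observation (dual) space:
# with w the dual control variable, J(w) = 0.5 * w.T (H B H.T + R) w - w.T d, where
# d = Y - H(Xb) is the innovation, and the analysis is recovered as Xa = Xb + B H.T w
# (the names HBHTpR and BHT above are read accordingly). The stand-alone function
# below is only an illustrative sketch of that update, added for documentation: it
# is not called anywhere in this module and assumes a linear observation operator H
# given as a dense numpy array, with small dense B and R.
def _sketch_dual_analysis(Xb, Y, H, B, R):
    "Illustrative dual-space analysis: Xa = Xb + B H.T w with w = (H B H.T + R)^-1 d."
    import numpy
    Xb, Y = numpy.ravel(Xb), numpy.ravel(Y)
    d = Y - H @ Xb                              # Innovation
    w = numpy.linalg.solve(H @ B @ H.T + R, d)  # Dual variable at the optimum
    return Xb + B @ H.T @ w
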
2313 # ==============================================================================
2314 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2315     """
2316     Stochastic EnKF (Ensemble Kalman Filter with perturbed observations)
2317     """
2318     if selfA._parameters["EstimationOf"] == "Parameters":
2319         selfA._parameters["StoreInternalVariables"] = True
2320     #
2321     # Operators
2322     H = HO["Direct"].appliedControledFormTo
2323     #
2324     if selfA._parameters["EstimationOf"] == "State":
2325         M = EM["Direct"].appliedControledFormTo
2326     #
2327     if CM is not None and "Tangent" in CM and U is not None:
2328         Cm = CM["Tangent"].asMatrix(Xb)
2329     else:
2330         Cm = None
2331     #
2332     # Observation duration and sizes
2333     if hasattr(Y,"stepnumber"):
2334         duration = Y.stepnumber()
2335         __p = numpy.cumprod(Y.shape())[-1]
2336     else:
2337         duration = 2
2338         __p = numpy.array(Y).size
2339     #
2340     # Precomputation of the inverses of B and R
2341     if selfA._parameters["StoreInternalVariables"] \
2342         or selfA._toStore("CostFunctionJ") \
2343         or selfA._toStore("CostFunctionJb") \
2344         or selfA._toStore("CostFunctionJo") \
2345         or selfA._toStore("CurrentOptimum") \
2346         or selfA._toStore("APosterioriCovariance"):
2347         BI = B.getI()
2348         RI = R.getI()
2349     #
2350     __n = Xb.size
2351     __m = selfA._parameters["NumberOfMembers"]
2352     #
2353     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2354     else:                         Pn = B
2355     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2356     else:                         Rn = R
2357     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2358     #
2359     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2360         selfA.StoredVariables["Analysis"].store( Xb )
2361         if selfA._toStore("APosterioriCovariance"):
2362             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2363             covarianceXa = Pn
2364     #
2365     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Needed below to locate the optimum over this run only
2366     #
2367     for step in range(duration-1):
2368         if hasattr(Y,"store"):
2369             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2370         else:
2371             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2372         #
2373         if U is not None:
2374             if hasattr(U,"store") and len(U)>1:
2375                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2376             elif hasattr(U,"store") and len(U)==1:
2377                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2378             else:
2379                 Un = numpy.asmatrix(numpy.ravel( U )).T
2380         else:
2381             Un = None
2382         #
2383         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2384             Xn = CovarianceInflation( Xn,
2385                 selfA._parameters["InflationType"],
2386                 selfA._parameters["InflationFactor"],
2387                 )
2388         #
2389         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2390             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2391                 argsAsSerie = True,
2392                 returnSerieAsArrayMatrix = True )
2393             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2394             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2395                 argsAsSerie = True,
2396                 returnSerieAsArrayMatrix = True )
2397             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2398                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2399                 Xn_predicted = Xn_predicted + Cm * Un
2400         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2401             # --- > By convention, M = Id, Q = 0
2402             Xn_predicted = Xn
2403             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2404                 argsAsSerie = True,
2405                 returnSerieAsArrayMatrix = True )
2406         #
2407         # Mean of forecast and observation of forecast
2408         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2409         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2410         #
2411         #--------------------------
2412         if VariantM == "KalmanFilterFormula05":
2413             PfHT, HPfHT = 0., 0.
2414             for i in range(__m):
2415                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2416                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2417                 PfHT  += Exfi * Eyfi.T
2418                 HPfHT += Eyfi * Eyfi.T
2419             PfHT  = (1./(__m-1)) * PfHT
2420             HPfHT = (1./(__m-1)) * HPfHT
2421             Kn     = PfHT * ( R + HPfHT ).I
2422             del PfHT, HPfHT
2423             #
2424             for i in range(__m):
2425                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2426                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2427         #--------------------------
2428         elif VariantM == "KalmanFilterFormula16":
2429             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2430             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2431             #
2432             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2433             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2434             #
2435             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2436             #
2437             for i in range(__m):
2438                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2439         #--------------------------
2440         else:
2441             raise ValueError("VariantM has to be chosen from the list of authorized methods.")
2442         #
2443         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2444             Xn = CovarianceInflation( Xn,
2445                 selfA._parameters["InflationType"],
2446                 selfA._parameters["InflationFactor"],
2447                 )
2448         #
2449         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2450         #--------------------------
2451         #
2452         if selfA._parameters["StoreInternalVariables"] \
2453             or selfA._toStore("CostFunctionJ") \
2454             or selfA._toStore("CostFunctionJb") \
2455             or selfA._toStore("CostFunctionJo") \
2456             or selfA._toStore("APosterioriCovariance") \
2457             or selfA._toStore("InnovationAtCurrentAnalysis") \
2458             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2459             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2460             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2461             _Innovation = Ynpu - _HXa
2462         #
2463         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2464         # ---> with analysis
2465         selfA.StoredVariables["Analysis"].store( Xa )
2466         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2467             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2468         if selfA._toStore("InnovationAtCurrentAnalysis"):
2469             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2470         # ---> with current state
2471         if selfA._parameters["StoreInternalVariables"] \
2472             or selfA._toStore("CurrentState"):
2473             selfA.StoredVariables["CurrentState"].store( Xn )
2474         if selfA._toStore("ForecastState"):
2475             selfA.StoredVariables["ForecastState"].store( EMX )
2476         if selfA._toStore("BMA"):
2477             selfA.StoredVariables["BMA"].store( EMX - Xa )
2478         if selfA._toStore("InnovationAtCurrentState"):
2479             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2480         if selfA._toStore("SimulatedObservationAtCurrentState") \
2481             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2482             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2483         # ---> others
2484         if selfA._parameters["StoreInternalVariables"] \
2485             or selfA._toStore("CostFunctionJ") \
2486             or selfA._toStore("CostFunctionJb") \
2487             or selfA._toStore("CostFunctionJo") \
2488             or selfA._toStore("CurrentOptimum") \
2489             or selfA._toStore("APosterioriCovariance"):
2490             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2491             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2492             J   = Jb + Jo
2493             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2494             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2495             selfA.StoredVariables["CostFunctionJ" ].store( J )
2496             #
2497             if selfA._toStore("IndexOfOptimum") \
2498                 or selfA._toStore("CurrentOptimum") \
2499                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2500                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2501                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2502                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2503                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2504             if selfA._toStore("IndexOfOptimum"):
2505                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2506             if selfA._toStore("CurrentOptimum"):
2507                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2508             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2509                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2510             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2511                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2512             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2513                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2514             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2515                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2516         if selfA._toStore("APosterioriCovariance"):
2517             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2518         if selfA._parameters["EstimationOf"] == "Parameters" \
2519             and J < previousJMinimum:
2520             previousJMinimum    = J
2521             XaMin               = Xa
2522             if selfA._toStore("APosterioriCovariance"):
2523                 covarianceXaMin = Pn
2524     #
2525     # Final additional storage of the optimum in parameter estimation
2526     # ----------------------------------------------------------------------
2527     if selfA._parameters["EstimationOf"] == "Parameters":
2528         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2529         selfA.StoredVariables["Analysis"].store( XaMin )
2530         if selfA._toStore("APosterioriCovariance"):
2531             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2532         if selfA._toStore("BMA"):
2533             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2534     #
2535     return 0
2536
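# The stand-alone function below is only an illustrative sketch, added for
# documentation, of the stochastic ("perturbed observations") EnKF update used by
# the "KalmanFilterFormula05" variant of "senkf" above: the gain
# K = Pf H.T (H Pf H.T + R)^-1 is estimated from the ensemble anomalies, and each
# member gets its own perturbed observation. It is not called anywhere in this
# module and assumes dense numpy arrays.
def _sketch_stochastic_enkf_update(Xf, HXf, Y, R):
    "Xf: (n,m) forecast ensemble, HXf: (p,m) observed ensemble, Y: (p,) observation, R: (p,p)."
    import numpy
    m = Xf.shape[1]
    p = HXf.shape[0]
    Xfm, HXfm = Xf.mean(axis=1, keepdims=True), HXf.mean(axis=1, keepdims=True)
    PfHT  = (Xf  - Xfm)  @ (HXf - HXfm).T / (m - 1)  # Ensemble estimate of Pf H.T
    HPfHT = (HXf - HXfm) @ (HXf - HXfm).T / (m - 1)  # Ensemble estimate of H Pf H.T
    K  = PfHT @ numpy.linalg.inv(HPfHT + R)
    Xa = numpy.empty_like(Xf)
    for i in range(m):
        ri = numpy.random.multivariate_normal(numpy.zeros(p), R)  # Observation perturbation
        Xa[:,i] = Xf[:,i] + K @ (numpy.ravel(Y) + ri - HXf[:,i])
    return Xa
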
2537 # ==============================================================================
2538 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2539     """
2540     3DVAR
2541     """
2542     #
2543     # Initialisations
2544     # ---------------
2545     #
2546     # Operators
2547     Hm = HO["Direct"].appliedTo
2548     Ha = HO["Adjoint"].appliedInXTo
2549     #
2550     # Optional use of a precomputed H(Xb) vector
2551     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2552         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2553     else:
2554         HXb = Hm( Xb )
2555     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2556     if Y.size != HXb.size:
2557         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2558     if max(Y.shape) != max(HXb.shape):
2559         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2560     #
2561     if selfA._toStore("JacobianMatrixAtBackground"):
2562         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2563         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2564         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2565     #
2566     # Precomputation of the inverses of B and R
2567     BI = B.getI()
2568     RI = R.getI()
2569     #
2570     # Starting point of the optimization
2571     Xini = selfA._parameters["InitializationPoint"]
2572     #
2573     # Definition of the cost function
2574     # ------------------------------
2575     def CostFunction(x):
2576         _X  = numpy.asmatrix(numpy.ravel( x )).T
2577         if selfA._parameters["StoreInternalVariables"] or \
2578             selfA._toStore("CurrentState") or \
2579             selfA._toStore("CurrentOptimum"):
2580             selfA.StoredVariables["CurrentState"].store( _X )
2581         _HX = Hm( _X )
2582         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2583         _Innovation = Y - _HX
2584         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2585             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2586             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2587         if selfA._toStore("InnovationAtCurrentState"):
2588             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2589         #
2590         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2591         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2592         J   = Jb + Jo
2593         #
2594         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2595         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2596         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2597         selfA.StoredVariables["CostFunctionJ" ].store( J )
2598         if selfA._toStore("IndexOfOptimum") or \
2599             selfA._toStore("CurrentOptimum") or \
2600             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2601             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2602             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2603             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2604             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2605         if selfA._toStore("IndexOfOptimum"):
2606             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2607         if selfA._toStore("CurrentOptimum"):
2608             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2609         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2610             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2611         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2612             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2613         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2614             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2615         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2616             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2617         return J
2618     #
2619     def GradientOfCostFunction(x):
2620         _X      = numpy.asmatrix(numpy.ravel( x )).T
2621         _HX     = Hm( _X )
2622         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
2623         GradJb  = BI * (_X - Xb)
2624         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
2625         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2626         return GradJ
2627     #
2628     # Minimization of the cost function
2629     # --------------------------------
2630     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2631     #
2632     if selfA._parameters["Minimizer"] == "LBFGSB":
2633         if "0.19" <= scipy.version.version <= "1.1.0":
2634             import lbfgsbhlt as optimiseur
2635         else:
2636             import scipy.optimize as optimiseur
2637         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2638             func        = CostFunction,
2639             x0          = Xini,
2640             fprime      = GradientOfCostFunction,
2641             args        = (),
2642             bounds      = selfA._parameters["Bounds"],
2643             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2644             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2645             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2646             iprint      = selfA._parameters["optiprint"],
2647             )
2648         nfeval = Informations['funcalls']
2649         rc     = Informations['warnflag']
2650     elif selfA._parameters["Minimizer"] == "TNC":
2651         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2652             func        = CostFunction,
2653             x0          = Xini,
2654             fprime      = GradientOfCostFunction,
2655             args        = (),
2656             bounds      = selfA._parameters["Bounds"],
2657             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2658             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2659             ftol        = selfA._parameters["CostDecrementTolerance"],
2660             messages    = selfA._parameters["optmessages"],
2661             )
2662     elif selfA._parameters["Minimizer"] == "CG":
2663         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2664             f           = CostFunction,
2665             x0          = Xini,
2666             fprime      = GradientOfCostFunction,
2667             args        = (),
2668             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2669             gtol        = selfA._parameters["GradientNormTolerance"],
2670             disp        = selfA._parameters["optdisp"],
2671             full_output = True,
2672             )
2673     elif selfA._parameters["Minimizer"] == "NCG":
2674         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2675             f           = CostFunction,
2676             x0          = Xini,
2677             fprime      = GradientOfCostFunction,
2678             args        = (),
2679             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2680             avextol     = selfA._parameters["CostDecrementTolerance"],
2681             disp        = selfA._parameters["optdisp"],
2682             full_output = True,
2683             )
2684     elif selfA._parameters["Minimizer"] == "BFGS":
2685         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2686             f           = CostFunction,
2687             x0          = Xini,
2688             fprime      = GradientOfCostFunction,
2689             args        = (),
2690             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2691             gtol        = selfA._parameters["GradientNormTolerance"],
2692             disp        = selfA._parameters["optdisp"],
2693             full_output = True,
2694             )
2695     else:
2696         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2697     #
2698     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2699     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2700     #
2701     # Workaround for a TNC bug affecting the returned Minimum
2702     # ----------------------------------------------------------------
2703     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2704         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2705     #
2706     # Computation of the analysis
2707     # ----------------------
2708     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2709     #
2710     selfA.StoredVariables["Analysis"].store( Xa )
2711     #
2712     if selfA._toStore("OMA") or \
2713         selfA._toStore("SigmaObs2") or \
2714         selfA._toStore("SimulationQuantiles") or \
2715         selfA._toStore("SimulatedObservationAtOptimum"):
2716         if selfA._toStore("SimulatedObservationAtCurrentState"):
2717             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2718         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2719             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2720         else:
2721             HXa = Hm( Xa )
2722     #
2723     # Computation of the analysis error covariance
2724     # ---------------------------------
2725     if selfA._toStore("APosterioriCovariance") or \
2726         selfA._toStore("SimulationQuantiles") or \
2727         selfA._toStore("JacobianMatrixAtOptimum") or \
2728         selfA._toStore("KalmanGainAtOptimum"):
2729         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2730         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2731     if selfA._toStore("APosterioriCovariance") or \
2732         selfA._toStore("SimulationQuantiles") or \
2733         selfA._toStore("KalmanGainAtOptimum"):
2734         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2735         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2736     if selfA._toStore("APosterioriCovariance") or \
2737         selfA._toStore("SimulationQuantiles"):
2738         HessienneI = []
2739         nb = Xa.size
2740         for i in range(nb):
2741             _ee    = numpy.matrix(numpy.zeros(nb)).T
2742             _ee[i] = 1.
2743             _HtEE  = numpy.dot(HtM,_ee)
2744             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2745             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2746         HessienneI = numpy.matrix( HessienneI )
2747         A = HessienneI.I
2748         if min(A.shape) != max(A.shape):
2749             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2750         if (numpy.diag(A) < 0).any():
2751             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2752         if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
2753             try:
2754                 L = numpy.linalg.cholesky( A )
2755             except numpy.linalg.LinAlgError:
2756                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2757     if selfA._toStore("APosterioriCovariance"):
2758         selfA.StoredVariables["APosterioriCovariance"].store( A )
2759     if selfA._toStore("JacobianMatrixAtOptimum"):
2760         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2761     if selfA._toStore("KalmanGainAtOptimum"):
2762         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2763         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2764         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2765     #
2766     # Additional calculations and/or storage
2767     # ---------------------------------------
2768     if selfA._toStore("Innovation") or \
2769         selfA._toStore("SigmaObs2") or \
2770         selfA._toStore("MahalanobisConsistency") or \
2771         selfA._toStore("OMB"):
2772         d  = Y - HXb
2773     if selfA._toStore("Innovation"):
2774         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2775     if selfA._toStore("BMA"):
2776         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2777     if selfA._toStore("OMA"):
2778         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2779     if selfA._toStore("OMB"):
2780         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2781     if selfA._toStore("SigmaObs2"):
2782         TraceR = R.trace(Y.size)
2783         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2784     if selfA._toStore("MahalanobisConsistency"):
2785         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2786     if selfA._toStore("SimulationQuantiles"):
2787         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2788         HXa  = numpy.matrix(numpy.ravel( HXa )).T
2789         EXr  = None
2790         YfQ  = None
2791         for i in range(nech):
2792             if selfA._parameters["SimulationForQuantiles"] == "Linear":
2793                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2794                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2795                 Yr = HXa + dYr
2796                 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
2797             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2798                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2799                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2800             if YfQ is None:
2801                 YfQ = Yr
2802                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
2803             else:
2804                 YfQ = numpy.hstack((YfQ,Yr))
2805                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
2806         YfQ.sort(axis=-1)
2807         YQ = None
2808         for quantile in selfA._parameters["Quantiles"]:
2809             if not (0. <= float(quantile) <= 1.): continue
2810             indice = int(nech * float(quantile) - 1./nech)
2811             if YQ is None: YQ = YfQ[:,indice]
2812             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
2813         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2814         if selfA._toStore("SampledStateForQuantiles"):
2815             selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
2816     if selfA._toStore("SimulatedObservationAtBackground"):
2817         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2818     if selfA._toStore("SimulatedObservationAtOptimum"):
2819         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2820     #
2821     return 0
2822
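# The stand-alone function below is only an illustrative sketch, added for
# documentation, of the functional minimized by "std3dvar" above, written here for a
# linear observation operator H given as a dense numpy array (the routine itself
# accepts a nonlinear operator). It is not called anywhere in this module.
#   J(x)  = 0.5 (x-Xb).T B^-1 (x-Xb) + 0.5 (Y-Hx).T R^-1 (Y-Hx)
#   dJ/dx = B^-1 (x-Xb) - H.T R^-1 (Y-Hx)
def _sketch_3dvar_cost_and_gradient(x, Xb, Y, H, B, R):
    "Returns (J, gradient of J) for the linear 3DVAR functional."
    import numpy
    x, Xb, Y = numpy.ravel(x), numpy.ravel(Xb), numpy.ravel(Y)
    BI, RI = numpy.linalg.inv(B), numpy.linalg.inv(R)
    innovation = Y - H @ x
    Jb = 0.5 * (x - Xb) @ BI @ (x - Xb)
    Jo = 0.5 * innovation @ RI @ innovation
    gradJ = BI @ (x - Xb) - H.T @ (RI @ innovation)
    return Jb + Jo, gradJ
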
2823 # ==============================================================================
2824 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2825     """
2826     4DVAR
2827     """
2828     #
2829     # Initialisations
2830     # ---------------
2831     #
2832     # Operators
2833     Hm = HO["Direct"].appliedControledFormTo
2834     Mm = EM["Direct"].appliedControledFormTo
2835     #
2836     if CM is not None and "Tangent" in CM and U is not None:
2837         Cm = CM["Tangent"].asMatrix(Xb)
2838     else:
2839         Cm = None
2840     #
2841     def Un(_step):
2842         if U is not None:
2843             if hasattr(U,"store") and 1<=_step<len(U) :
2844                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2845             elif hasattr(U,"store") and len(U)==1:
2846                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2847             else:
2848                 _Un = numpy.asmatrix(numpy.ravel( U )).T
2849         else:
2850             _Un = None
2851         return _Un
2852     def CmUn(_xn,_un):
2853         if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
2854             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2855             _CmUn = _Cm * _un
2856         else:
2857             _CmUn = 0.
2858         return _CmUn
2859     #
2860     # Remark: the observations are used starting from time step number 1,
2861     # and are stored in Yo indexed accordingly. Step 0 is therefore not
2862     # used, since the first analysis step starts with the observation of
2863     # step 1.
2864     #
2865     # Number of time steps identical to the number of observation steps
2866     if hasattr(Y,"stepnumber"):
2867         duration = Y.stepnumber()
2868     else:
2869         duration = 2
2870     #
2871     # Precomputation of the inverses of B and R
2872     BI = B.getI()
2873     RI = R.getI()
2874     #
2875     # Starting point of the optimization
2876     Xini = selfA._parameters["InitializationPoint"]
2877     #
2878     # Definition of the cost function
2879     # ------------------------------
2880     selfA.DirectCalculation = [None,] # Step 0 is not observed
2881     selfA.DirectInnovation  = [None,] # Step 0 is not observed
2882     def CostFunction(x):
2883         _X  = numpy.asmatrix(numpy.ravel( x )).T
2884         if selfA._parameters["StoreInternalVariables"] or \
2885             selfA._toStore("CurrentState") or \
2886             selfA._toStore("CurrentOptimum"):
2887             selfA.StoredVariables["CurrentState"].store( _X )
2888         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2889         selfA.DirectCalculation = [None,]
2890         selfA.DirectInnovation  = [None,]
2891         Jo  = 0.
2892         _Xn = _X
2893         for step in range(0,duration-1):
2894             if hasattr(Y,"store"):
2895                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2896             else:
2897                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2898             _Un = Un(step)
2899             #
2900             # Evolution step
2901             if selfA._parameters["EstimationOf"] == "State":
2902                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2903             elif selfA._parameters["EstimationOf"] == "Parameters":
2904                 pass
2905             #
2906             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2907                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2908                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2909             #
2910             # Discrepancy with the observations
2911             if selfA._parameters["EstimationOf"] == "State":
2912                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2913             elif selfA._parameters["EstimationOf"] == "Parameters":
2914                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2915             #
2916             # Storage of the state
2917             selfA.DirectCalculation.append( _Xn )
2918             selfA.DirectInnovation.append( _YmHMX )
2919             #
2920             # Contribution to the observation term of the cost function
2921             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2922         J = Jb + Jo
2923         #
2924         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2925         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2926         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2927         selfA.StoredVariables["CostFunctionJ" ].store( J )
2928         if selfA._toStore("IndexOfOptimum") or \
2929             selfA._toStore("CurrentOptimum") or \
2930             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2931             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2932             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2933             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2934         if selfA._toStore("IndexOfOptimum"):
2935             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2936         if selfA._toStore("CurrentOptimum"):
2937             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2938         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2939             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2940         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2941             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2942         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2943             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2944         return J
2945     #
2946     def GradientOfCostFunction(x):
2947         _X      = numpy.asmatrix(numpy.ravel( x )).T
2948         GradJb  = BI * (_X - Xb)
2949         GradJo  = 0.
2950         for step in range(duration-1,0,-1):
2951             # Retrieve the last stored evolution state
2952             _Xn = selfA.DirectCalculation.pop()
2953             # Retrieve the last stored innovation
2954             _YmHMX = selfA.DirectInnovation.pop()
2955             # Computation of the adjoints
2956             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2957             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2958             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2959             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2960             # Gradient computation by the adjoint state
2961             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
2962             GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
2963         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2964         return GradJ
2965     #
2966     # Minimization of the cost function
2967     # --------------------------------
2968     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2969     #
2970     if selfA._parameters["Minimizer"] == "LBFGSB":
2971         if "0.19" <= scipy.version.version <= "1.1.0":
2972             import lbfgsbhlt as optimiseur
2973         else:
2974             import scipy.optimize as optimiseur
2975         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2976             func        = CostFunction,
2977             x0          = Xini,
2978             fprime      = GradientOfCostFunction,
2979             args        = (),
2980             bounds      = selfA._parameters["Bounds"],
2981             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2982             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2983             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2984             iprint      = selfA._parameters["optiprint"],
2985             )
2986         nfeval = Informations['funcalls']
2987         rc     = Informations['warnflag']
2988     elif selfA._parameters["Minimizer"] == "TNC":
2989         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2990             func        = CostFunction,
2991             x0          = Xini,
2992             fprime      = GradientOfCostFunction,
2993             args        = (),
2994             bounds      = selfA._parameters["Bounds"],
2995             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2996             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2997             ftol        = selfA._parameters["CostDecrementTolerance"],
2998             messages    = selfA._parameters["optmessages"],
2999             )
3000     elif selfA._parameters["Minimizer"] == "CG":
3001         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3002             f           = CostFunction,
3003             x0          = Xini,
3004             fprime      = GradientOfCostFunction,
3005             args        = (),
3006             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3007             gtol        = selfA._parameters["GradientNormTolerance"],
3008             disp        = selfA._parameters["optdisp"],
3009             full_output = True,
3010             )
3011     elif selfA._parameters["Minimizer"] == "NCG":
3012         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3013             f           = CostFunction,
3014             x0          = Xini,
3015             fprime      = GradientOfCostFunction,
3016             args        = (),
3017             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3018             avextol     = selfA._parameters["CostDecrementTolerance"],
3019             disp        = selfA._parameters["optdisp"],
3020             full_output = True,
3021             )
3022     elif selfA._parameters["Minimizer"] == "BFGS":
3023         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3024             f           = CostFunction,
3025             x0          = Xini,
3026             fprime      = GradientOfCostFunction,
3027             args        = (),
3028             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3029             gtol        = selfA._parameters["GradientNormTolerance"],
3030             disp        = selfA._parameters["optdisp"],
3031             full_output = True,
3032             )
3033     else:
3034         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3035     #
3036     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3037     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3038     #
3039     # Workaround for a TNC bug affecting the returned Minimum
3040     # ----------------------------------------------------------------
3041     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3042         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3043     #
3044     # Computation of the analysis
3045     # ----------------------
3046     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3047     #
3048     selfA.StoredVariables["Analysis"].store( Xa )
3049     #
3050     # Additional calculations and/or storage
3051     # ---------------------------------------
3052     if selfA._toStore("BMA"):
3053         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3054     #
3055     return 0
3056
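# The stand-alone function below is only an illustrative sketch, added for
# documentation, of the backward adjoint recursion implemented in the
# "GradientOfCostFunction" of "std4dvar" above. It is written for linear model and
# observation operators M and H given as dense numpy arrays, with no control input,
# and is not called anywhere in this module. The observation part of the gradient is
# accumulated backward in time as g <- M.T (g + H.T R^-1 (y_k - H x_k)), and the
# full gradient is B^-1 (x0 - Xb) - g.
def _sketch_4dvar_adjoint_gradient(x0, Xb, Ys, M, H, B, R):
    "Ys: list of observations y_1..y_K; returns the gradient of the 4DVAR cost at x0."
    import numpy
    x0, Xb = numpy.ravel(x0), numpy.ravel(Xb)
    BI, RI = numpy.linalg.inv(B), numpy.linalg.inv(R)
    # Forward sweep: propagate the state and store the innovations
    xk, innovations = x0, []
    for y in Ys:
        xk = M @ xk
        innovations.append(numpy.ravel(y) - H @ xk)
    # Backward sweep: accumulate the observation part of the gradient
    g = numpy.zeros_like(x0, dtype=float)
    for d in reversed(innovations):
        g = M.T @ (g + H.T @ (RI @ d))
    return BI @ (x0 - Xb) - g
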
3057 # ==============================================================================
3058 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3059     """
3060     3DVAR variational analysis with no inversion of B
3061     """
3062     #
3063     # Initialisations
3064     # ---------------
3065     #
3066     # Operators
3067     Hm = HO["Direct"].appliedTo
3068     Ha = HO["Adjoint"].appliedInXTo
3069     #
3070     # Precomputation of the transpose of B and of the inverse of R
3071     BT = B.getT()
3072     RI = R.getI()
3073     #
3074     # Starting point of the optimization
3075     Xini = numpy.zeros(Xb.shape)
3076     #
3077     # Definition of the cost function
3078     # ------------------------------
3079     def CostFunction(v):
3080         _V = numpy.asmatrix(numpy.ravel( v )).T
3081         _X = Xb + B * _V
3082         if selfA._parameters["StoreInternalVariables"] or \
3083             selfA._toStore("CurrentState") or \
3084             selfA._toStore("CurrentOptimum"):
3085             selfA.StoredVariables["CurrentState"].store( _X )
3086         _HX = Hm( _X )
3087         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3088         _Innovation = Y - _HX
3089         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3090             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3091             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3092         if selfA._toStore("InnovationAtCurrentState"):
3093             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3094         #
3095         Jb  = float( 0.5 * _V.T * BT * _V )
3096         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3097         J   = Jb + Jo
3098         #
3099         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3100         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3101         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3102         selfA.StoredVariables["CostFunctionJ" ].store( J )
3103         if selfA._toStore("IndexOfOptimum") or \
3104             selfA._toStore("CurrentOptimum") or \
3105             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3106             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3107             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3108             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3109             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3110         if selfA._toStore("IndexOfOptimum"):
3111             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3112         if selfA._toStore("CurrentOptimum"):
3113             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3114         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3115             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3116         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3117             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3118         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3119             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3120         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3121             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3122         return J
3123     #
3124     def GradientOfCostFunction(v):
3125         _V = numpy.asmatrix(numpy.ravel( v )).T
3126         _X = Xb + B * _V
3127         _HX     = Hm( _X )
3128         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3129         GradJb  = BT * _V
3130         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3131         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3132         return GradJ
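    # Note on the change of variables used above: the control variable is v, and the
    # state is reconstructed as x = Xb + B v, so that for a symmetric B the
    # background term reads
    #     Jb(v) = 0.5 (x-Xb).T B^-1 (x-Xb) = 0.5 (B v).T B^-1 (B v) = 0.5 v.T B.T v,
    # which is what "CostFunction" computes: no inversion of B is required.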
3133     #
3134     # Minimization of the cost function
3135     # --------------------------------
3136     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3137     #
3138     if selfA._parameters["Minimizer"] == "LBFGSB":
3139         if "0.19" <= scipy.version.version <= "1.1.0":
3140             import lbfgsbhlt as optimiseur
3141         else:
3142             import scipy.optimize as optimiseur
3143         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3144             func        = CostFunction,
3145             x0          = Xini,
3146             fprime      = GradientOfCostFunction,
3147             args        = (),
3148             bounds      = selfA._parameters["Bounds"],
3149             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3150             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3151             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3152             iprint      = selfA._parameters["optiprint"],
3153             )
3154         nfeval = Informations['funcalls']
3155         rc     = Informations['warnflag']
3156     elif selfA._parameters["Minimizer"] == "TNC":
3157         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3158             func        = CostFunction,
3159             x0          = Xini,
3160             fprime      = GradientOfCostFunction,
3161             args        = (),
3162             bounds      = selfA._parameters["Bounds"],
3163             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3164             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3165             ftol        = selfA._parameters["CostDecrementTolerance"],
3166             messages    = selfA._parameters["optmessages"],
3167             )
3168     elif selfA._parameters["Minimizer"] == "CG":
3169         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3170             f           = CostFunction,
3171             x0          = Xini,
3172             fprime      = GradientOfCostFunction,
3173             args        = (),
3174             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3175             gtol        = selfA._parameters["GradientNormTolerance"],
3176             disp        = selfA._parameters["optdisp"],
3177             full_output = True,
3178             )
3179     elif selfA._parameters["Minimizer"] == "NCG":
3180         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3181             f           = CostFunction,
3182             x0          = Xini,
3183             fprime      = GradientOfCostFunction,
3184             args        = (),
3185             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3186             avextol     = selfA._parameters["CostDecrementTolerance"],
3187             disp        = selfA._parameters["optdisp"],
3188             full_output = True,
3189             )
3190     elif selfA._parameters["Minimizer"] == "BFGS":
3191         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3192             f           = CostFunction,
3193             x0          = Xini,
3194             fprime      = GradientOfCostFunction,
3195             args        = (),
3196             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3197             gtol        = selfA._parameters["GradientNormTolerance"],
3198             disp        = selfA._parameters["optdisp"],
3199             full_output = True,
3200             )
3201     else:
3202         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3203     #
3204     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3205     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3206     #
3207     # Workaround for a TNC bug affecting the returned Minimum
3208     # ----------------------------------------------------------------
3209     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3210         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3211         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3212     else:
3213         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3214     #
3215     # Computation of the analysis
3216     # ----------------------
3217     Xa = Minimum
3218     #
3219     selfA.StoredVariables["Analysis"].store( Xa )
3220     #
3221     if selfA._toStore("OMA") or \
3222         selfA._toStore("SigmaObs2") or \
3223         selfA._toStore("SimulationQuantiles") or \
3224         selfA._toStore("SimulatedObservationAtOptimum"):
3225         if selfA._toStore("SimulatedObservationAtCurrentState"):
3226             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3227         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3228             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3229         else:
3230             HXa = Hm( Xa )
3231     #
3232     # Computation of the analysis error covariance
3233     # ---------------------------------
3234     if selfA._toStore("APosterioriCovariance") or \
3235         selfA._toStore("SimulationQuantiles") or \
3236         selfA._toStore("JacobianMatrixAtOptimum") or \
3237         selfA._toStore("KalmanGainAtOptimum"):
3238         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3239         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3240     if selfA._toStore("APosterioriCovariance") or \
3241         selfA._toStore("SimulationQuantiles") or \
3242         selfA._toStore("KalmanGainAtOptimum"):
3243         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3244         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3245     if selfA._toStore("APosterioriCovariance") or \
3246         selfA._toStore("SimulationQuantiles"):
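             # The a posteriori covariance A is obtained by inverting the Hessian
             # of the cost function, assembled column by column below from the
             # tangent (HtM) and adjoint (HaM) operators evaluated at the optimum:
             #   A = (B^-1 + H^T R^-1 H)^-1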
3247         BI = B.getI()
3248         HessienneI = []
3249         nb = Xa.size
3250         for i in range(nb):
3251             _ee    = numpy.matrix(numpy.zeros(nb)).T
3252             _ee[i] = 1.
3253             _HtEE  = numpy.dot(HtM,_ee)
3254             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3255             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3256         HessienneI = numpy.matrix( HessienneI )
3257         A = HessienneI.I
3258         if min(A.shape) != max(A.shape):
3259             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3260         if (numpy.diag(A) < 0).any():
3261             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3262         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
3263             try:
3264                 L = numpy.linalg.cholesky( A )
3265             except numpy.linalg.LinAlgError:
3266                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3267     if selfA._toStore("APosterioriCovariance"):
3268         selfA.StoredVariables["APosterioriCovariance"].store( A )
3269     if selfA._toStore("JacobianMatrixAtOptimum"):
3270         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3271     if selfA._toStore("KalmanGainAtOptimum"):
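             # Two algebraically equivalent expressions of the gain are used, so
             # that the matrix to invert has the size of the smaller of the
             # observation and state spaces:
             #   K = B H^T (R + H B H^T)^-1           if dim(Y) <= dim(Xb)
             #   K = (B^-1 + H^T R^-1 H)^-1 H^T R^-1  otherwise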
3272         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3273         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3274         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3275     #
3276     # Additional calculations and/or storage
3277     # ---------------------------------------
3278     if selfA._toStore("Innovation") or \
3279         selfA._toStore("SigmaObs2") or \
3280         selfA._toStore("MahalanobisConsistency") or \
3281         selfA._toStore("OMB"):
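             # Innovation vector: observations minus the simulated observations
             # of the background, d = Y - H(Xb)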
3282         d  = Y - HXb
3283     if selfA._toStore("Innovation"):
3284         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3285     if selfA._toStore("BMA"):
3286         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3287     if selfA._toStore("OMA"):
3288         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3289     if selfA._toStore("OMB"):
3290         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3291     if selfA._toStore("SigmaObs2"):
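             # Indicator of the observation error variance, in the spirit of the
             # Desroziers-Ivanov diagnostic: (Y - H(Xb))^T (Y - H(Xa)) / Tr(R)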
3292         TraceR = R.trace(Y.size)
3293         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3294     if selfA._toStore("MahalanobisConsistency"):
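             # Consistency indicator 2*J(Xa)/dim(Y): it is expected to be close
             # to 1 when the error statistics B and R fit the innovations.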
3295         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3296     if selfA._toStore("SimulationQuantiles"):
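             # Monte Carlo estimation of the quantiles of the simulated
             # observations: states are drawn from N(Xa, A) and propagated either
             # through the tangent linear operator ("Linear") or through the full
             # observation operator Hm ("NonLinear").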
3297         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
3298         HXa  = numpy.matrix(numpy.ravel( HXa )).T
3299         EXr  = None
3300         YfQ  = None
3301         for i in range(nech):
3302             if selfA._parameters["SimulationForQuantiles"] == "Linear":
3303                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
3304                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
3305                 Yr = HXa + dYr
3306                 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
3307             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
3308                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
3309                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
3310             if YfQ is None:
3311                 YfQ = Yr
3312                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
3313             else:
3314                 YfQ = numpy.hstack((YfQ,Yr))
3315                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
3316         YfQ.sort(axis=-1)
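             # The sampled observations are sorted component-wise and each
             # requested quantile is read at the corresponding empirical index.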
3317         YQ = None
3318         for quantile in selfA._parameters["Quantiles"]:
3319             if not (0. <= float(quantile) <= 1.): continue
3320             indice = int(nech * float(quantile) - 1./nech)
3321             if YQ is None: YQ = YfQ[:,indice]
3322             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
3323         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
3324         if selfA._toStore("SampledStateForQuantiles"):
3325             selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
3326     if selfA._toStore("SimulatedObservationAtBackground"):
3327         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3328     if selfA._toStore("SimulatedObservationAtOptimum"):
3329         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3330     #
3331     return 0
3332
3333 # ==============================================================================
3334 if __name__ == "__main__":
3335     print('\n AUTODIAGNOSTIC\n')