Salome HOME
Minor internal improvements
[modules/adao.git] / src / daComposant / daCore / NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Définit les objets numériques génériques.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     Cette classe sert d'interface pour définir les opérateurs approximés. A la
55     création d'un objet, en fournissant une fonction "Function", on obtient un
56     objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57     "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58     multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59     "dX" qui sera multiplié par "increment" (donc en %), et on effectue de DF
60     centrées si le booléen "centeredDF" est vrai.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Calcul du direct à l'aide de la fonction fournie.
171
172         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
173         ne doivent pas être données ici à la fonction utilisateur.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
188         c'est-à-dire le gradient de H en X. On utilise des différences finies
189         directionnelles autour du point X. X est un numpy.matrix.
190
191         Différences finies centrées (approximation d'ordre 2):
192         1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
193            dX[i] à la  composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
194            on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
195            H( X_moins_dXi )
196         2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
197            le pas 2*dXi
198         3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
199
200         Différences finies non centrées (approximation d'ordre 1):
201         1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
202            composante X[i] pour composer X_plus_dXi, et on calcule la réponse
203            HX_plus_dXi = H( X_plus_dXi )
204         2/ On calcule la valeur centrale HX = H(X)
205         3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
206            le pas dXi
207         4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
208
209         """
210         logging.debug("FDA Début du calcul de la Jacobienne")
211         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Calcul Jacobienne (explicite)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                      #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                    #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
377         #
378         logging.debug("FDA Fin du calcul de la Jacobienne")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Calcul du tangent à l'aide de la Jacobienne.
386
387         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
388         ne doivent pas être données ici à la fonction utilisateur.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect lenght of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Calcul de la forme matricielle si le second argument est None
402             # -------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Calcul de la valeur linéarisée de H en X appliqué à dX
408             # ------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Calcul de l'adjoint à l'aide de la Jacobienne.
418
419         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
420         ne doivent pas être données ici à la fonction utilisateur.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect lenght of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Calcul de la forme matricielle si le second argument est None
434             # -------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Calcul de la valeur de l'adjoint en X appliqué à Y
440             # --------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
449     #
450     _bgcenter = numpy.ravel(_bgcenter)[:,None]
451     if _nbmembers < 1:
452         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
453     #
454     if _bgcovariance is None:
455         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
456     else:
457         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
459     #
460     return BackgroundEnsemble
461
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
465     def __CenteredRandomAnomalies(Zr, N):
466         """
467         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
468         notes manuscrites de MB et conforme au code de PS avec eps = -1
469         """
470         eps = -1
471         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
474         Q = numpy.dot(Q,R)
475         Zr = numpy.dot(Q,Zr)
476         return Zr.T
477     #
478     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
479     if _nbmembers < 1:
480         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
481     if _bgcovariance is None:
482         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
483     else:
484         if _withSVD:
485             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486             _nbctl = _bgcenter.size
487             if _nbmembers > _nbctl:
488                 _Z = numpy.concatenate((numpy.dot(
489                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
491             else:
492                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494             BackgroundEnsemble = _bgcenter + _Zca
495         else:
496             if max(abs(_bgcovariance.flatten())) > 0:
497                 _nbctl = _bgcenter.size
498                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500                 BackgroundEnsemble = _bgcenter + _Zca
501             else:
502                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
503     #
504     return BackgroundEnsemble
505
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
508     "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
509     if OptMean is None:
510         __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
511     else:
512         __Em = numpy.ravel(OptMean).reshape((-1,1))
513     #
514     return Normalisation * (numpy.asarray(Ensemble) - __Em)
515
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble, __quick = False ):
518     "Renvoie l'estimation empirique de la covariance d'ensemble"
519     if __quick:
520         # Covariance rapide mais rarement définie positive
521         __Covariance = numpy.cov(Ensemble)
522     else:
523         # Résultat souvent identique à numpy.cov, mais plus robuste
524         __n, __m = numpy.asarray(Ensemble).shape
525         __Anomalies = EnsembleOfAnomalies( Ensemble )
526         # Estimation empirique
527         __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
528         # Assure la symétrie
529         __Covariance = (__Covariance + __Covariance.T) * 0.5
530         # Assure la positivité
531         __epsilon    = mpr*numpy.trace(__Covariance)
532         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
533     #
534     return __Covariance
535
536 # ==============================================================================
537 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
538     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
539     if hasattr(__Covariance,"assparsematrix"):
540         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
541             # Traitement d'une covariance nulle ou presque
542             return __Ensemble
543         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
544             # Traitement d'une covariance nulle ou presque
545             return __Ensemble
546     else:
547         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
548             # Traitement d'une covariance nulle ou presque
549             return __Ensemble
550         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
551             # Traitement d'une covariance nulle ou presque
552             return __Ensemble
553     #
554     __n, __m = __Ensemble.shape
555     if __Seed is not None: numpy.random.seed(__Seed)
556     #
557     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
558         # Traitement d'une covariance multiple de l'identité
559         __zero = 0.
560         __std  = numpy.sqrt(__Covariance.assparsematrix())
561         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
562     #
563     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
564         # Traitement d'une covariance diagonale avec variances non identiques
565         __zero = numpy.zeros(__n)
566         __std  = numpy.sqrt(__Covariance.assparsematrix())
567         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
568     #
569     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
570         # Traitement d'une covariance pleine
571         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
572     #
573     elif isinstance(__Covariance, numpy.ndarray):
574         # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
575         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
576     #
577     else:
578         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
579     #
580     return __Ensemble
581
582 # ==============================================================================
583 def CovarianceInflation(
584         InputCovOrEns,
585         InflationType   = None,
586         InflationFactor = None,
587         BackgroundCov   = None,
588         ):
589     """
590     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
591
592     Synthèse : Hunt 2007, section 2.3.5
593     """
594     if InflationFactor is None:
595         return InputCovOrEns
596     else:
597         InflationFactor = float(InflationFactor)
598     #
599     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
600         if InflationFactor < 1.:
601             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
602         if InflationFactor < 1.+mpr:
603             return InputCovOrEns
604         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
605     #
606     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
607         if InflationFactor < 1.:
608             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
609         if InflationFactor < 1.+mpr:
610             return InputCovOrEns
611         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
612         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
613             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
614     #
615     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
616         if InflationFactor < 0.:
617             raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
618         if InflationFactor < mpr:
619             return InputCovOrEns
620         __n, __m = numpy.asarray(InputCovOrEns).shape
621         if __n != __m:
622             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
623         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
624     #
625     elif InflationType == "HybridOnBackgroundCovariance":
626         if InflationFactor < 0.:
627             raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
628         if InflationFactor < mpr:
629             return InputCovOrEns
630         __n, __m = numpy.asarray(InputCovOrEns).shape
631         if __n != __m:
632             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
633         if BackgroundCov is None:
634             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
635         if InputCovOrEns.shape != BackgroundCov.shape:
636             raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
637         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
638     #
639     elif InflationType == "Relaxation":
640         raise NotImplementedError("InflationType Relaxation")
641     #
642     else:
643         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
644     #
645     return OutputCovOrEns
646
647 # ==============================================================================
648 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
649     "Estimation des quantiles a posteriori (selfA est modifié)"
650     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
651     #
652     # Échantillonnage des états
653     YfQ  = None
654     EXr  = None
655     if selfA._parameters["SimulationForQuantiles"] == "Linear":
656         HXa  = numpy.matrix(numpy.ravel( HXa )).T
657     for i in range(nbsamples):
658         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
659             dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
660             dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
661             Yr = HXa + dYr
662             if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
663         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
664             Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
665             Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
666         if YfQ is None:
667             YfQ = Yr
668             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
669         else:
670             YfQ = numpy.hstack((YfQ,Yr))
671             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
672     #
673     # Extraction des quantiles
674     YfQ.sort(axis=-1)
675     YQ = None
676     for quantile in selfA._parameters["Quantiles"]:
677         if not (0. <= float(quantile) <= 1.): continue
678         indice = int(nbsamples * float(quantile) - 1./nbsamples)
679         if YQ is None: YQ = YfQ[:,indice]
680         else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
681     selfA.StoredVariables["SimulationQuantiles"].store( YQ )
682     if selfA._toStore("SampledStateForQuantiles"):
683         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
684     #
685     return 0
686
687 # ==============================================================================
688 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
689     """
690     EnKS
691     """
692     #
693     # Opérateurs
694     H = HO["Direct"].appliedControledFormTo
695     #
696     if selfA._parameters["EstimationOf"] == "State":
697         M = EM["Direct"].appliedControledFormTo
698     #
699     if CM is not None and "Tangent" in CM and U is not None:
700         Cm = CM["Tangent"].asMatrix(Xb)
701     else:
702         Cm = None
703     #
704     # Précalcul des inversions de B et R
705     RIdemi = R.sqrtmI()
706     #
707     # Durée d'observation et tailles
708     LagL = selfA._parameters["SmootherLagL"]
709     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
710         raise ValueError("Fixed-lag smoother requires a series of observation")
711     if Y.stepnumber() < LagL:
712         raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
713     duration = Y.stepnumber()
714     __p = numpy.cumprod(Y.shape())[-1]
715     __n = Xb.size
716     __m = selfA._parameters["NumberOfMembers"]
717     #
718     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
719     else:                         Pn = B
720     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
721         selfA.StoredVariables["Analysis"].store( Xb )
722         if selfA._toStore("APosterioriCovariance"):
723             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
724             covarianceXa = Pn
725     #
726     # Calcul direct initial (on privilégie la mémorisation au recalcul)
727     __seed = numpy.random.get_state()
728     selfB = copy.deepcopy(selfA)
729     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
730     if VariantM == "EnKS16-KalmanFilterFormula":
731         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
732     else:
733         raise ValueError("VariantM has to be chosen in the authorized methods list.")
734     if LagL > 0:
735         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
736     else:
737         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
738     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
739     #
740     for step in range(LagL,duration-1):
741         #
742         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
743         sEL.append(None)
744         #
745         if hasattr(Y,"store"):
746             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
747         else:
748             Ynpu = numpy.ravel( Y ).reshape((__p,1))
749         #
750         if U is not None:
751             if hasattr(U,"store") and len(U)>1:
752                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
753             elif hasattr(U,"store") and len(U)==1:
754                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
755             else:
756                 Un = numpy.asmatrix(numpy.ravel( U )).T
757         else:
758             Un = None
759         #
760         #--------------------------
761         if VariantM == "EnKS16-KalmanFilterFormula":
762             if selfA._parameters["EstimationOf"] == "State": # Forecast
763                 EL = M( [(EL[:,i], Un) for i in range(__m)],
764                     argsAsSerie = True,
765                     returnSerieAsArrayMatrix = True )
766                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
767                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
768                     argsAsSerie = True,
769                     returnSerieAsArrayMatrix = True )
770                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
771                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
772                     EZ = EZ + Cm * Un
773             elif selfA._parameters["EstimationOf"] == "Parameters":
774                 # --- > Par principe, M = Id, Q = 0
775                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
776                     argsAsSerie = True,
777                     returnSerieAsArrayMatrix = True )
778             #
779             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
780             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
781             #
782             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
783             delta = RIdemi @ ( Ynpu - vZm )
784             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
785             vw    = mT @ mS.T @ delta
786             #
787             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
788             mU    = numpy.identity(__m)
789             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
790             #
791             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
792             EL    = vEm + EX @ wTU
793             #
794             sEL[LagL] = EL
795             for irl in range(LagL): # Lissage des L précédentes analysis
796                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
797                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
798                 sEL[irl] = vEm + EX @ wTU
799             #
800             # Conservation de l'analyse retrospective d'ordre 0 avant rotation
801             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
802             if selfA._toStore("APosterioriCovariance"):
803                 EXn = sEL[0]
804             #
805             for irl in range(LagL):
806                 sEL[irl] = sEL[irl+1]
807             sEL[LagL] = None
808         #--------------------------
809         else:
810             raise ValueError("VariantM has to be chosen in the authorized methods list.")
811         #
812         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
813         # ---> avec analysis
814         selfA.StoredVariables["Analysis"].store( Xa )
815         if selfA._toStore("APosterioriCovariance"):
816             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
817     #
818     # Stockage des dernières analyses incomplètement remises à jour
819     for irl in range(LagL):
820         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
821         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
822         selfA.StoredVariables["Analysis"].store( Xa )
823     #
824     return 0
825
826 # ==============================================================================
827 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
828     """
829     Ensemble-Transform EnKF
830     """
831     if selfA._parameters["EstimationOf"] == "Parameters":
832         selfA._parameters["StoreInternalVariables"] = True
833     #
834     # Opérateurs
835     # ----------
836     H = HO["Direct"].appliedControledFormTo
837     #
838     if selfA._parameters["EstimationOf"] == "State":
839         M = EM["Direct"].appliedControledFormTo
840     #
841     if CM is not None and "Tangent" in CM and U is not None:
842         Cm = CM["Tangent"].asMatrix(Xb)
843     else:
844         Cm = None
845     #
846     # Nombre de pas identique au nombre de pas d'observations
847     # -------------------------------------------------------
848     if hasattr(Y,"stepnumber"):
849         duration = Y.stepnumber()
850         __p = numpy.cumprod(Y.shape())[-1]
851     else:
852         duration = 2
853         __p = numpy.array(Y).size
854     #
855     # Précalcul des inversions de B et R
856     # ----------------------------------
857     if selfA._parameters["StoreInternalVariables"] \
858         or selfA._toStore("CostFunctionJ") \
859         or selfA._toStore("CostFunctionJb") \
860         or selfA._toStore("CostFunctionJo") \
861         or selfA._toStore("CurrentOptimum") \
862         or selfA._toStore("APosterioriCovariance"):
863         BI = B.getI()
864         RI = R.getI()
865     elif VariantM != "KalmanFilterFormula":
866         RI = R.getI()
867     if VariantM == "KalmanFilterFormula":
868         RIdemi = R.sqrtmI()
869     #
870     # Initialisation
871     # --------------
872     __n = Xb.size
873     __m = selfA._parameters["NumberOfMembers"]
874     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
875     else:                         Pn = B
876     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
877     #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
878     #
879     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
880         selfA.StoredVariables["Analysis"].store( Xb )
881         if selfA._toStore("APosterioriCovariance"):
882             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
883             covarianceXa = Pn
884     #
885     previousJMinimum = numpy.finfo(float).max
886     #
887     for step in range(duration-1):
888         if hasattr(Y,"store"):
889             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
890         else:
891             Ynpu = numpy.ravel( Y ).reshape((__p,1))
892         #
893         if U is not None:
894             if hasattr(U,"store") and len(U)>1:
895                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
896             elif hasattr(U,"store") and len(U)==1:
897                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
898             else:
899                 Un = numpy.asmatrix(numpy.ravel( U )).T
900         else:
901             Un = None
902         #
903         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
904             Xn = CovarianceInflation( Xn,
905                 selfA._parameters["InflationType"],
906                 selfA._parameters["InflationFactor"],
907                 )
908         #
909         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
910             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
911                 argsAsSerie = True,
912                 returnSerieAsArrayMatrix = True )
913             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
914             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
915                 argsAsSerie = True,
916                 returnSerieAsArrayMatrix = True )
917             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
918                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
919                 Xn_predicted = Xn_predicted + Cm * Un
920         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
921             # --- > Par principe, M = Id, Q = 0
922             Xn_predicted = Xn
923             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
924                 argsAsSerie = True,
925                 returnSerieAsArrayMatrix = True )
926         #
927         # Mean of forecast and observation of forecast
928         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
929         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
930         #
931         # Anomalies
932         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
933         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
934         #
935         #--------------------------
936         if VariantM == "KalmanFilterFormula":
937             mS    = RIdemi * EaHX / math.sqrt(__m-1)
938             delta = RIdemi * ( Ynpu - Hfm )
939             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
940             vw    = mT @ mS.T @ delta
941             #
942             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
943             mU    = numpy.identity(__m)
944             #
945             EaX   = EaX / math.sqrt(__m-1)
946             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
947         #--------------------------
948         elif VariantM == "Variational":
949             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
950             def CostFunction(w):
951                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
952                 _Jo = 0.5 * _A.T @ (RI * _A)
953                 _Jb = 0.5 * (__m-1) * w.T @ w
954                 _J  = _Jo + _Jb
955                 return float(_J)
956             def GradientOfCostFunction(w):
957                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
958                 _GardJo = - EaHX.T @ (RI * _A)
959                 _GradJb = (__m-1) * w.reshape((__m,1))
960                 _GradJ  = _GardJo + _GradJb
961                 return numpy.ravel(_GradJ)
962             vw = scipy.optimize.fmin_cg(
963                 f           = CostFunction,
964                 x0          = numpy.zeros(__m),
965                 fprime      = GradientOfCostFunction,
966                 args        = (),
967                 disp        = False,
968                 )
969             #
970             Hto = EaHX.T @ (RI * EaHX)
971             Htb = (__m-1) * numpy.identity(__m)
972             Hta = Hto + Htb
973             #
974             Pta = numpy.linalg.inv( Hta )
975             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
976             #
977             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
978         #--------------------------
979         elif VariantM == "FiniteSize11": # Jauge Boc2011
980             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
981             def CostFunction(w):
982                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
983                 _Jo = 0.5 * _A.T @ (RI * _A)
984                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
985                 _J  = _Jo + _Jb
986                 return float(_J)
987             def GradientOfCostFunction(w):
988                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
989                 _GardJo = - EaHX.T @ (RI * _A)
990                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
991                 _GradJ  = _GardJo + _GradJb
992                 return numpy.ravel(_GradJ)
993             vw = scipy.optimize.fmin_cg(
994                 f           = CostFunction,
995                 x0          = numpy.zeros(__m),
996                 fprime      = GradientOfCostFunction,
997                 args        = (),
998                 disp        = False,
999                 )
1000             #
1001             Hto = EaHX.T @ (RI * EaHX)
1002             Htb = __m * \
1003                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1004                 / (1 + 1/__m + vw.T @ vw)**2
1005             Hta = Hto + Htb
1006             #
1007             Pta = numpy.linalg.inv( Hta )
1008             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1009             #
1010             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1011         #--------------------------
1012         elif VariantM == "FiniteSize15": # Jauge Boc2015
1013             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1014             def CostFunction(w):
1015                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1016                 _Jo = 0.5 * _A.T * RI * _A
1017                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1018                 _J  = _Jo + _Jb
1019                 return float(_J)
1020             def GradientOfCostFunction(w):
1021                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1022                 _GardJo = - EaHX.T @ (RI * _A)
1023                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1024                 _GradJ  = _GardJo + _GradJb
1025                 return numpy.ravel(_GradJ)
1026             vw = scipy.optimize.fmin_cg(
1027                 f           = CostFunction,
1028                 x0          = numpy.zeros(__m),
1029                 fprime      = GradientOfCostFunction,
1030                 args        = (),
1031                 disp        = False,
1032                 )
1033             #
1034             Hto = EaHX.T @ (RI * EaHX)
1035             Htb = (__m+1) * \
1036                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1037                 / (1 + 1/__m + vw.T @ vw)**2
1038             Hta = Hto + Htb
1039             #
1040             Pta = numpy.linalg.inv( Hta )
1041             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1042             #
1043             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1044         #--------------------------
1045         elif VariantM == "FiniteSize16": # Jauge Boc2016
1046             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1047             def CostFunction(w):
1048                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1049                 _Jo = 0.5 * _A.T @ (RI * _A)
1050                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1051                 _J  = _Jo + _Jb
1052                 return float(_J)
1053             def GradientOfCostFunction(w):
1054                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1055                 _GardJo = - EaHX.T @ (RI * _A)
1056                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1057                 _GradJ  = _GardJo + _GradJb
1058                 return numpy.ravel(_GradJ)
1059             vw = scipy.optimize.fmin_cg(
1060                 f           = CostFunction,
1061                 x0          = numpy.zeros(__m),
1062                 fprime      = GradientOfCostFunction,
1063                 args        = (),
1064                 disp        = False,
1065                 )
1066             #
1067             Hto = EaHX.T @ (RI * EaHX)
1068             Htb = ((__m+1) / (__m-1)) * \
1069                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1070                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1071             Hta = Hto + Htb
1072             #
1073             Pta = numpy.linalg.inv( Hta )
1074             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1075             #
1076             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1077         #--------------------------
1078         else:
1079             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1080         #
1081         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1082             Xn = CovarianceInflation( Xn,
1083                 selfA._parameters["InflationType"],
1084                 selfA._parameters["InflationFactor"],
1085                 )
1086         #
1087         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1088         #--------------------------
1089         #
1090         if selfA._parameters["StoreInternalVariables"] \
1091             or selfA._toStore("CostFunctionJ") \
1092             or selfA._toStore("CostFunctionJb") \
1093             or selfA._toStore("CostFunctionJo") \
1094             or selfA._toStore("APosterioriCovariance") \
1095             or selfA._toStore("InnovationAtCurrentAnalysis") \
1096             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1097             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1098             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1099             _Innovation = Ynpu - _HXa
1100         #
1101         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1102         # ---> avec analysis
1103         selfA.StoredVariables["Analysis"].store( Xa )
1104         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1105             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1106         if selfA._toStore("InnovationAtCurrentAnalysis"):
1107             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1108         # ---> avec current state
1109         if selfA._parameters["StoreInternalVariables"] \
1110             or selfA._toStore("CurrentState"):
1111             selfA.StoredVariables["CurrentState"].store( Xn )
1112         if selfA._toStore("ForecastState"):
1113             selfA.StoredVariables["ForecastState"].store( EMX )
1114         if selfA._toStore("BMA"):
1115             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1116         if selfA._toStore("InnovationAtCurrentState"):
1117             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1118         if selfA._toStore("SimulatedObservationAtCurrentState") \
1119             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1120             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1121         # ---> autres
1122         if selfA._parameters["StoreInternalVariables"] \
1123             or selfA._toStore("CostFunctionJ") \
1124             or selfA._toStore("CostFunctionJb") \
1125             or selfA._toStore("CostFunctionJo") \
1126             or selfA._toStore("CurrentOptimum") \
1127             or selfA._toStore("APosterioriCovariance"):
1128             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1129             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1130             J   = Jb + Jo
1131             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1132             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1133             selfA.StoredVariables["CostFunctionJ" ].store( J )
1134             #
1135             if selfA._toStore("IndexOfOptimum") \
1136                 or selfA._toStore("CurrentOptimum") \
1137                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1138                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1139                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1140                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1141                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1142             if selfA._toStore("IndexOfOptimum"):
1143                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1144             if selfA._toStore("CurrentOptimum"):
1145                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1146             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1147                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1148             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1149                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1150             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1151                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1152             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1153                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1154         if selfA._toStore("APosterioriCovariance"):
1155             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1156         if selfA._parameters["EstimationOf"] == "Parameters" \
1157             and J < previousJMinimum:
1158             previousJMinimum    = J
1159             XaMin               = Xa
1160             if selfA._toStore("APosterioriCovariance"):
1161                 covarianceXaMin = Pn
1162         # ---> Pour les smoothers
1163         if selfA._toStore("CurrentEnsembleState"):
1164             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1165     #
1166     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1167     # ----------------------------------------------------------------------
1168     if selfA._parameters["EstimationOf"] == "Parameters":
1169         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1170         selfA.StoredVariables["Analysis"].store( XaMin )
1171         if selfA._toStore("APosterioriCovariance"):
1172             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1173         if selfA._toStore("BMA"):
1174             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1175     #
1176     return 0
1177
1178 # ==============================================================================
1179 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1180     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1181     """
1182     Iterative EnKF
1183     """
1184     if selfA._parameters["EstimationOf"] == "Parameters":
1185         selfA._parameters["StoreInternalVariables"] = True
1186     #
1187     # Opérateurs
1188     # ----------
1189     H = HO["Direct"].appliedControledFormTo
1190     #
1191     if selfA._parameters["EstimationOf"] == "State":
1192         M = EM["Direct"].appliedControledFormTo
1193     #
1194     if CM is not None and "Tangent" in CM and U is not None:
1195         Cm = CM["Tangent"].asMatrix(Xb)
1196     else:
1197         Cm = None
1198     #
1199     # Nombre de pas identique au nombre de pas d'observations
1200     # -------------------------------------------------------
1201     if hasattr(Y,"stepnumber"):
1202         duration = Y.stepnumber()
1203         __p = numpy.cumprod(Y.shape())[-1]
1204     else:
1205         duration = 2
1206         __p = numpy.array(Y).size
1207     #
1208     # Précalcul des inversions de B et R
1209     # ----------------------------------
1210     if selfA._parameters["StoreInternalVariables"] \
1211         or selfA._toStore("CostFunctionJ") \
1212         or selfA._toStore("CostFunctionJb") \
1213         or selfA._toStore("CostFunctionJo") \
1214         or selfA._toStore("CurrentOptimum") \
1215         or selfA._toStore("APosterioriCovariance"):
1216         BI = B.getI()
1217     RI = R.getI()
1218     #
1219     # Initialisation
1220     # --------------
1221     __n = Xb.size
1222     __m = selfA._parameters["NumberOfMembers"]
1223     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1224     else:                         Pn = B
1225     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1226     else:                         Rn = R
1227     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1228     else:                         Qn = Q
1229     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1230     #
1231     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1232         selfA.StoredVariables["Analysis"].store( Xb )
1233         if selfA._toStore("APosterioriCovariance"):
1234             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1235             covarianceXa = Pn
1236     #
1237     previousJMinimum = numpy.finfo(float).max
1238     #
1239     for step in range(duration-1):
1240         if hasattr(Y,"store"):
1241             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1242         else:
1243             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1244         #
1245         if U is not None:
1246             if hasattr(U,"store") and len(U)>1:
1247                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1248             elif hasattr(U,"store") and len(U)==1:
1249                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1250             else:
1251                 Un = numpy.asmatrix(numpy.ravel( U )).T
1252         else:
1253             Un = None
1254         #
1255         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1256             Xn = CovarianceInflation( Xn,
1257                 selfA._parameters["InflationType"],
1258                 selfA._parameters["InflationFactor"],
1259                 )
1260         #
1261         #--------------------------
1262         if VariantM == "IEnKF12":
1263             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1264             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1265             __j = 0
1266             Deltaw = 1
1267             if not BnotT:
1268                 Ta  = numpy.identity(__m)
1269             vw  = numpy.zeros(__m)
1270             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1271                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1272                 #
1273                 if BnotT:
1274                     E1 = vx1 + _epsilon * EaX
1275                 else:
1276                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1277                 #
1278                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1279                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1280                         argsAsSerie = True,
1281                         returnSerieAsArrayMatrix = True )
1282                 elif selfA._parameters["EstimationOf"] == "Parameters":
1283                     # --- > Par principe, M = Id
1284                     E2 = Xn
1285                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1286                 vy1 = H((vx2, Un)).reshape((__p,1))
1287                 #
1288                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1289                     argsAsSerie = True,
1290                     returnSerieAsArrayMatrix = True )
1291                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1292                 #
1293                 if BnotT:
1294                     EaY = (HE2 - vy2) / _epsilon
1295                 else:
1296                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1297                 #
1298                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1299                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1300                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1301                 #
1302                 vw = vw + Deltaw
1303                 #
1304                 if not BnotT:
1305                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1306                 #
1307                 __j = __j + 1
1308             #
1309             A2 = EnsembleOfAnomalies( E2 )
1310             #
1311             if BnotT:
1312                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1313                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1314             #
1315             Xn = vx2 + A2
1316         #--------------------------
1317         else:
1318             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1319         #
1320         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1321             Xn = CovarianceInflation( Xn,
1322                 selfA._parameters["InflationType"],
1323                 selfA._parameters["InflationFactor"],
1324                 )
1325         #
1326         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1327         #--------------------------
1328         #
1329         if selfA._parameters["StoreInternalVariables"] \
1330             or selfA._toStore("CostFunctionJ") \
1331             or selfA._toStore("CostFunctionJb") \
1332             or selfA._toStore("CostFunctionJo") \
1333             or selfA._toStore("APosterioriCovariance") \
1334             or selfA._toStore("InnovationAtCurrentAnalysis") \
1335             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1336             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1337             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1338             _Innovation = Ynpu - _HXa
1339         #
1340         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1341         # ---> avec analysis
1342         selfA.StoredVariables["Analysis"].store( Xa )
1343         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1344             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1345         if selfA._toStore("InnovationAtCurrentAnalysis"):
1346             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1347         # ---> avec current state
1348         if selfA._parameters["StoreInternalVariables"] \
1349             or selfA._toStore("CurrentState"):
1350             selfA.StoredVariables["CurrentState"].store( Xn )
1351         if selfA._toStore("ForecastState"):
1352             selfA.StoredVariables["ForecastState"].store( E2 )
1353         if selfA._toStore("BMA"):
1354             selfA.StoredVariables["BMA"].store( E2 - Xa )
1355         if selfA._toStore("InnovationAtCurrentState"):
1356             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1357         if selfA._toStore("SimulatedObservationAtCurrentState") \
1358             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1359             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1360         # ---> autres
1361         if selfA._parameters["StoreInternalVariables"] \
1362             or selfA._toStore("CostFunctionJ") \
1363             or selfA._toStore("CostFunctionJb") \
1364             or selfA._toStore("CostFunctionJo") \
1365             or selfA._toStore("CurrentOptimum") \
1366             or selfA._toStore("APosterioriCovariance"):
1367             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1368             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1369             J   = Jb + Jo
1370             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1371             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1372             selfA.StoredVariables["CostFunctionJ" ].store( J )
1373             #
1374             if selfA._toStore("IndexOfOptimum") \
1375                 or selfA._toStore("CurrentOptimum") \
1376                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1377                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1378                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1379                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1380                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1381             if selfA._toStore("IndexOfOptimum"):
1382                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1383             if selfA._toStore("CurrentOptimum"):
1384                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1385             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1386                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1387             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1388                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1389             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1390                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1391             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1392                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1393         if selfA._toStore("APosterioriCovariance"):
1394             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1395         if selfA._parameters["EstimationOf"] == "Parameters" \
1396             and J < previousJMinimum:
1397             previousJMinimum    = J
1398             XaMin               = Xa
1399             if selfA._toStore("APosterioriCovariance"):
1400                 covarianceXaMin = Pn
1401     #
1402     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1403     # ----------------------------------------------------------------------
1404     if selfA._parameters["EstimationOf"] == "Parameters":
1405         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1406         selfA.StoredVariables["Analysis"].store( XaMin )
1407         if selfA._toStore("APosterioriCovariance"):
1408             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1409         if selfA._toStore("BMA"):
1410             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1411     #
1412     return 0
1413
1414 # ==============================================================================
1415 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1416     """
1417     3DVAR incrémental
1418     """
1419     #
1420     # Initialisations
1421     # ---------------
1422     #
1423     # Opérateur non-linéaire pour la boucle externe
1424     Hm = HO["Direct"].appliedTo
1425     #
1426     # Précalcul des inversions de B et R
1427     BI = B.getI()
1428     RI = R.getI()
1429     #
1430     # Point de démarrage de l'optimisation
1431     Xini = selfA._parameters["InitializationPoint"]
1432     #
1433     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1434     Innovation = Y - HXb
1435     #
1436     # Outer Loop
1437     # ----------
1438     iOuter = 0
1439     J      = 1./mpr
1440     DeltaJ = 1./mpr
1441     Xr     = Xini.reshape((-1,1))
1442     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1443         #
1444         # Inner Loop
1445         # ----------
1446         Ht = HO["Tangent"].asMatrix(Xr)
1447         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1448         #
1449         # Définition de la fonction-coût
1450         # ------------------------------
1451         def CostFunction(dx):
1452             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1453             if selfA._parameters["StoreInternalVariables"] or \
1454                 selfA._toStore("CurrentState") or \
1455                 selfA._toStore("CurrentOptimum"):
1456                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1457             _HdX = Ht * _dX
1458             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1459             _dInnovation = Innovation - _HdX
1460             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1461                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1462                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1463             if selfA._toStore("InnovationAtCurrentState"):
1464                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1465             #
1466             Jb  = float( 0.5 * _dX.T * BI * _dX )
1467             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1468             J   = Jb + Jo
1469             #
1470             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1471             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1472             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1473             selfA.StoredVariables["CostFunctionJ" ].store( J )
1474             if selfA._toStore("IndexOfOptimum") or \
1475                 selfA._toStore("CurrentOptimum") or \
1476                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1477                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1478                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1479                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1480                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1481             if selfA._toStore("IndexOfOptimum"):
1482                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1483             if selfA._toStore("CurrentOptimum"):
1484                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1485             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1486                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1487             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1488                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1489             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1490                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1491             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1492                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1493             return J
1494         #
1495         def GradientOfCostFunction(dx):
1496             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1497             _HdX         = Ht * _dX
1498             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1499             _dInnovation = Innovation - _HdX
1500             GradJb       = BI * _dX
1501             GradJo       = - Ht.T @ (RI * _dInnovation)
1502             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1503             return GradJ
1504         #
1505         # Minimisation de la fonctionnelle
1506         # --------------------------------
1507         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1508         #
1509         if selfA._parameters["Minimizer"] == "LBFGSB":
1510             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1511             if "0.19" <= scipy.version.version <= "1.1.0":
1512                 import lbfgsbhlt as optimiseur
1513             else:
1514                 import scipy.optimize as optimiseur
1515             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1516                 func        = CostFunction,
1517                 x0          = numpy.zeros(Xini.size),
1518                 fprime      = GradientOfCostFunction,
1519                 args        = (),
1520                 bounds      = selfA._parameters["Bounds"],
1521                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1522                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1523                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1524                 iprint      = selfA._parameters["optiprint"],
1525                 )
1526             nfeval = Informations['funcalls']
1527             rc     = Informations['warnflag']
1528         elif selfA._parameters["Minimizer"] == "TNC":
1529             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1530                 func        = CostFunction,
1531                 x0          = numpy.zeros(Xini.size),
1532                 fprime      = GradientOfCostFunction,
1533                 args        = (),
1534                 bounds      = selfA._parameters["Bounds"],
1535                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1536                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1537                 ftol        = selfA._parameters["CostDecrementTolerance"],
1538                 messages    = selfA._parameters["optmessages"],
1539                 )
1540         elif selfA._parameters["Minimizer"] == "CG":
1541             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1542                 f           = CostFunction,
1543                 x0          = numpy.zeros(Xini.size),
1544                 fprime      = GradientOfCostFunction,
1545                 args        = (),
1546                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1547                 gtol        = selfA._parameters["GradientNormTolerance"],
1548                 disp        = selfA._parameters["optdisp"],
1549                 full_output = True,
1550                 )
1551         elif selfA._parameters["Minimizer"] == "NCG":
1552             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1553                 f           = CostFunction,
1554                 x0          = numpy.zeros(Xini.size),
1555                 fprime      = GradientOfCostFunction,
1556                 args        = (),
1557                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1558                 avextol     = selfA._parameters["CostDecrementTolerance"],
1559                 disp        = selfA._parameters["optdisp"],
1560                 full_output = True,
1561                 )
1562         elif selfA._parameters["Minimizer"] == "BFGS":
1563             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1564                 f           = CostFunction,
1565                 x0          = numpy.zeros(Xini.size),
1566                 fprime      = GradientOfCostFunction,
1567                 args        = (),
1568                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1569                 gtol        = selfA._parameters["GradientNormTolerance"],
1570                 disp        = selfA._parameters["optdisp"],
1571                 full_output = True,
1572                 )
1573         else:
1574             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1575         #
1576         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1577         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1578         #
1579         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1580             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1581             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1582         else:
1583             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1584         #
1585         Xr     = Minimum
1586         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1587         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1588     #
1589     # Obtention de l'analyse
1590     # ----------------------
1591     Xa = Xr
1592     #
1593     selfA.StoredVariables["Analysis"].store( Xa )
1594     #
1595     if selfA._toStore("OMA") or \
1596         selfA._toStore("SigmaObs2") or \
1597         selfA._toStore("SimulationQuantiles") or \
1598         selfA._toStore("SimulatedObservationAtOptimum"):
1599         if selfA._toStore("SimulatedObservationAtCurrentState"):
1600             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1601         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1602             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1603         else:
1604             HXa = Hm( Xa )
1605     #
1606     # Calcul de la covariance d'analyse
1607     # ---------------------------------
1608     if selfA._toStore("APosterioriCovariance") or \
1609         selfA._toStore("SimulationQuantiles") or \
1610         selfA._toStore("JacobianMatrixAtOptimum") or \
1611         selfA._toStore("KalmanGainAtOptimum"):
1612         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1613         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1614     if selfA._toStore("APosterioriCovariance") or \
1615         selfA._toStore("SimulationQuantiles") or \
1616         selfA._toStore("KalmanGainAtOptimum"):
1617         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1618         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1619     if selfA._toStore("APosterioriCovariance") or \
1620         selfA._toStore("SimulationQuantiles"):
1621         HessienneI = []
1622         nb = Xa.size
1623         for i in range(nb):
1624             _ee    = numpy.matrix(numpy.zeros(nb)).T
1625             _ee[i] = 1.
1626             _HtEE  = numpy.dot(HtM,_ee)
1627             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1628             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1629         HessienneI = numpy.matrix( HessienneI )
1630         A = HessienneI.I
1631         if min(A.shape) != max(A.shape):
1632             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1633         if (numpy.diag(A) < 0).any():
1634             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1635         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
1636             try:
1637                 L = numpy.linalg.cholesky( A )
1638             except:
1639                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1640     if selfA._toStore("APosterioriCovariance"):
1641         selfA.StoredVariables["APosterioriCovariance"].store( A )
1642     if selfA._toStore("JacobianMatrixAtOptimum"):
1643         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1644     if selfA._toStore("KalmanGainAtOptimum"):
1645         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1646         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1647         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1648     #
1649     # Calculs et/ou stockages supplémentaires
1650     # ---------------------------------------
1651     if selfA._toStore("Innovation") or \
1652         selfA._toStore("SigmaObs2") or \
1653         selfA._toStore("MahalanobisConsistency") or \
1654         selfA._toStore("OMB"):
1655         d  = Y - HXb
1656     if selfA._toStore("Innovation"):
1657         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1658     if selfA._toStore("BMA"):
1659         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1660     if selfA._toStore("OMA"):
1661         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1662     if selfA._toStore("OMB"):
1663         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1664     if selfA._toStore("SigmaObs2"):
1665         TraceR = R.trace(Y.size)
1666         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1667     if selfA._toStore("MahalanobisConsistency"):
1668         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1669     if selfA._toStore("SimulationQuantiles"):
1670         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1671         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1672         EXr  = None
1673         YfQ  = None
1674         for i in range(nech):
1675             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1676                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1677                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1678                 Yr = HXa + dYr
1679                 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
1680             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1681                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1682                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1683             if YfQ is None:
1684                 YfQ = Yr
1685                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
1686             else:
1687                 YfQ = numpy.hstack((YfQ,Yr))
1688                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
1689         YfQ.sort(axis=-1)
1690         YQ = None
1691         for quantile in selfA._parameters["Quantiles"]:
1692             if not (0. <= float(quantile) <= 1.): continue
1693             indice = int(nech * float(quantile) - 1./nech)
1694             if YQ is None: YQ = YfQ[:,indice]
1695             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1696         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1697         if selfA._toStore("SampledStateForQuantiles"):
1698             selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
1699     if selfA._toStore("SimulatedObservationAtBackground"):
1700         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1701     if selfA._toStore("SimulatedObservationAtOptimum"):
1702         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1703     #
1704     return 0
1705
1706 # ==============================================================================
1707 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1708     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1709     """
1710     Maximum Likelihood Ensemble Filter
1711     """
1712     if selfA._parameters["EstimationOf"] == "Parameters":
1713         selfA._parameters["StoreInternalVariables"] = True
1714     #
1715     # Opérateurs
1716     # ----------
1717     H = HO["Direct"].appliedControledFormTo
1718     #
1719     if selfA._parameters["EstimationOf"] == "State":
1720         M = EM["Direct"].appliedControledFormTo
1721     #
1722     if CM is not None and "Tangent" in CM and U is not None:
1723         Cm = CM["Tangent"].asMatrix(Xb)
1724     else:
1725         Cm = None
1726     #
1727     # Nombre de pas identique au nombre de pas d'observations
1728     # -------------------------------------------------------
1729     if hasattr(Y,"stepnumber"):
1730         duration = Y.stepnumber()
1731         __p = numpy.cumprod(Y.shape())[-1]
1732     else:
1733         duration = 2
1734         __p = numpy.array(Y).size
1735     #
1736     # Précalcul des inversions de B et R
1737     # ----------------------------------
1738     if selfA._parameters["StoreInternalVariables"] \
1739         or selfA._toStore("CostFunctionJ") \
1740         or selfA._toStore("CostFunctionJb") \
1741         or selfA._toStore("CostFunctionJo") \
1742         or selfA._toStore("CurrentOptimum") \
1743         or selfA._toStore("APosterioriCovariance"):
1744         BI = B.getI()
1745     RI = R.getI()
1746     #
1747     # Initialisation
1748     # --------------
1749     __n = Xb.size
1750     __m = selfA._parameters["NumberOfMembers"]
1751     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1752     else:                         Pn = B
1753     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1754     else:                         Rn = R
1755     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1756     #
1757     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1758         selfA.StoredVariables["Analysis"].store( Xb )
1759         if selfA._toStore("APosterioriCovariance"):
1760             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1761             covarianceXa = Pn
1762     #
1763     previousJMinimum = numpy.finfo(float).max
1764     #
1765     for step in range(duration-1):
1766         if hasattr(Y,"store"):
1767             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1768         else:
1769             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1770         #
1771         if U is not None:
1772             if hasattr(U,"store") and len(U)>1:
1773                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1774             elif hasattr(U,"store") and len(U)==1:
1775                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1776             else:
1777                 Un = numpy.asmatrix(numpy.ravel( U )).T
1778         else:
1779             Un = None
1780         #
1781         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1782             Xn = CovarianceInflation( Xn,
1783                 selfA._parameters["InflationType"],
1784                 selfA._parameters["InflationFactor"],
1785                 )
1786         #
1787         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1788             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1789                 argsAsSerie = True,
1790                 returnSerieAsArrayMatrix = True )
1791             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1792             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1793                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1794                 Xn_predicted = Xn_predicted + Cm * Un
1795         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1796             # --- > Par principe, M = Id, Q = 0
1797             Xn_predicted = Xn
1798         #
1799         #--------------------------
1800         if VariantM == "MLEF13":
1801             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1802             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1803             Ua  = numpy.identity(__m)
1804             __j = 0
1805             Deltaw = 1
1806             if not BnotT:
1807                 Ta  = numpy.identity(__m)
1808             vw  = numpy.zeros(__m)
1809             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1810                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1811                 #
1812                 if BnotT:
1813                     E1 = vx1 + _epsilon * EaX
1814                 else:
1815                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1816                 #
1817                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1818                     argsAsSerie = True,
1819                     returnSerieAsArrayMatrix = True )
1820                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1821                 #
1822                 if BnotT:
1823                     EaY = (HE2 - vy2) / _epsilon
1824                 else:
1825                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1826                 #
1827                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1828                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1829                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1830                 #
1831                 vw = vw + Deltaw
1832                 #
1833                 if not BnotT:
1834                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1835                 #
1836                 __j = __j + 1
1837             #
1838             if BnotT:
1839                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1840             #
1841             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1842         #--------------------------
1843         else:
1844             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1845         #
1846         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1847             Xn = CovarianceInflation( Xn,
1848                 selfA._parameters["InflationType"],
1849                 selfA._parameters["InflationFactor"],
1850                 )
1851         #
1852         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1853         #--------------------------
1854         #
1855         if selfA._parameters["StoreInternalVariables"] \
1856             or selfA._toStore("CostFunctionJ") \
1857             or selfA._toStore("CostFunctionJb") \
1858             or selfA._toStore("CostFunctionJo") \
1859             or selfA._toStore("APosterioriCovariance") \
1860             or selfA._toStore("InnovationAtCurrentAnalysis") \
1861             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1862             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1863             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1864             _Innovation = Ynpu - _HXa
1865         #
1866         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1867         # ---> avec analysis
1868         selfA.StoredVariables["Analysis"].store( Xa )
1869         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1870             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1871         if selfA._toStore("InnovationAtCurrentAnalysis"):
1872             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1873         # ---> avec current state
1874         if selfA._parameters["StoreInternalVariables"] \
1875             or selfA._toStore("CurrentState"):
1876             selfA.StoredVariables["CurrentState"].store( Xn )
1877         if selfA._toStore("ForecastState"):
1878             selfA.StoredVariables["ForecastState"].store( EMX )
1879         if selfA._toStore("BMA"):
1880             selfA.StoredVariables["BMA"].store( EMX - Xa )
1881         if selfA._toStore("InnovationAtCurrentState"):
1882             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1883         if selfA._toStore("SimulatedObservationAtCurrentState") \
1884             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1885             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1886         # ---> autres
1887         if selfA._parameters["StoreInternalVariables"] \
1888             or selfA._toStore("CostFunctionJ") \
1889             or selfA._toStore("CostFunctionJb") \
1890             or selfA._toStore("CostFunctionJo") \
1891             or selfA._toStore("CurrentOptimum") \
1892             or selfA._toStore("APosterioriCovariance"):
1893             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1894             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1895             J   = Jb + Jo
1896             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1897             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1898             selfA.StoredVariables["CostFunctionJ" ].store( J )
1899             #
1900             if selfA._toStore("IndexOfOptimum") \
1901                 or selfA._toStore("CurrentOptimum") \
1902                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1903                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1904                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1905                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1906                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1907             if selfA._toStore("IndexOfOptimum"):
1908                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1909             if selfA._toStore("CurrentOptimum"):
1910                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1911             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1912                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1913             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1914                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1915             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1916                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1917             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1918                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1919         if selfA._toStore("APosterioriCovariance"):
1920             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1921         if selfA._parameters["EstimationOf"] == "Parameters" \
1922             and J < previousJMinimum:
1923             previousJMinimum    = J
1924             XaMin               = Xa
1925             if selfA._toStore("APosterioriCovariance"):
1926                 covarianceXaMin = Pn
1927     #
1928     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1929     # ----------------------------------------------------------------------
1930     if selfA._parameters["EstimationOf"] == "Parameters":
1931         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1932         selfA.StoredVariables["Analysis"].store( XaMin )
1933         if selfA._toStore("APosterioriCovariance"):
1934             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1935         if selfA._toStore("BMA"):
1936             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1937     #
1938     return 0
1939
1940 # ==============================================================================
1941 def mmqr(
1942         func     = None,
1943         x0       = None,
1944         fprime   = None,
1945         bounds   = None,
1946         quantile = 0.5,
1947         maxfun   = 15000,
1948         toler    = 1.e-06,
1949         y        = None,
1950         ):
1951     """
1952     Implémentation informatique de l'algorithme MMQR, basée sur la publication :
1953     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1954     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1955     """
1956     #
1957     # Recuperation des donnees et informations initiales
1958     # --------------------------------------------------
1959     variables = numpy.ravel( x0 )
1960     mesures   = numpy.ravel( y )
1961     increment = sys.float_info[0]
1962     p         = variables.size
1963     n         = mesures.size
1964     quantile  = float(quantile)
1965     #
1966     # Calcul des parametres du MM
1967     # ---------------------------
1968     tn      = float(toler) / n
1969     e0      = -tn / math.log(tn)
1970     epsilon = (e0-tn)/(1+math.log(e0))
1971     #
1972     # Calculs d'initialisation
1973     # ------------------------
1974     residus  = mesures - numpy.ravel( func( variables ) )
1975     poids    = 1./(epsilon+numpy.abs(residus))
1976     veps     = 1. - 2. * quantile - residus * poids
1977     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1978     iteration = 0
1979     #
1980     # Recherche iterative
1981     # -------------------
1982     while (increment > toler) and (iteration < maxfun) :
1983         iteration += 1
1984         #
1985         Derivees  = numpy.array(fprime(variables))
1986         Derivees  = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
1987         DeriveesT = Derivees.transpose()
1988         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1989         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
1990         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1991         #
1992         variables = variables + step
1993         if bounds is not None:
1994             # Attention : boucle infinie à éviter si un intervalle est trop petit
1995             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1996                 step      = step/2.
1997                 variables = variables - step
1998         residus   = mesures - numpy.ravel( func(variables) )
1999         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2000         #
2001         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2002             step      = step/2.
2003             variables = variables - step
2004             residus   = mesures - numpy.ravel( func(variables) )
2005             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2006         #
2007         increment     = lastsurrogate-surrogate
2008         poids         = 1./(epsilon+numpy.abs(residus))
2009         veps          = 1. - 2. * quantile - residus * poids
2010         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2011     #
2012     # Mesure d'écart
2013     # --------------
2014     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2015     #
2016     return variables, Ecart, [n,p,iteration,increment,0]
2017
2018 # ==============================================================================
2019 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2020     """
2021     3DVAR multi-pas et multi-méthodes
2022     """
2023     #
2024     # Initialisation
2025     # --------------
2026     Xn = numpy.ravel(Xb).reshape((-1,1))
2027     #
2028     if selfA._parameters["EstimationOf"] == "State":
2029         M = EM["Direct"].appliedTo
2030         #
2031         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2032             selfA.StoredVariables["Analysis"].store( Xn )
2033             if selfA._toStore("APosterioriCovariance"):
2034                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
2035                 else:                         Pn = B
2036                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2037             if selfA._toStore("ForecastState"):
2038                 selfA.StoredVariables["ForecastState"].store( Xn )
2039     #
2040     if hasattr(Y,"stepnumber"):
2041         duration = Y.stepnumber()
2042     else:
2043         duration = 2
2044     #
2045     # Multi-pas
2046     # ---------
2047     for step in range(duration-1):
2048         if hasattr(Y,"store"):
2049             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2050         else:
2051             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2052         #
2053         if selfA._parameters["EstimationOf"] == "State": # Forecast
2054             Xn = selfA.StoredVariables["Analysis"][-1]
2055             Xn_predicted = M( Xn )
2056             if selfA._toStore("ForecastState"):
2057                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2058         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2059             # --- > Par principe, M = Id, Q = 0
2060             Xn_predicted = Xn
2061         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2062         #
2063         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2064     #
2065     return 0
2066
2067 # ==============================================================================
2068 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2069     """
2070     3DVAR PSAS
2071     """
2072     #
2073     # Initialisations
2074     # ---------------
2075     #
2076     # Opérateurs
2077     Hm = HO["Direct"].appliedTo
2078     #
2079     # Utilisation éventuelle d'un vecteur H(Xb) précalculé
2080     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2081         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2082     else:
2083         HXb = Hm( Xb )
2084     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2085     if Y.size != HXb.size:
2086         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2087     if max(Y.shape) != max(HXb.shape):
2088         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2089     #
2090     if selfA._toStore("JacobianMatrixAtBackground"):
2091         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2092         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2093         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2094     #
2095     Ht = HO["Tangent"].asMatrix(Xb)
2096     BHT = B * Ht.T
2097     HBHTpR = R + Ht * BHT
2098     Innovation = Y - HXb
2099     #
2100     # Point de démarrage de l'optimisation
2101     Xini = numpy.zeros(Xb.shape)
2102     #
2103     # Définition de la fonction-coût
2104     # ------------------------------
2105     def CostFunction(w):
2106         _W = numpy.asmatrix(numpy.ravel( w )).T
2107         if selfA._parameters["StoreInternalVariables"] or \
2108             selfA._toStore("CurrentState") or \
2109             selfA._toStore("CurrentOptimum"):
2110             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2111         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2112             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2113             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2114         if selfA._toStore("InnovationAtCurrentState"):
2115             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2116         #
2117         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2118         Jo  = float( - _W.T * Innovation )
2119         J   = Jb + Jo
2120         #
2121         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2122         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2123         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2124         selfA.StoredVariables["CostFunctionJ" ].store( J )
2125         if selfA._toStore("IndexOfOptimum") or \
2126             selfA._toStore("CurrentOptimum") or \
2127             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2128             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2129             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2130             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2131             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2132         if selfA._toStore("IndexOfOptimum"):
2133             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2134         if selfA._toStore("CurrentOptimum"):
2135             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2136         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2137             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2138         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2139             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2140         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2141             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2142         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2143             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2144         return J
2145     #
2146     def GradientOfCostFunction(w):
2147         _W = numpy.asmatrix(numpy.ravel( w )).T
2148         GradJb  = HBHTpR * _W
2149         GradJo  = - Innovation
2150         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2151         return GradJ
2152     #
2153     # Minimisation de la fonctionnelle
2154     # --------------------------------
2155     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2156     #
2157     if selfA._parameters["Minimizer"] == "LBFGSB":
2158         if "0.19" <= scipy.version.version <= "1.1.0":
2159             import lbfgsbhlt as optimiseur
2160         else:
2161             import scipy.optimize as optimiseur
2162         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2163             func        = CostFunction,
2164             x0          = Xini,
2165             fprime      = GradientOfCostFunction,
2166             args        = (),
2167             bounds      = selfA._parameters["Bounds"],
2168             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2169             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2170             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2171             iprint      = selfA._parameters["optiprint"],
2172             )
2173         nfeval = Informations['funcalls']
2174         rc     = Informations['warnflag']
2175     elif selfA._parameters["Minimizer"] == "TNC":
2176         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2177             func        = CostFunction,
2178             x0          = Xini,
2179             fprime      = GradientOfCostFunction,
2180             args        = (),
2181             bounds      = selfA._parameters["Bounds"],
2182             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2183             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2184             ftol        = selfA._parameters["CostDecrementTolerance"],
2185             messages    = selfA._parameters["optmessages"],
2186             )
2187     elif selfA._parameters["Minimizer"] == "CG":
2188         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2189             f           = CostFunction,
2190             x0          = Xini,
2191             fprime      = GradientOfCostFunction,
2192             args        = (),
2193             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2194             gtol        = selfA._parameters["GradientNormTolerance"],
2195             disp        = selfA._parameters["optdisp"],
2196             full_output = True,
2197             )
2198     elif selfA._parameters["Minimizer"] == "NCG":
2199         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2200             f           = CostFunction,
2201             x0          = Xini,
2202             fprime      = GradientOfCostFunction,
2203             args        = (),
2204             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2205             avextol     = selfA._parameters["CostDecrementTolerance"],
2206             disp        = selfA._parameters["optdisp"],
2207             full_output = True,
2208             )
2209     elif selfA._parameters["Minimizer"] == "BFGS":
2210         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2211             f           = CostFunction,
2212             x0          = Xini,
2213             fprime      = GradientOfCostFunction,
2214             args        = (),
2215             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2216             gtol        = selfA._parameters["GradientNormTolerance"],
2217             disp        = selfA._parameters["optdisp"],
2218             full_output = True,
2219             )
2220     else:
2221         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2222     #
2223     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2224     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2225     #
2226     # Correction pour pallier a un bug de TNC sur le retour du Minimum
2227     # ----------------------------------------------------------------
2228     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2229         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2230         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2231     else:
2232         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2233     #
2234     # Obtention de l'analyse
2235     # ----------------------
2236     Xa = Minimum
2237     #
2238     selfA.StoredVariables["Analysis"].store( Xa )
2239     #
2240     if selfA._toStore("OMA") or \
2241         selfA._toStore("SigmaObs2") or \
2242         selfA._toStore("SimulationQuantiles") or \
2243         selfA._toStore("SimulatedObservationAtOptimum"):
2244         if selfA._toStore("SimulatedObservationAtCurrentState"):
2245             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2246         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2247             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2248         else:
2249             HXa = Hm( Xa )
2250     #
2251     # Calcul de la covariance d'analyse
2252     # ---------------------------------
2253     if selfA._toStore("APosterioriCovariance") or \
2254         selfA._toStore("SimulationQuantiles") or \
2255         selfA._toStore("JacobianMatrixAtOptimum") or \
2256         selfA._toStore("KalmanGainAtOptimum"):
2257         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2258         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2259     if selfA._toStore("APosterioriCovariance") or \
2260         selfA._toStore("SimulationQuantiles") or \
2261         selfA._toStore("KalmanGainAtOptimum"):
2262         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2263         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2264     if selfA._toStore("APosterioriCovariance") or \
2265         selfA._toStore("SimulationQuantiles"):
2266         BI = B.getI()
2267         RI = R.getI()
2268         HessienneI = []
2269         nb = Xa.size
2270         for i in range(nb):
2271             _ee    = numpy.matrix(numpy.zeros(nb)).T
2272             _ee[i] = 1.
2273             _HtEE  = numpy.dot(HtM,_ee)
2274             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2275             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2276         HessienneI = numpy.matrix( HessienneI )
2277         A = HessienneI.I
2278         if min(A.shape) != max(A.shape):
2279             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2280         if (numpy.diag(A) < 0).any():
2281             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2282         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
2283             try:
2284                 L = numpy.linalg.cholesky( A )
2285             except:
2286                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2287     if selfA._toStore("APosterioriCovariance"):
2288         selfA.StoredVariables["APosterioriCovariance"].store( A )
2289     if selfA._toStore("JacobianMatrixAtOptimum"):
2290         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2291     if selfA._toStore("KalmanGainAtOptimum"):
2292         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2293         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2294         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2295     #
2296     # Calculs et/ou stockages supplémentaires
2297     # ---------------------------------------
2298     if selfA._toStore("Innovation") or \
2299         selfA._toStore("SigmaObs2") or \
2300         selfA._toStore("MahalanobisConsistency") or \
2301         selfA._toStore("OMB"):
2302         d  = Y - HXb
2303     if selfA._toStore("Innovation"):
2304         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2305     if selfA._toStore("BMA"):
2306         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2307     if selfA._toStore("OMA"):
2308         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2309     if selfA._toStore("OMB"):
2310         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2311     if selfA._toStore("SigmaObs2"):
2312         TraceR = R.trace(Y.size)
2313         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2314     if selfA._toStore("MahalanobisConsistency"):
2315         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2316     if selfA._toStore("SimulationQuantiles"):
2317         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2318         HXa  = numpy.matrix(numpy.ravel( HXa )).T
2319         EXr  = None
2320         YfQ  = None
2321         for i in range(nech):
2322             if selfA._parameters["SimulationForQuantiles"] == "Linear":
2323                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2324                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2325                 Yr = HXa + dYr
2326                 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
2327             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2328                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2329                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2330             if YfQ is None:
2331                 YfQ = Yr
2332                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
2333             else:
2334                 YfQ = numpy.hstack((YfQ,Yr))
2335                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
2336         YfQ.sort(axis=-1)
2337         YQ = None
2338         for quantile in selfA._parameters["Quantiles"]:
2339             if not (0. <= float(quantile) <= 1.): continue
2340             indice = int(nech * float(quantile) - 1./nech)
2341             if YQ is None: YQ = YfQ[:,indice]
2342             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
2343         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2344         if selfA._toStore("SampledStateForQuantiles"):
2345             selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
2346     if selfA._toStore("SimulatedObservationAtBackground"):
2347         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2348     if selfA._toStore("SimulatedObservationAtOptimum"):
2349         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2350     #
2351     return 0
2352
2353 # ==============================================================================
2354 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2355     """
2356     Stochastic EnKF
2357     """
2358     if selfA._parameters["EstimationOf"] == "Parameters":
2359         selfA._parameters["StoreInternalVariables"] = True
2360     #
2361     # Opérateurs
2362     H = HO["Direct"].appliedControledFormTo
2363     #
2364     if selfA._parameters["EstimationOf"] == "State":
2365         M = EM["Direct"].appliedControledFormTo
2366     #
2367     if CM is not None and "Tangent" in CM and U is not None:
2368         Cm = CM["Tangent"].asMatrix(Xb)
2369     else:
2370         Cm = None
2371     #
2372     # Durée d'observation et tailles
2373     if hasattr(Y,"stepnumber"):
2374         duration = Y.stepnumber()
2375         __p = numpy.cumprod(Y.shape())[-1]
2376     else:
2377         duration = 2
2378         __p = numpy.array(Y).size
2379     #
2380     # Précalcul des inversions de B et R
2381     if selfA._parameters["StoreInternalVariables"] \
2382         or selfA._toStore("CostFunctionJ") \
2383         or selfA._toStore("CostFunctionJb") \
2384         or selfA._toStore("CostFunctionJo") \
2385         or selfA._toStore("CurrentOptimum") \
2386         or selfA._toStore("APosterioriCovariance"):
2387         BI = B.getI()
2388         RI = R.getI()
2389     #
2390     __n = Xb.size
2391     __m = selfA._parameters["NumberOfMembers"]
2392     #
2393     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2394     else:                         Pn = B
2395     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2396     else:                         Rn = R
2397     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2398     #
2399     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2400         selfA.StoredVariables["Analysis"].store( Xb )
2401         if selfA._toStore("APosterioriCovariance"):
2402             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2403             covarianceXa = Pn
2404     #
2405     previousJMinimum = numpy.finfo(float).max
2406     #
2407     for step in range(duration-1):
2408         if hasattr(Y,"store"):
2409             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2410         else:
2411             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2412         #
2413         if U is not None:
2414             if hasattr(U,"store") and len(U)>1:
2415                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2416             elif hasattr(U,"store") and len(U)==1:
2417                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2418             else:
2419                 Un = numpy.asmatrix(numpy.ravel( U )).T
2420         else:
2421             Un = None
2422         #
2423         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2424             Xn = CovarianceInflation( Xn,
2425                 selfA._parameters["InflationType"],
2426                 selfA._parameters["InflationFactor"],
2427                 )
2428         #
2429         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2430             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2431                 argsAsSerie = True,
2432                 returnSerieAsArrayMatrix = True )
2433             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2434             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2435                 argsAsSerie = True,
2436                 returnSerieAsArrayMatrix = True )
2437             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2438                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2439                 Xn_predicted = Xn_predicted + Cm * Un
2440         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2441             # --- > Par principe, M = Id, Q = 0
2442             Xn_predicted = Xn
2443             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2444                 argsAsSerie = True,
2445                 returnSerieAsArrayMatrix = True )
2446         #
2447         # Mean of forecast and observation of forecast
2448         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2449         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2450         #
2451         #--------------------------
2452         if VariantM == "KalmanFilterFormula05":
2453             PfHT, HPfHT = 0., 0.
2454             for i in range(__m):
2455                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2456                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2457                 PfHT  += Exfi * Eyfi.T
2458                 HPfHT += Eyfi * Eyfi.T
2459             PfHT  = (1./(__m-1)) * PfHT
2460             HPfHT = (1./(__m-1)) * HPfHT
2461             Kn     = PfHT * ( R + HPfHT ).I
2462             del PfHT, HPfHT
2463             #
2464             for i in range(__m):
2465                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2466                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2467         #--------------------------
2468         elif VariantM == "KalmanFilterFormula16":
2469             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2470             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2471             #
2472             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2473             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2474             #
2475             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2476             #
2477             for i in range(__m):
2478                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2479         #--------------------------
2480         else:
2481             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2482         #
2483         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2484             Xn = CovarianceInflation( Xn,
2485                 selfA._parameters["InflationType"],
2486                 selfA._parameters["InflationFactor"],
2487                 )
2488         #
2489         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2490         #--------------------------
2491         #
2492         if selfA._parameters["StoreInternalVariables"] \
2493             or selfA._toStore("CostFunctionJ") \
2494             or selfA._toStore("CostFunctionJb") \
2495             or selfA._toStore("CostFunctionJo") \
2496             or selfA._toStore("APosterioriCovariance") \
2497             or selfA._toStore("InnovationAtCurrentAnalysis") \
2498             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2499             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2500             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2501             _Innovation = Ynpu - _HXa
2502         #
2503         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2504         # ---> avec analysis
2505         selfA.StoredVariables["Analysis"].store( Xa )
2506         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2507             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2508         if selfA._toStore("InnovationAtCurrentAnalysis"):
2509             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2510         # ---> avec current state
2511         if selfA._parameters["StoreInternalVariables"] \
2512             or selfA._toStore("CurrentState"):
2513             selfA.StoredVariables["CurrentState"].store( Xn )
2514         if selfA._toStore("ForecastState"):
2515             selfA.StoredVariables["ForecastState"].store( EMX )
2516         if selfA._toStore("BMA"):
2517             selfA.StoredVariables["BMA"].store( EMX - Xa )
2518         if selfA._toStore("InnovationAtCurrentState"):
2519             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2520         if selfA._toStore("SimulatedObservationAtCurrentState") \
2521             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2522             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2523         # ---> autres
2524         if selfA._parameters["StoreInternalVariables"] \
2525             or selfA._toStore("CostFunctionJ") \
2526             or selfA._toStore("CostFunctionJb") \
2527             or selfA._toStore("CostFunctionJo") \
2528             or selfA._toStore("CurrentOptimum") \
2529             or selfA._toStore("APosterioriCovariance"):
2530             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2531             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2532             J   = Jb + Jo
2533             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2534             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2535             selfA.StoredVariables["CostFunctionJ" ].store( J )
2536             #
2537             if selfA._toStore("IndexOfOptimum") \
2538                 or selfA._toStore("CurrentOptimum") \
2539                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2540                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2541                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2542                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2543                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2544             if selfA._toStore("IndexOfOptimum"):
2545                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2546             if selfA._toStore("CurrentOptimum"):
2547                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2548             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2549                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2550             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2551                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2552             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2553                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2554             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2555                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2556         if selfA._toStore("APosterioriCovariance"):
2557             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2558         if selfA._parameters["EstimationOf"] == "Parameters" \
2559             and J < previousJMinimum:
2560             previousJMinimum    = J
2561             XaMin               = Xa
2562             if selfA._toStore("APosterioriCovariance"):
2563                 covarianceXaMin = Pn
2564     #
2565     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2566     # ----------------------------------------------------------------------
2567     if selfA._parameters["EstimationOf"] == "Parameters":
2568         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2569         selfA.StoredVariables["Analysis"].store( XaMin )
2570         if selfA._toStore("APosterioriCovariance"):
2571             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2572         if selfA._toStore("BMA"):
2573             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2574     #
2575     return 0
2576
2577 # ==============================================================================
2578 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2579     """
2580     3DVAR
2581     """
2582     #
2583     # Initialisations
2584     # ---------------
2585     #
2586     # Opérateurs
2587     Hm = HO["Direct"].appliedTo
2588     Ha = HO["Adjoint"].appliedInXTo
2589     #
2590     # Utilisation éventuelle d'un vecteur H(Xb) précalculé
2591     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2592         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2593     else:
2594         HXb = Hm( Xb )
2595     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2596     if Y.size != HXb.size:
2597         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2598     if max(Y.shape) != max(HXb.shape):
2599         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2600     #
2601     if selfA._toStore("JacobianMatrixAtBackground"):
2602         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2603         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2604         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2605     #
2606     # Précalcul des inversions de B et R
2607     BI = B.getI()
2608     RI = R.getI()
2609     #
2610     # Point de démarrage de l'optimisation
2611     Xini = selfA._parameters["InitializationPoint"]
2612     #
2613     # Définition de la fonction-coût
2614     # ------------------------------
2615     def CostFunction(x):
2616         _X  = numpy.asmatrix(numpy.ravel( x )).T
2617         if selfA._parameters["StoreInternalVariables"] or \
2618             selfA._toStore("CurrentState") or \
2619             selfA._toStore("CurrentOptimum"):
2620             selfA.StoredVariables["CurrentState"].store( _X )
2621         _HX = Hm( _X )
2622         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2623         _Innovation = Y - _HX
2624         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2625             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2626             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2627         if selfA._toStore("InnovationAtCurrentState"):
2628             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2629         #
2630         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2631         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2632         J   = Jb + Jo
2633         #
2634         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2635         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2636         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2637         selfA.StoredVariables["CostFunctionJ" ].store( J )
2638         if selfA._toStore("IndexOfOptimum") or \
2639             selfA._toStore("CurrentOptimum") or \
2640             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2641             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2642             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2643             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2644             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2645         if selfA._toStore("IndexOfOptimum"):
2646             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2647         if selfA._toStore("CurrentOptimum"):
2648             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2649         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2650             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2651         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2652             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2653         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2654             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2655         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2656             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2657         return J
2658     #
2659     def GradientOfCostFunction(x):
2660         _X      = numpy.asmatrix(numpy.ravel( x )).T
2661         _HX     = Hm( _X )
2662         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
2663         GradJb  = BI * (_X - Xb)
2664         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
2665         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2666         return GradJ
2667     #
2668     # Minimisation de la fonctionnelle
2669     # --------------------------------
2670     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2671     #
2672     if selfA._parameters["Minimizer"] == "LBFGSB":
2673         if "0.19" <= scipy.version.version <= "1.1.0":
2674             import lbfgsbhlt as optimiseur
2675         else:
2676             import scipy.optimize as optimiseur
2677         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2678             func        = CostFunction,
2679             x0          = Xini,
2680             fprime      = GradientOfCostFunction,
2681             args        = (),
2682             bounds      = selfA._parameters["Bounds"],
2683             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2684             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2685             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2686             iprint      = selfA._parameters["optiprint"],
2687             )
2688         nfeval = Informations['funcalls']
2689         rc     = Informations['warnflag']
2690     elif selfA._parameters["Minimizer"] == "TNC":
2691         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2692             func        = CostFunction,
2693             x0          = Xini,
2694             fprime      = GradientOfCostFunction,
2695             args        = (),
2696             bounds      = selfA._parameters["Bounds"],
2697             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2698             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2699             ftol        = selfA._parameters["CostDecrementTolerance"],
2700             messages    = selfA._parameters["optmessages"],
2701             )
2702     elif selfA._parameters["Minimizer"] == "CG":
2703         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2704             f           = CostFunction,
2705             x0          = Xini,
2706             fprime      = GradientOfCostFunction,
2707             args        = (),
2708             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2709             gtol        = selfA._parameters["GradientNormTolerance"],
2710             disp        = selfA._parameters["optdisp"],
2711             full_output = True,
2712             )
2713     elif selfA._parameters["Minimizer"] == "NCG":
2714         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2715             f           = CostFunction,
2716             x0          = Xini,
2717             fprime      = GradientOfCostFunction,
2718             args        = (),
2719             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2720             avextol     = selfA._parameters["CostDecrementTolerance"],
2721             disp        = selfA._parameters["optdisp"],
2722             full_output = True,
2723             )
2724     elif selfA._parameters["Minimizer"] == "BFGS":
2725         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2726             f           = CostFunction,
2727             x0          = Xini,
2728             fprime      = GradientOfCostFunction,
2729             args        = (),
2730             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2731             gtol        = selfA._parameters["GradientNormTolerance"],
2732             disp        = selfA._parameters["optdisp"],
2733             full_output = True,
2734             )
2735     else:
2736         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2737     #
2738     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2739     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2740     #
2741     # Correction pour pallier a un bug de TNC sur le retour du Minimum
2742     # ----------------------------------------------------------------
2743     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2744         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2745     #
2746     # Obtention de l'analyse
2747     # ----------------------
2748     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2749     #
2750     selfA.StoredVariables["Analysis"].store( Xa )
2751     #
2752     if selfA._toStore("OMA") or \
2753         selfA._toStore("SigmaObs2") or \
2754         selfA._toStore("SimulationQuantiles") or \
2755         selfA._toStore("SimulatedObservationAtOptimum"):
2756         if selfA._toStore("SimulatedObservationAtCurrentState"):
2757             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2758         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2759             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2760         else:
2761             HXa = Hm( Xa )
2762     #
2763     # Calcul de la covariance d'analyse
2764     # ---------------------------------
2765     if selfA._toStore("APosterioriCovariance") or \
2766         selfA._toStore("SimulationQuantiles") or \
2767         selfA._toStore("JacobianMatrixAtOptimum") or \
2768         selfA._toStore("KalmanGainAtOptimum"):
2769         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2770         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2771     if selfA._toStore("APosterioriCovariance") or \
2772         selfA._toStore("SimulationQuantiles") or \
2773         selfA._toStore("KalmanGainAtOptimum"):
2774         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2775         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2776     if selfA._toStore("APosterioriCovariance") or \
2777         selfA._toStore("SimulationQuantiles"):
2778         HessienneI = []
2779         nb = Xa.size
2780         for i in range(nb):
2781             _ee    = numpy.matrix(numpy.zeros(nb)).T
2782             _ee[i] = 1.
2783             _HtEE  = numpy.dot(HtM,_ee)
2784             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2785             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2786         HessienneI = numpy.matrix( HessienneI )
2787         A = HessienneI.I
2788         if min(A.shape) != max(A.shape):
2789             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2790         if (numpy.diag(A) < 0).any():
2791             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2792         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
2793             try:
2794                 L = numpy.linalg.cholesky( A )
2795             except:
2796                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2797     if selfA._toStore("APosterioriCovariance"):
2798         selfA.StoredVariables["APosterioriCovariance"].store( A )
2799     if selfA._toStore("JacobianMatrixAtOptimum"):
2800         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2801     if selfA._toStore("KalmanGainAtOptimum"):
2802         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2803         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2804         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2805     #
2806     # Calculs et/ou stockages supplémentaires
2807     # ---------------------------------------
2808     if selfA._toStore("Innovation") or \
2809         selfA._toStore("SigmaObs2") or \
2810         selfA._toStore("MahalanobisConsistency") or \
2811         selfA._toStore("OMB"):
2812         d  = Y - HXb
2813     if selfA._toStore("Innovation"):
2814         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2815     if selfA._toStore("BMA"):
2816         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2817     if selfA._toStore("OMA"):
2818         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2819     if selfA._toStore("OMB"):
2820         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2821     if selfA._toStore("SigmaObs2"):
2822         TraceR = R.trace(Y.size)
2823         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2824     if selfA._toStore("MahalanobisConsistency"):
2825         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2826     if selfA._toStore("SimulationQuantiles"):
2827         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2828     if selfA._toStore("SimulatedObservationAtBackground"):
2829         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2830     if selfA._toStore("SimulatedObservationAtOptimum"):
2831         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2832     #
2833     return 0
2834
2835 # ==============================================================================
2836 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2837     """
2838     4DVAR
2839     """
2840     #
2841     # Initialisations
2842     # ---------------
2843     #
2844     # Opérateurs
2845     Hm = HO["Direct"].appliedControledFormTo
2846     Mm = EM["Direct"].appliedControledFormTo
2847     #
2848     if CM is not None and "Tangent" in CM and U is not None:
2849         Cm = CM["Tangent"].asMatrix(Xb)
2850     else:
2851         Cm = None
2852     #
2853     def Un(_step):
2854         if U is not None:
2855             if hasattr(U,"store") and 1<=_step<len(U) :
2856                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2857             elif hasattr(U,"store") and len(U)==1:
2858                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2859             else:
2860                 _Un = numpy.asmatrix(numpy.ravel( U )).T
2861         else:
2862             _Un = None
2863         return _Un
2864     def CmUn(_xn,_un):
2865         if Cm is not None and _un is not None: # Attention : si Cm est aussi dans M, doublon !
2866             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2867             _CmUn = _Cm * _un
2868         else:
2869             _CmUn = 0.
2870         return _CmUn
2871     #
2872     # Remarque : les observations sont exploitées à partir du pas de temps
2873     # numéro 1, et sont utilisées dans Yo comme rangées selon ces indices.
2874     # Donc le pas 0 n'est pas utilisé puisque la première étape commence
2875     # avec l'observation du pas 1.
2876     #
2877     # Nombre de pas identique au nombre de pas d'observations
2878     if hasattr(Y,"stepnumber"):
2879         duration = Y.stepnumber()
2880     else:
2881         duration = 2
2882     #
2883     # Précalcul des inversions de B et R
2884     BI = B.getI()
2885     RI = R.getI()
2886     #
2887     # Point de démarrage de l'optimisation
2888     Xini = selfA._parameters["InitializationPoint"]
2889     #
2890     # Définition de la fonction-coût
2891     # ------------------------------
2892     selfA.DirectCalculation = [None,] # Le pas 0 n'est pas observé
2893     selfA.DirectInnovation  = [None,] # Le pas 0 n'est pas observé
2894     def CostFunction(x):
2895         _X  = numpy.asmatrix(numpy.ravel( x )).T
2896         if selfA._parameters["StoreInternalVariables"] or \
2897             selfA._toStore("CurrentState") or \
2898             selfA._toStore("CurrentOptimum"):
2899             selfA.StoredVariables["CurrentState"].store( _X )
2900         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2901         selfA.DirectCalculation = [None,]
2902         selfA.DirectInnovation  = [None,]
2903         Jo  = 0.
2904         _Xn = _X
2905         for step in range(0,duration-1):
2906             if hasattr(Y,"store"):
2907                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2908             else:
2909                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2910             _Un = Un(step)
2911             #
2912             # Etape d'évolution
2913             if selfA._parameters["EstimationOf"] == "State":
2914                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2915             elif selfA._parameters["EstimationOf"] == "Parameters":
2916                 pass
2917             #
2918             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2919                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2920                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2921             #
2922             # Etape de différence aux observations
2923             if selfA._parameters["EstimationOf"] == "State":
2924                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2925             elif selfA._parameters["EstimationOf"] == "Parameters":
2926                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2927             #
2928             # Stockage de l'état
2929             selfA.DirectCalculation.append( _Xn )
2930             selfA.DirectInnovation.append( _YmHMX )
2931             #
2932             # Ajout dans la fonctionnelle d'observation
2933             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2934         J = Jb + Jo
2935         #
2936         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2937         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2938         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2939         selfA.StoredVariables["CostFunctionJ" ].store( J )
2940         if selfA._toStore("IndexOfOptimum") or \
2941             selfA._toStore("CurrentOptimum") or \
2942             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2943             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2944             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2945             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2946         if selfA._toStore("IndexOfOptimum"):
2947             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2948         if selfA._toStore("CurrentOptimum"):
2949             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2950         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2951             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2952         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2953             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2954         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2955             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2956         return J
2957     #
2958     def GradientOfCostFunction(x):
2959         _X      = numpy.asmatrix(numpy.ravel( x )).T
2960         GradJb  = BI * (_X - Xb)
2961         GradJo  = 0.
2962         for step in range(duration-1,0,-1):
2963             # Étape de récupération du dernier stockage de l'évolution
2964             _Xn = selfA.DirectCalculation.pop()
2965             # Étape de récupération du dernier stockage de l'innovation
2966             _YmHMX = selfA.DirectInnovation.pop()
2967             # Calcul des adjoints
2968             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2969             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2970             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2971             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2972             # Calcul du gradient par état adjoint
2973             GradJo = GradJo + Ha * RI * _YmHMX # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
2974             GradJo = Ma * GradJo               # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
2975         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2976         return GradJ
2977     #
2978     # Minimisation de la fonctionnelle
2979     # --------------------------------
2980     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2981     #
2982     if selfA._parameters["Minimizer"] == "LBFGSB":
2983         if "0.19" <= scipy.version.version <= "1.1.0":
2984             import lbfgsbhlt as optimiseur
2985         else:
2986             import scipy.optimize as optimiseur
2987         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2988             func        = CostFunction,
2989             x0          = Xini,
2990             fprime      = GradientOfCostFunction,
2991             args        = (),
2992             bounds      = selfA._parameters["Bounds"],
2993             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2994             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2995             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2996             iprint      = selfA._parameters["optiprint"],
2997             )
2998         nfeval = Informations['funcalls']
2999         rc     = Informations['warnflag']
3000     elif selfA._parameters["Minimizer"] == "TNC":
3001         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3002             func        = CostFunction,
3003             x0          = Xini,
3004             fprime      = GradientOfCostFunction,
3005             args        = (),
3006             bounds      = selfA._parameters["Bounds"],
3007             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3008             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3009             ftol        = selfA._parameters["CostDecrementTolerance"],
3010             messages    = selfA._parameters["optmessages"],
3011             )
3012     elif selfA._parameters["Minimizer"] == "CG":
3013         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3014             f           = CostFunction,
3015             x0          = Xini,
3016             fprime      = GradientOfCostFunction,
3017             args        = (),
3018             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3019             gtol        = selfA._parameters["GradientNormTolerance"],
3020             disp        = selfA._parameters["optdisp"],
3021             full_output = True,
3022             )
3023     elif selfA._parameters["Minimizer"] == "NCG":
3024         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3025             f           = CostFunction,
3026             x0          = Xini,
3027             fprime      = GradientOfCostFunction,
3028             args        = (),
3029             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3030             avextol     = selfA._parameters["CostDecrementTolerance"],
3031             disp        = selfA._parameters["optdisp"],
3032             full_output = True,
3033             )
3034     elif selfA._parameters["Minimizer"] == "BFGS":
3035         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3036             f           = CostFunction,
3037             x0          = Xini,
3038             fprime      = GradientOfCostFunction,
3039             args        = (),
3040             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3041             gtol        = selfA._parameters["GradientNormTolerance"],
3042             disp        = selfA._parameters["optdisp"],
3043             full_output = True,
3044             )
3045     else:
3046         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3047     #
3048     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3049     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3050     #
3051     # Correction pour pallier a un bug de TNC sur le retour du Minimum
3052     # ----------------------------------------------------------------
3053     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3054         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3055     #
3056     # Obtention de l'analyse
3057     # ----------------------
3058     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3059     #
3060     selfA.StoredVariables["Analysis"].store( Xa )
3061     #
3062     # Calculs et/ou stockages supplémentaires
3063     # ---------------------------------------
3064     if selfA._toStore("BMA"):
3065         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3066     #
3067     return 0
3068
3069 # ==============================================================================
3070 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3071     """
3072     3DVAR variational analysis with no inversion of B
3073     """
3074     #
3075     # Initialisations
3076     # ---------------
3077     #
3078     # Opérateurs
3079     Hm = HO["Direct"].appliedTo
3080     Ha = HO["Adjoint"].appliedInXTo
3081     #
3082     # Précalcul des inversions de B et R
3083     BT = B.getT()
3084     RI = R.getI()
3085     #
3086     # Point de démarrage de l'optimisation
3087     Xini = numpy.zeros(Xb.shape)
3088     #
3089     # Définition de la fonction-coût
3090     # ------------------------------
3091     def CostFunction(v):
3092         _V = numpy.asmatrix(numpy.ravel( v )).T
3093         _X = Xb + B * _V
3094         if selfA._parameters["StoreInternalVariables"] or \
3095             selfA._toStore("CurrentState") or \
3096             selfA._toStore("CurrentOptimum"):
3097             selfA.StoredVariables["CurrentState"].store( _X )
3098         _HX = Hm( _X )
3099         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3100         _Innovation = Y - _HX
3101         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3102             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3103             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3104         if selfA._toStore("InnovationAtCurrentState"):
3105             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3106         #
3107         Jb  = float( 0.5 * _V.T * BT * _V )
3108         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3109         J   = Jb + Jo
3110         #
3111         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3112         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3113         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3114         selfA.StoredVariables["CostFunctionJ" ].store( J )
3115         if selfA._toStore("IndexOfOptimum") or \
3116             selfA._toStore("CurrentOptimum") or \
3117             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3118             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3119             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3120             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3121             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3122         if selfA._toStore("IndexOfOptimum"):
3123             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3124         if selfA._toStore("CurrentOptimum"):
3125             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3126         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3127             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3128         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3129             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3130         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3131             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3132         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3133             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3134         return J
3135     #
3136     def GradientOfCostFunction(v):
3137         _V = numpy.asmatrix(numpy.ravel( v )).T
3138         _X = Xb + B * _V
3139         _HX     = Hm( _X )
3140         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3141         GradJb  = BT * _V
3142         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3143         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3144         return GradJ
3145     #
3146     # Minimisation de la fonctionnelle
3147     # --------------------------------
3148     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3149     #
3150     if selfA._parameters["Minimizer"] == "LBFGSB":
3151         if "0.19" <= scipy.version.version <= "1.1.0":
3152             import lbfgsbhlt as optimiseur
3153         else:
3154             import scipy.optimize as optimiseur
3155         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3156             func        = CostFunction,
3157             x0          = Xini,
3158             fprime      = GradientOfCostFunction,
3159             args        = (),
3160             bounds      = selfA._parameters["Bounds"],
3161             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3162             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3163             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3164             iprint      = selfA._parameters["optiprint"],
3165             )
3166         nfeval = Informations['funcalls']
3167         rc     = Informations['warnflag']
3168     elif selfA._parameters["Minimizer"] == "TNC":
3169         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3170             func        = CostFunction,
3171             x0          = Xini,
3172             fprime      = GradientOfCostFunction,
3173             args        = (),
3174             bounds      = selfA._parameters["Bounds"],
3175             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3176             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3177             ftol        = selfA._parameters["CostDecrementTolerance"],
3178             messages    = selfA._parameters["optmessages"],
3179             )
3180     elif selfA._parameters["Minimizer"] == "CG":
3181         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3182             f           = CostFunction,
3183             x0          = Xini,
3184             fprime      = GradientOfCostFunction,
3185             args        = (),
3186             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3187             gtol        = selfA._parameters["GradientNormTolerance"],
3188             disp        = selfA._parameters["optdisp"],
3189             full_output = True,
3190             )
3191     elif selfA._parameters["Minimizer"] == "NCG":
3192         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3193             f           = CostFunction,
3194             x0          = Xini,
3195             fprime      = GradientOfCostFunction,
3196             args        = (),
3197             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3198             avextol     = selfA._parameters["CostDecrementTolerance"],
3199             disp        = selfA._parameters["optdisp"],
3200             full_output = True,
3201             )
3202     elif selfA._parameters["Minimizer"] == "BFGS":
3203         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3204             f           = CostFunction,
3205             x0          = Xini,
3206             fprime      = GradientOfCostFunction,
3207             args        = (),
3208             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3209             gtol        = selfA._parameters["GradientNormTolerance"],
3210             disp        = selfA._parameters["optdisp"],
3211             full_output = True,
3212             )
3213     else:
3214         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3215     #
3216     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3217     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3218     #
3219     # Correction pour pallier a un bug de TNC sur le retour du Minimum
3220     # ----------------------------------------------------------------
3221     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3222         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3223         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3224     else:
3225         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3226     #
3227     # Obtention de l'analyse
3228     # ----------------------
3229     Xa = Minimum
3230     #
3231     selfA.StoredVariables["Analysis"].store( Xa )
3232     #
3233     if selfA._toStore("OMA") or \
3234         selfA._toStore("SigmaObs2") or \
3235         selfA._toStore("SimulationQuantiles") or \
3236         selfA._toStore("SimulatedObservationAtOptimum"):
3237         if selfA._toStore("SimulatedObservationAtCurrentState"):
3238             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3239         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3240             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3241         else:
3242             HXa = Hm( Xa )
3243     #
3244     # Calcul de la covariance d'analyse
3245     # ---------------------------------
3246     if selfA._toStore("APosterioriCovariance") or \
3247         selfA._toStore("SimulationQuantiles") or \
3248         selfA._toStore("JacobianMatrixAtOptimum") or \
3249         selfA._toStore("KalmanGainAtOptimum"):
3250         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3251         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3252     if selfA._toStore("APosterioriCovariance") or \
3253         selfA._toStore("SimulationQuantiles") or \
3254         selfA._toStore("KalmanGainAtOptimum"):
3255         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3256         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3257     if selfA._toStore("APosterioriCovariance") or \
3258         selfA._toStore("SimulationQuantiles"):
3259         BI = B.getI()
3260         HessienneI = []
3261         nb = Xa.size
3262         for i in range(nb):
3263             _ee    = numpy.matrix(numpy.zeros(nb)).T
3264             _ee[i] = 1.
3265             _HtEE  = numpy.dot(HtM,_ee)
3266             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3267             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3268         HessienneI = numpy.matrix( HessienneI )
3269         A = HessienneI.I
3270         if min(A.shape) != max(A.shape):
3271             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3272         if (numpy.diag(A) < 0).any():
3273             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3274         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
3275             try:
3276                 L = numpy.linalg.cholesky( A )
3277             except:
3278                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3279     if selfA._toStore("APosterioriCovariance"):
3280         selfA.StoredVariables["APosterioriCovariance"].store( A )
3281     if selfA._toStore("JacobianMatrixAtOptimum"):
3282         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3283     if selfA._toStore("KalmanGainAtOptimum"):
3284         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3285         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3286         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3287     #
3288     # Calculs et/ou stockages supplémentaires
3289     # ---------------------------------------
3290     if selfA._toStore("Innovation") or \
3291         selfA._toStore("SigmaObs2") or \
3292         selfA._toStore("MahalanobisConsistency") or \
3293         selfA._toStore("OMB"):
3294         d  = Y - HXb
3295     if selfA._toStore("Innovation"):
3296         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3297     if selfA._toStore("BMA"):
3298         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3299     if selfA._toStore("OMA"):
3300         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3301     if selfA._toStore("OMB"):
3302         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3303     if selfA._toStore("SigmaObs2"):
3304         TraceR = R.trace(Y.size)
3305         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3306     if selfA._toStore("MahalanobisConsistency"):
3307         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3308     if selfA._toStore("SimulationQuantiles"):
3309         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
3310         HXa  = numpy.matrix(numpy.ravel( HXa )).T
3311         EXr  = None
3312         YfQ  = None
3313         for i in range(nech):
3314             if selfA._parameters["SimulationForQuantiles"] == "Linear":
3315                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
3316                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
3317                 Yr = HXa + dYr
3318                 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
3319             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
3320                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
3321                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
3322             if YfQ is None:
3323                 YfQ = Yr
3324                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
3325             else:
3326                 YfQ = numpy.hstack((YfQ,Yr))
3327                 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
3328         YfQ.sort(axis=-1)
3329         YQ = None
3330         for quantile in selfA._parameters["Quantiles"]:
3331             if not (0. <= float(quantile) <= 1.): continue
3332             indice = int(nech * float(quantile) - 1./nech)
3333             if YQ is None: YQ = YfQ[:,indice]
3334             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
3335         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
3336         if selfA._toStore("SampledStateForQuantiles"):
3337             selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
3338     if selfA._toStore("SimulatedObservationAtBackground"):
3339         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3340     if selfA._toStore("SimulatedObservationAtOptimum"):
3341         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3342     #
3343     return 0
3344
3345 # ==============================================================================
3346 if __name__ == "__main__":
3347     print('\n AUTODIAGNOSTIC\n')