[modules/adao.git] src/daComposant/daCore/NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Définit les objets numériques génériques.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
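#
# Illustrative sketch (not part of the ADAO API): how a "triplet" for
# ExecuteFunction could be assembled. The temporary module "user_model" and the
# function name "H" below are hypothetical examples; funcrepr only has to carry
# the path, module and function names that ExecuteFunction imports above.
def _sketch_ExecuteFunction():
    "Minimal, self-contained example of the (X, xArgs, funcrepr) protocol"
    import os, tempfile
    tmpdir = tempfile.mkdtemp()
    with open(os.path.join(tmpdir, "user_model.py"), "w") as f:
        f.write("import numpy\ndef H(x):\n    return numpy.asarray(x)**2\n")
    funcrepr = {
        "__userFunction__path" : tmpdir,
        "__userFunction__modl" : "user_model",
        "__userFunction__name" : "H",
        }
    # xArgs is None here, so the user function is called without extra arguments
    return ExecuteFunction( (numpy.array([1., 2., 3.]), None, funcrepr) )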
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     Cette classe sert d'interface pour définir les opérateurs approximés. A la
55     création d'un objet, en fournissant une fonction "Function", on obtient un
56     objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57     "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58     multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59     "dX" qui sera multiplié par "increment" (donc en %), et on effectue de DF
60     centrées si le booléen "centeredDF" est vrai.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déjà calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Calcul du direct à l'aide de la fonction fournie.
171
172         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
173         ne doivent pas être données ici à la fonction utilisateur.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
188         c'est-à-dire le gradient de H en X. On utilise des différences finies
189         directionnelles autour du point X. X est un numpy.matrix.
190
191         Différences finies centrées (approximation d'ordre 2):
192         1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
193            dX[i] à la  composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
194            on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
195            H( X_moins_dXi )
196         2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
197            le pas 2*dXi
198         3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
199
200         Différences finies non centrées (approximation d'ordre 1):
201         1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
202            composante X[i] pour composer X_plus_dXi, et on calcule la réponse
203            HX_plus_dXi = H( X_plus_dXi )
204         2/ On calcule la valeur centrale HX = H(X)
205         3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
206            le pas dXi
207         4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
208
209         """
210         logging.debug("FDA Début du calcul de la Jacobienne")
211         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Calcul Jacobienne (explicite)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                     #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                     #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
377         #
378         logging.debug("FDA Fin du calcul de la Jacobienne")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Calcul du tangent à l'aide de la Jacobienne.
386
387         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
388         ne doivent pas être données ici à la fonction utilisateur.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect length of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Calcul de la forme matricielle si le second argument est None
402             # -------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Calcul de la valeur linéarisée de H en X appliqué à dX
408             # ------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Calcul de l'adjoint à l'aide de la Jacobienne.
418
419         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
420         ne doivent pas être données ici à la fonction utilisateur.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect length of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Calcul de la forme matricielle si le second argument est None
434             # -------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Calcul de la valeur de l'adjoint en X appliqué à Y
440             # --------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
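#
# Illustrative sketch (not part of the ADAO API): the finite-difference recipe
# documented in FDApproximation.TangentMatrix, written out with plain numpy on
# a toy operator. The function _toyH, the point X and the increment below are
# hypothetical examples, independent of the Operator machinery.
def _sketch_FiniteDifferenceJacobian():
    "Centered vs non-centered finite-difference Jacobian of a toy operator"
    def _toyH(x):
        return numpy.array([x[0]**2, x[0]*x[1], math.sin(x[2])])
    X  = numpy.array([1., 2., 0.5])
    dX = 0.01 * X                      # multiplicative increment, as with "increment"
    HX = _toyH(X)
    Jc, Jf = [], []
    for i in range(X.size):
        Xp, Xm = X.copy(), X.copy()
        Xp[i] = X[i] + dX[i]
        Xm[i] = X[i] - dX[i]
        Jc.append( (_toyH(Xp) - _toyH(Xm)) / (2.*dX[i]) )  # centered, order 2
        Jf.append( (_toyH(Xp) - HX) / dX[i] )              # non-centered, order 1
    # Each per-component result becomes a column of the Jacobian
    return numpy.vstack(Jc).T, numpy.vstack(Jf).T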
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448     "Génération d'un ensemble de taille _nbmembers d'états aléatoires centrés"
449     #
450     _bgcenter = numpy.ravel(_bgcenter)[:,None]
451     if _nbmembers < 1:
452         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
453     #
454     if _bgcovariance is None:
455         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
456     else:
457         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
459     #
460     return BackgroundEnsemble
461
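#
# Illustrative sketch (not part of the ADAO API): drawing a small ensemble
# around a given state with EnsembleOfCenteredPerturbations. The sizes and
# values below are arbitrary examples.
def _sketch_EnsembleOfCenteredPerturbations():
    "Ensemble of 5 members drawn around a 3-component background state"
    Xb = numpy.array([1., 2., 3.])
    B  = 0.1 * numpy.identity(3)
    E  = EnsembleOfCenteredPerturbations( Xb, B, 5 )
    # One column per member; the mean approaches Xb for large ensembles
    return E.shape, E.mean(axis=1)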
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464     "Génération d'un ensemble de taille _nbmembers d'états aléatoires centrés"
465     def __CenteredRandomAnomalies(Zr, N):
466         """
467         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
468         notes manuscrites de MB et conforme au code de PS avec eps = -1
469         """
470         eps = -1
471         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
474         Q = numpy.dot(Q,R)
475         Zr = numpy.dot(Q,Zr)
476         return Zr.T
477     #
478     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
479     if _nbmembers < 1:
480         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
481     if _bgcovariance is None:
482         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
483     else:
484         if _withSVD:
485             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486             _nbctl = _bgcenter.size
487             if _nbmembers > _nbctl:
488                 _Z = numpy.concatenate((numpy.dot(
489                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
491             else:
492                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494             BackgroundEnsemble = _bgcenter + _Zca
495         else:
496             if max(abs(_bgcovariance.flatten())) > 0:
497                 _nbctl = _bgcenter.size
498                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500                 BackgroundEnsemble = _bgcenter + _Zca
501             else:
502                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
503     #
504     return BackgroundEnsemble
505
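#
# Illustrative sketch (not part of the ADAO API): with the SVD variant, the
# generated anomalies are centered by construction, so the ensemble mean
# reproduces the background state. The sizes below are arbitrary examples.
def _sketch_EnsembleOfBackgroundPerturbations():
    "Centered ensemble of 4 members around a 3-component background state"
    Xb = numpy.array([1., 2., 3.])
    B  = 0.1 * numpy.identity(3)
    E  = EnsembleOfBackgroundPerturbations( Xb, B, 4, _withSVD = True )
    # The anomalies sum to zero over the members (columns)
    return numpy.allclose( E.mean(axis=1), Xb )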
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
508     "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
509     if OptMean is None:
510         __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
511     else:
512         __Em = numpy.ravel(OptMean).reshape((-1,1))
513     #
514     return Normalisation * (numpy.asarray(Ensemble) - __Em)
515
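#
# Illustrative sketch (not part of the ADAO API): the anomalies of an ensemble
# are the deviations of each member (column) from the ensemble mean, optionally
# rescaled by "Normalisation". The 3x5 random ensemble below is arbitrary.
def _sketch_EnsembleOfAnomalies():
    "Anomalies of a 3x5 example ensemble sum to zero over the members"
    E  = numpy.random.randn(3, 5)
    Ea = EnsembleOfAnomalies( E )
    return numpy.allclose( Ea.sum(axis=1), 0. )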
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble, __quick = False ):
518     "Renvoie l'estimation empirique de la covariance d'ensemble"
519     if __quick:
520         # Covariance rapide mais rarement définie positive
521         __Covariance = numpy.cov(Ensemble)
522     else:
523         # Résultat souvent identique à numpy.cov, mais plus robuste
524         __n, __m = numpy.asarray(Ensemble).shape
525         __Anomalies = EnsembleOfAnomalies( Ensemble )
526         # Estimation empirique
527         __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
528         # Assure la symétrie
529         __Covariance = (__Covariance + __Covariance.T) * 0.5
530         # Assure la positivité
531         __epsilon    = mpr*numpy.trace(__Covariance)
532         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
533     #
534     return __Covariance
535
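#
# Illustrative sketch (not part of the ADAO API): the empirical ensemble
# covariance coincides with numpy.cov up to the symmetrisation and the tiny
# diagonal regularisation added above. The 3x50 random ensemble is arbitrary.
def _sketch_EnsembleErrorCovariance():
    "Empirical covariance of a 3x50 example ensemble compared with numpy.cov"
    E = numpy.random.randn(3, 50)
    P = EnsembleErrorCovariance( E )
    return numpy.allclose( P, numpy.cov(E), atol = 1.e-10 )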
536 # ==============================================================================
537 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
538     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
539     if hasattr(__Covariance,"assparsematrix"):
540         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
541             # Traitement d'une covariance nulle ou presque
542             return __Ensemble
543         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
544             # Traitement d'une covariance nulle ou presque
545             return __Ensemble
546     else:
547         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
548             # Traitement d'une covariance nulle ou presque
549             return __Ensemble
550         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
551             # Traitement d'une covariance nulle ou presque
552             return __Ensemble
553     #
554     __n, __m = __Ensemble.shape
555     if __Seed is not None: numpy.random.seed(__Seed)
556     #
557     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
558         # Traitement d'une covariance multiple de l'identité
559         __zero = 0.
560         __std  = numpy.sqrt(__Covariance.assparsematrix())
561         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
562     #
563     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
564         # Traitement d'une covariance diagonale avec variances non identiques
565         __zero = numpy.zeros(__n)
566         __std  = numpy.sqrt(__Covariance.assparsematrix())
567         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
568     #
569     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
570         # Traitement d'une covariance pleine
571         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
572     #
573     elif isinstance(__Covariance, numpy.ndarray):
574         # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
575         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
576     #
577     else:
578         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
579     #
580     return __Ensemble
581
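#
# Illustrative sketch (not part of the ADAO API): perturbing every member of an
# ensemble, in place, with noise drawn from a full numpy covariance matrix
# (the last branch above). The sizes, values and seed below are arbitrary and
# the seed is fixed only to make the example reproducible.
def _sketch_EnsemblePerturbationWithGivenCovariance():
    "In-place perturbation of a 3x10 example ensemble"
    E  = numpy.ones((3, 10))
    Q  = 0.01 * numpy.identity(3)
    Ep = EnsemblePerturbationWithGivenCovariance( E, Q, __Seed = 123 )
    return Ep.shape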
582 # ==============================================================================
583 def CovarianceInflation(
584         InputCovOrEns,
585         InflationType   = None,
586         InflationFactor = None,
587         BackgroundCov   = None,
588         ):
589     """
590     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
591
592     Synthèse : Hunt 2007, section 2.3.5
593     """
594     if InflationFactor is None:
595         return InputCovOrEns
596     else:
597         InflationFactor = float(InflationFactor)
598     #
599     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
600         if InflationFactor < 1.:
601             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
602         if InflationFactor < 1.+mpr:
603             return InputCovOrEns
604         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
605     #
606     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
607         if InflationFactor < 1.:
608             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
609         if InflationFactor < 1.+mpr:
610             return InputCovOrEns
611         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
612         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
613             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
614     #
615     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
616         if InflationFactor < 0.:
617             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
618         if InflationFactor < mpr:
619             return InputCovOrEns
620         __n, __m = numpy.asarray(InputCovOrEns).shape
621         if __n != __m:
622             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
623         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
624     #
625     elif InflationType == "HybridOnBackgroundCovariance":
626         if InflationFactor < 0.:
627             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
628         if InflationFactor < mpr:
629             return InputCovOrEns
630         __n, __m = numpy.asarray(InputCovOrEns).shape
631         if __n != __m:
632             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
633         if BackgroundCov is None:
634             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
635         if InputCovOrEns.shape != BackgroundCov.shape:
636             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
637         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
638     #
639     elif InflationType == "Relaxation":
640         raise NotImplementedError("InflationType Relaxation")
641     #
642     else:
643         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
644     #
645     return OutputCovOrEns
646
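#
# Illustrative sketch (not part of the ADAO API): multiplicative inflation of
# ensemble anomalies keeps the ensemble mean unchanged and scales the spread
# around it by the inflation factor (Hunt 2007, section 2.3.5). The 3x5 random
# ensemble and the factor 1.5 below are arbitrary examples.
def _sketch_CovarianceInflation():
    "Multiplicative inflation of the anomalies of a 3x5 example ensemble"
    E  = numpy.random.randn(3, 5)
    Ei = CovarianceInflation( E, "MultiplicativeOnAnalysisAnomalies", 1.5 )
    sameMean   = numpy.allclose( Ei.mean(axis=1), E.mean(axis=1) )
    sameSpread = numpy.allclose( EnsembleOfAnomalies(Ei), 1.5 * EnsembleOfAnomalies(E) )
    return sameMean and sameSpread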
647 # ==============================================================================
648 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
649     "Estimation des quantiles a posteriori (selfA est modifié)"
650     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
651     #
652     # Traitement des bornes
653     if "QBounds" in selfA._parameters: LBounds = selfA._parameters["QBounds"] # Prioritaire
654     else:                              LBounds = selfA._parameters["Bounds"]  # Défaut raisonnable
655     if LBounds is not None:
656         def NoneRemove(paire):
657             bmin, bmax = paire
658             if bmin is None: bmin = numpy.finfo('float').min
659             if bmax is None: bmax = numpy.finfo('float').max
660             return [bmin, bmax]
661         LBounds = numpy.matrix( [NoneRemove(paire) for paire in LBounds] )
662     #
663     # Échantillonnage des états
664     YfQ  = None
665     EXr  = None
666     if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
667         HXa  = numpy.matrix(numpy.ravel( HXa )).T
668     for i in range(nbsamples):
669         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
670             dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
671             if LBounds is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
672                 dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0] - Xa)),axis=1)
673                 dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1] - Xa)),axis=1)
674             dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
675             Yr = HXa + dYr
676             if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
677         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
678             Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
679             if LBounds is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
680                 Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
681                 Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
682             Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
683         else:
684             raise ValueError("Quantile simulation has to be either Linear or NonLinear.")
685         #
686         if YfQ is None:
687             YfQ = Yr
688             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
689         else:
690             YfQ = numpy.hstack((YfQ,Yr))
691             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
692     #
693     # Extraction des quantiles
694     YfQ.sort(axis=-1)
695     YQ = None
696     for quantile in selfA._parameters["Quantiles"]:
697         if not (0. <= float(quantile) <= 1.): continue
698         indice = int(nbsamples * float(quantile) - 1./nbsamples)
699         if YQ is None: YQ = YfQ[:,indice]
700         else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
701     selfA.StoredVariables["SimulationQuantiles"].store( YQ )
702     if selfA._toStore("SampledStateForQuantiles"):
703         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
704     #
705     return 0
706
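#
# Illustrative sketch (not part of the ADAO API): the quantile extraction rule
# used above, on its own. Samples of each output component are sorted along the
# sample axis and the requested quantile is read at the empirical index
# int(nbsamples*quantile - 1/nbsamples). The 2x1000 random sample is arbitrary.
def _sketch_QuantileExtraction():
    "Empirical 5%, 50% and 95% quantiles of a 2x1000 example sample set"
    nbsamples = 1000
    YfQ = numpy.random.randn(2, nbsamples)
    YfQ.sort(axis=-1)
    YQ = []
    for quantile in (0.05, 0.5, 0.95):
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        YQ.append( YfQ[:,indice] )
    return numpy.column_stack(YQ)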
707 # ==============================================================================
708 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
709     """
710     EnKS
711     """
712     #
713     # Opérateurs
714     H = HO["Direct"].appliedControledFormTo
715     #
716     if selfA._parameters["EstimationOf"] == "State":
717         M = EM["Direct"].appliedControledFormTo
718     #
719     if CM is not None and "Tangent" in CM and U is not None:
720         Cm = CM["Tangent"].asMatrix(Xb)
721     else:
722         Cm = None
723     #
724     # Précalcul des inversions de B et R
725     RIdemi = R.sqrtmI()
726     #
727     # Durée d'observation et tailles
728     LagL = selfA._parameters["SmootherLagL"]
729     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
730         raise ValueError("Fixed-lag smoother requires a series of observations")
731     if Y.stepnumber() < LagL:
732         raise ValueError("Fixed-lag smoother requires a number of observations greater than the lag L")
733     duration = Y.stepnumber()
734     __p = numpy.cumprod(Y.shape())[-1]
735     __n = Xb.size
736     __m = selfA._parameters["NumberOfMembers"]
737     #
738     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
739     else:                         Pn = B
740     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
741         selfA.StoredVariables["Analysis"].store( Xb )
742         if selfA._toStore("APosterioriCovariance"):
743             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
744             covarianceXa = Pn
745     #
746     # Calcul direct initial (on privilégie la mémorisation au recalcul)
747     __seed = numpy.random.get_state()
748     selfB = copy.deepcopy(selfA)
749     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
750     if VariantM == "EnKS16-KalmanFilterFormula":
751         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
752     else:
753         raise ValueError("VariantM has to be chosen in the authorized methods list.")
754     if LagL > 0:
755         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
756     else:
757         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
758     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
759     #
760     for step in range(LagL,duration-1):
761         #
762         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
763         sEL.append(None)
764         #
765         if hasattr(Y,"store"):
766             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
767         else:
768             Ynpu = numpy.ravel( Y ).reshape((__p,1))
769         #
770         if U is not None:
771             if hasattr(U,"store") and len(U)>1:
772                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
773             elif hasattr(U,"store") and len(U)==1:
774                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
775             else:
776                 Un = numpy.asmatrix(numpy.ravel( U )).T
777         else:
778             Un = None
779         #
780         #--------------------------
781         if VariantM == "EnKS16-KalmanFilterFormula":
782             if selfA._parameters["EstimationOf"] == "State": # Forecast
783                 EL = M( [(EL[:,i], Un) for i in range(__m)],
784                     argsAsSerie = True,
785                     returnSerieAsArrayMatrix = True )
786                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
787                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
788                     argsAsSerie = True,
789                     returnSerieAsArrayMatrix = True )
790                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
791                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
792                     EZ = EZ + Cm * Un
793             elif selfA._parameters["EstimationOf"] == "Parameters":
794                 # --- > Par principe, M = Id, Q = 0
795                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
796                     argsAsSerie = True,
797                     returnSerieAsArrayMatrix = True )
798             #
799             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
800             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
801             #
802             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
803             delta = RIdemi @ ( Ynpu - vZm )
804             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
805             vw    = mT @ mS.T @ delta
806             #
807             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
808             mU    = numpy.identity(__m)
809             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
810             #
811             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
812             EL    = vEm + EX @ wTU
813             #
814             sEL[LagL] = EL
815             for irl in range(LagL): # Lissage des L précédentes analyses
816                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
817                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
818                 sEL[irl] = vEm + EX @ wTU
819             #
820             # Conservation de l'analyse retrospective d'ordre 0 avant rotation
821             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
822             if selfA._toStore("APosterioriCovariance"):
823                 EXn = sEL[0]
824             #
825             for irl in range(LagL):
826                 sEL[irl] = sEL[irl+1]
827             sEL[LagL] = None
828         #--------------------------
829         else:
830             raise ValueError("VariantM has to be chosen in the authorized methods list.")
831         #
832         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
833         # ---> avec analysis
834         selfA.StoredVariables["Analysis"].store( Xa )
835         if selfA._toStore("APosterioriCovariance"):
836             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
837     #
838     # Stockage des dernières analyses incomplètement remises à jour
839     for irl in range(LagL):
840         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
841         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
842         selfA.StoredVariables["Analysis"].store( Xa )
843     #
844     return 0
845
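#
# Illustrative sketch (not part of the ADAO API): the ensemble transform
# analysis used above with VariantM="EnKS16-KalmanFilterFormula", written out
# with plain numpy/scipy on a toy ensemble. The observation operator (first
# state component), R = identity and all values below are hypothetical choices,
# so RIdemi is the identity and is omitted.
def _sketch_EnsembleTransformAnalysis():
    "One ensemble transform analysis of a 2-state, 4-member example ensemble"
    import scipy.linalg
    __n, __m = 2, 4
    E    = numpy.array([[1., 2., 3., 4.], [0., 1., 0., 1.]]) # forecast ensemble (__n x __m)
    HE   = E[:1,:]                                           # observed part of each member
    Ynpu = numpy.array([[2.5]])                              # single observation
    Xfm  = E.mean(axis=1).reshape((__n,1))
    Hfm  = HE.mean(axis=1).reshape((1,1))
    EaX  = (E  - Xfm) / math.sqrt(__m-1)                     # state anomalies
    EaHX = (HE - Hfm) / math.sqrt(__m-1)                     # observed anomalies
    mS    = EaHX                                             # would be RIdemi @ EaHX in general
    delta = Ynpu - Hfm
    mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
    vw    = mT @ mS.T @ delta
    Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
    # Analysis ensemble: mean updated through vw, spread updated through Tdemi
    Ea    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi )
    return Ea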
846 # ==============================================================================
847 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
848     """
849     Ensemble-Transform EnKF
850     """
851     if selfA._parameters["EstimationOf"] == "Parameters":
852         selfA._parameters["StoreInternalVariables"] = True
853     #
854     # Opérateurs
855     # ----------
856     H = HO["Direct"].appliedControledFormTo
857     #
858     if selfA._parameters["EstimationOf"] == "State":
859         M = EM["Direct"].appliedControledFormTo
860     #
861     if CM is not None and "Tangent" in CM and U is not None:
862         Cm = CM["Tangent"].asMatrix(Xb)
863     else:
864         Cm = None
865     #
866     # Nombre de pas identique au nombre de pas d'observations
867     # -------------------------------------------------------
868     if hasattr(Y,"stepnumber"):
869         duration = Y.stepnumber()
870         __p = numpy.cumprod(Y.shape())[-1]
871     else:
872         duration = 2
873         __p = numpy.array(Y).size
874     #
875     # Précalcul des inversions de B et R
876     # ----------------------------------
877     if selfA._parameters["StoreInternalVariables"] \
878         or selfA._toStore("CostFunctionJ") \
879         or selfA._toStore("CostFunctionJb") \
880         or selfA._toStore("CostFunctionJo") \
881         or selfA._toStore("CurrentOptimum") \
882         or selfA._toStore("APosterioriCovariance"):
883         BI = B.getI()
884         RI = R.getI()
885     elif VariantM != "KalmanFilterFormula":
886         RI = R.getI()
887     if VariantM == "KalmanFilterFormula":
888         RIdemi = R.sqrtmI()
889     #
890     # Initialisation
891     # --------------
892     __n = Xb.size
893     __m = selfA._parameters["NumberOfMembers"]
894     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
895     else:                         Pn = B
896     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
897     #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
898     #
899     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
900         selfA.StoredVariables["Analysis"].store( Xb )
901         if selfA._toStore("APosterioriCovariance"):
902             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
903             covarianceXa = Pn
904     #
905     previousJMinimum = numpy.finfo(float).max
906     #
907     for step in range(duration-1):
908         if hasattr(Y,"store"):
909             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
910         else:
911             Ynpu = numpy.ravel( Y ).reshape((__p,1))
912         #
913         if U is not None:
914             if hasattr(U,"store") and len(U)>1:
915                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
916             elif hasattr(U,"store") and len(U)==1:
917                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
918             else:
919                 Un = numpy.asmatrix(numpy.ravel( U )).T
920         else:
921             Un = None
922         #
923         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
924             Xn = CovarianceInflation( Xn,
925                 selfA._parameters["InflationType"],
926                 selfA._parameters["InflationFactor"],
927                 )
928         #
929         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
930             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
931                 argsAsSerie = True,
932                 returnSerieAsArrayMatrix = True )
933             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
934             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
935                 argsAsSerie = True,
936                 returnSerieAsArrayMatrix = True )
937             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
938                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
939                 Xn_predicted = Xn_predicted + Cm * Un
940         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
941             # --- > Par principe, M = Id, Q = 0
942             Xn_predicted = Xn
943             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
944                 argsAsSerie = True,
945                 returnSerieAsArrayMatrix = True )
946         #
947         # Mean of forecast and observation of forecast
948         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
949         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
950         #
951         # Anomalies
952         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
953         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
954         #
955         #--------------------------
956         if VariantM == "KalmanFilterFormula":
957             mS    = RIdemi * EaHX / math.sqrt(__m-1)
958             delta = RIdemi * ( Ynpu - Hfm )
959             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
960             vw    = mT @ mS.T @ delta
961             #
962             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
963             mU    = numpy.identity(__m)
964             #
965             EaX   = EaX / math.sqrt(__m-1)
966             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
967         #--------------------------
968         elif VariantM == "Variational":
969             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
970             def CostFunction(w):
971                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
972                 _Jo = 0.5 * _A.T @ (RI * _A)
973                 _Jb = 0.5 * (__m-1) * w.T @ w
974                 _J  = _Jo + _Jb
975                 return float(_J)
976             def GradientOfCostFunction(w):
977                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
978                 _GardJo = - EaHX.T @ (RI * _A)
979                 _GradJb = (__m-1) * w.reshape((__m,1))
980                 _GradJ  = _GardJo + _GradJb
981                 return numpy.ravel(_GradJ)
982             vw = scipy.optimize.fmin_cg(
983                 f           = CostFunction,
984                 x0          = numpy.zeros(__m),
985                 fprime      = GradientOfCostFunction,
986                 args        = (),
987                 disp        = False,
988                 )
989             #
990             Hto = EaHX.T @ (RI * EaHX)
991             Htb = (__m-1) * numpy.identity(__m)
992             Hta = Hto + Htb
993             #
994             Pta = numpy.linalg.inv( Hta )
995             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
996             #
997             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
998         #--------------------------
999         elif VariantM == "FiniteSize11": # Jauge Boc2011
1000             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1001             def CostFunction(w):
1002                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1003                 _Jo = 0.5 * _A.T @ (RI * _A)
1004                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1005                 _J  = _Jo + _Jb
1006                 return float(_J)
1007             def GradientOfCostFunction(w):
1008                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1009                 _GardJo = - EaHX.T @ (RI * _A)
1010                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1011                 _GradJ  = _GardJo + _GradJb
1012                 return numpy.ravel(_GradJ)
1013             vw = scipy.optimize.fmin_cg(
1014                 f           = CostFunction,
1015                 x0          = numpy.zeros(__m),
1016                 fprime      = GradientOfCostFunction,
1017                 args        = (),
1018                 disp        = False,
1019                 )
1020             #
1021             Hto = EaHX.T @ (RI * EaHX)
1022             Htb = __m * \
1023                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1024                 / (1 + 1/__m + vw.T @ vw)**2
1025             Hta = Hto + Htb
1026             #
1027             Pta = numpy.linalg.inv( Hta )
1028             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1029             #
1030             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1031         #--------------------------
1032         elif VariantM == "FiniteSize15": # Jauge Boc2015
1033             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1034             def CostFunction(w):
1035                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1036                 _Jo = 0.5 * _A.T @ (RI * _A)
1037                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1038                 _J  = _Jo + _Jb
1039                 return float(_J)
1040             def GradientOfCostFunction(w):
1041                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1042                 _GardJo = - EaHX.T @ (RI * _A)
1043                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1044                 _GradJ  = _GardJo + _GradJb
1045                 return numpy.ravel(_GradJ)
1046             vw = scipy.optimize.fmin_cg(
1047                 f           = CostFunction,
1048                 x0          = numpy.zeros(__m),
1049                 fprime      = GradientOfCostFunction,
1050                 args        = (),
1051                 disp        = False,
1052                 )
1053             #
1054             Hto = EaHX.T @ (RI * EaHX)
1055             Htb = (__m+1) * \
1056                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1057                 / (1 + 1/__m + vw.T @ vw)**2
1058             Hta = Hto + Htb
1059             #
1060             Pta = numpy.linalg.inv( Hta )
1061             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1062             #
1063             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1064         #--------------------------
1065         elif VariantM == "FiniteSize16": # Jauge Boc2016
1066             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1067             def CostFunction(w):
1068                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1069                 _Jo = 0.5 * _A.T @ (RI * _A)
1070                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1071                 _J  = _Jo + _Jb
1072                 return float(_J)
1073             def GradientOfCostFunction(w):
1074                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1075                 _GardJo = - EaHX.T @ (RI * _A)
1076                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1077                 _GradJ  = _GardJo + _GradJb
1078                 return numpy.ravel(_GradJ)
1079             vw = scipy.optimize.fmin_cg(
1080                 f           = CostFunction,
1081                 x0          = numpy.zeros(__m),
1082                 fprime      = GradientOfCostFunction,
1083                 args        = (),
1084                 disp        = False,
1085                 )
1086             #
1087             Hto = EaHX.T @ (RI * EaHX)
1088             Htb = ((__m+1) / (__m-1)) * \
1089                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1090                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1091             Hta = Hto + Htb
1092             #
1093             Pta = numpy.linalg.inv( Hta )
1094             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1095             #
1096             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1097         #--------------------------
1098         else:
1099             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1100         #
1101         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1102             Xn = CovarianceInflation( Xn,
1103                 selfA._parameters["InflationType"],
1104                 selfA._parameters["InflationFactor"],
1105                 )
1106         #
1107         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1108         #--------------------------
1109         #
1110         if selfA._parameters["StoreInternalVariables"] \
1111             or selfA._toStore("CostFunctionJ") \
1112             or selfA._toStore("CostFunctionJb") \
1113             or selfA._toStore("CostFunctionJo") \
1114             or selfA._toStore("APosterioriCovariance") \
1115             or selfA._toStore("InnovationAtCurrentAnalysis") \
1116             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1117             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1118             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1119             _Innovation = Ynpu - _HXa
1120         #
1121         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1122         # ---> with the analysis
1123         selfA.StoredVariables["Analysis"].store( Xa )
1124         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1125             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1126         if selfA._toStore("InnovationAtCurrentAnalysis"):
1127             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1128         # ---> with the current state
1129         if selfA._parameters["StoreInternalVariables"] \
1130             or selfA._toStore("CurrentState"):
1131             selfA.StoredVariables["CurrentState"].store( Xn )
1132         if selfA._toStore("ForecastState"):
1133             selfA.StoredVariables["ForecastState"].store( EMX )
1134         if selfA._toStore("BMA"):
1135             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1136         if selfA._toStore("InnovationAtCurrentState"):
1137             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1138         if selfA._toStore("SimulatedObservationAtCurrentState") \
1139             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1140             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1141         # ---> others
1142         if selfA._parameters["StoreInternalVariables"] \
1143             or selfA._toStore("CostFunctionJ") \
1144             or selfA._toStore("CostFunctionJb") \
1145             or selfA._toStore("CostFunctionJo") \
1146             or selfA._toStore("CurrentOptimum") \
1147             or selfA._toStore("APosterioriCovariance"):
1148             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1149             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1150             J   = Jb + Jo
1151             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1152             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1153             selfA.StoredVariables["CostFunctionJ" ].store( J )
1154             #
1155             if selfA._toStore("IndexOfOptimum") \
1156                 or selfA._toStore("CurrentOptimum") \
1157                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1158                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1159                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1160                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1161                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1162             if selfA._toStore("IndexOfOptimum"):
1163                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1164             if selfA._toStore("CurrentOptimum"):
1165                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1166             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1167                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1168             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1169                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1170             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1171                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1172             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1173                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1174         if selfA._toStore("APosterioriCovariance"):
1175             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1176         if selfA._parameters["EstimationOf"] == "Parameters" \
1177             and J < previousJMinimum:
1178             previousJMinimum    = J
1179             XaMin               = Xa
1180             if selfA._toStore("APosterioriCovariance"):
1181                 covarianceXaMin = Pn
1182         # ---> For the smoothers
1183         if selfA._toStore("CurrentEnsembleState"):
1184             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1185     #
1186     # Final additional storage of the optimum for parameter estimation
1187     # ----------------------------------------------------------------------
1188     if selfA._parameters["EstimationOf"] == "Parameters":
1189         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1190         selfA.StoredVariables["Analysis"].store( XaMin )
1191         if selfA._toStore("APosterioriCovariance"):
1192             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1193         if selfA._toStore("BMA"):
1194             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1195     #
1196     return 0
1197
1198 # ==============================================================================
1199 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1200     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1201     """
1202     Iterative EnKF
1203     """
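         # Hedged summary of the "IEnKF12" variant below (an illustrative reading,
         # not a specification): each analysis performs Gauss-Newton iterations on
         # the ensemble coordinates w of the state, approximately minimizing
         #     J(w) = 0.5*w'w + 0.5*( y - H(M(xb_mean + EaX w)) )' R^-1 ( ... )
         # with EaX the background anomalies scaled by 1/sqrt(m-1); mH plays the
         # role of the approximate Hessian and Ta = sqrtm(mH^-1) rebuilds the
         # analysis anomalies.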
1204     if selfA._parameters["EstimationOf"] == "Parameters":
1205         selfA._parameters["StoreInternalVariables"] = True
1206     #
1207     # Operators
1208     # ----------
1209     H = HO["Direct"].appliedControledFormTo
1210     #
1211     if selfA._parameters["EstimationOf"] == "State":
1212         M = EM["Direct"].appliedControledFormTo
1213     #
1214     if CM is not None and "Tangent" in CM and U is not None:
1215         Cm = CM["Tangent"].asMatrix(Xb)
1216     else:
1217         Cm = None
1218     #
1219     # Number of steps equal to the number of observation steps
1220     # -------------------------------------------------------
1221     if hasattr(Y,"stepnumber"):
1222         duration = Y.stepnumber()
1223         __p = numpy.cumprod(Y.shape())[-1]
1224     else:
1225         duration = 2
1226         __p = numpy.array(Y).size
1227     #
1228     # Precomputation of the inverses of B and R
1229     # ----------------------------------
1230     if selfA._parameters["StoreInternalVariables"] \
1231         or selfA._toStore("CostFunctionJ") \
1232         or selfA._toStore("CostFunctionJb") \
1233         or selfA._toStore("CostFunctionJo") \
1234         or selfA._toStore("CurrentOptimum") \
1235         or selfA._toStore("APosterioriCovariance"):
1236         BI = B.getI()
1237     RI = R.getI()
1238     #
1239     # Initialization
1240     # --------------
1241     __n = Xb.size
1242     __m = selfA._parameters["NumberOfMembers"]
1243     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1244     else:                         Pn = B
1245     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1246     else:                         Rn = R
1247     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1248     else:                         Qn = Q
1249     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1250     #
1251     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1252         selfA.StoredVariables["Analysis"].store( Xb )
1253         if selfA._toStore("APosterioriCovariance"):
1254             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1255             covarianceXa = Pn
1256     #
1257     previousJMinimum = numpy.finfo(float).max
         nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Needed below for the optimum bookkeeping, as in the variational algorithms
1258     #
1259     for step in range(duration-1):
1260         if hasattr(Y,"store"):
1261             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1262         else:
1263             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1264         #
1265         if U is not None:
1266             if hasattr(U,"store") and len(U)>1:
1267                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1268             elif hasattr(U,"store") and len(U)==1:
1269                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1270             else:
1271                 Un = numpy.asmatrix(numpy.ravel( U )).T
1272         else:
1273             Un = None
1274         #
1275         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1276             Xn = CovarianceInflation( Xn,
1277                 selfA._parameters["InflationType"],
1278                 selfA._parameters["InflationFactor"],
1279                 )
1280         #
1281         #--------------------------
1282         if VariantM == "IEnKF12":
1283             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1284             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1285             __j = 0
1286             Deltaw = 1
1287             if not BnotT:
1288                 Ta  = numpy.identity(__m)
1289             vw  = numpy.zeros(__m)
1290             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1291                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1292                 #
1293                 if BnotT:
1294                     E1 = vx1 + _epsilon * EaX
1295                 else:
1296                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1297                 #
1298                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1299                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1300                         argsAsSerie = True,
1301                         returnSerieAsArrayMatrix = True )
1302                 elif selfA._parameters["EstimationOf"] == "Parameters":
1303                     # --- > By principle, M = Id
1304                     E2 = Xn
1305                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1306                 vy1 = H((vx2, Un)).reshape((__p,1))
1307                 #
1308                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1309                     argsAsSerie = True,
1310                     returnSerieAsArrayMatrix = True )
1311                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1312                 #
1313                 if BnotT:
1314                     EaY = (HE2 - vy2) / _epsilon
1315                 else:
1316                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1317                 #
1318                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1319                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1320                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1321                 #
1322                 vw = vw + Deltaw
1323                 #
1324                 if not BnotT:
1325                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1326                 #
1327                 __j = __j + 1
1328             #
1329             A2 = EnsembleOfAnomalies( E2 )
1330             #
1331             if BnotT:
1332                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1333                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1334             #
1335             Xn = vx2 + A2
1336         #--------------------------
1337         else:
1338             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1339         #
1340         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1341             Xn = CovarianceInflation( Xn,
1342                 selfA._parameters["InflationType"],
1343                 selfA._parameters["InflationFactor"],
1344                 )
1345         #
1346         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1347         #--------------------------
1348         #
1349         if selfA._parameters["StoreInternalVariables"] \
1350             or selfA._toStore("CostFunctionJ") \
1351             or selfA._toStore("CostFunctionJb") \
1352             or selfA._toStore("CostFunctionJo") \
1353             or selfA._toStore("APosterioriCovariance") \
1354             or selfA._toStore("InnovationAtCurrentAnalysis") \
1355             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1356             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1357             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1358             _Innovation = Ynpu - _HXa
1359         #
1360         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1361         # ---> with the analysis
1362         selfA.StoredVariables["Analysis"].store( Xa )
1363         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1364             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1365         if selfA._toStore("InnovationAtCurrentAnalysis"):
1366             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1367         # ---> with the current state
1368         if selfA._parameters["StoreInternalVariables"] \
1369             or selfA._toStore("CurrentState"):
1370             selfA.StoredVariables["CurrentState"].store( Xn )
1371         if selfA._toStore("ForecastState"):
1372             selfA.StoredVariables["ForecastState"].store( E2 )
1373         if selfA._toStore("BMA"):
1374             selfA.StoredVariables["BMA"].store( E2 - Xa )
1375         if selfA._toStore("InnovationAtCurrentState"):
1376             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1377         if selfA._toStore("SimulatedObservationAtCurrentState") \
1378             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1379             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1380         # ---> others
1381         if selfA._parameters["StoreInternalVariables"] \
1382             or selfA._toStore("CostFunctionJ") \
1383             or selfA._toStore("CostFunctionJb") \
1384             or selfA._toStore("CostFunctionJo") \
1385             or selfA._toStore("CurrentOptimum") \
1386             or selfA._toStore("APosterioriCovariance"):
1387             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1388             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1389             J   = Jb + Jo
1390             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1391             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1392             selfA.StoredVariables["CostFunctionJ" ].store( J )
1393             #
1394             if selfA._toStore("IndexOfOptimum") \
1395                 or selfA._toStore("CurrentOptimum") \
1396                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1397                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1398                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1399                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1400                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1401             if selfA._toStore("IndexOfOptimum"):
1402                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1403             if selfA._toStore("CurrentOptimum"):
1404                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1405             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1406                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1407             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1408                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1409             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1410                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1411             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1412                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1413         if selfA._toStore("APosterioriCovariance"):
1414             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1415         if selfA._parameters["EstimationOf"] == "Parameters" \
1416             and J < previousJMinimum:
1417             previousJMinimum    = J
1418             XaMin               = Xa
1419             if selfA._toStore("APosterioriCovariance"):
1420                 covarianceXaMin = Pn
1421     #
1422     # Final additional storage of the optimum for parameter estimation
1423     # ----------------------------------------------------------------------
1424     if selfA._parameters["EstimationOf"] == "Parameters":
1425         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1426         selfA.StoredVariables["Analysis"].store( XaMin )
1427         if selfA._toStore("APosterioriCovariance"):
1428             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1429         if selfA._toStore("BMA"):
1430             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1431     #
1432     return 0
1433
1434 # ==============================================================================
1435 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1436     """
1437     Incremental 3DVAR
1438     """
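         # Hedged summary of the scheme below (an illustrative reading, not a
         # specification): the outer loop relinearizes the observation operator,
         # taking its tangent Ht at the current iterate Xr, while the inner loop
         # minimizes the quadratic cost in the increment dx around Xb,
         #     J(dx) = 0.5*dx' B^-1 dx + 0.5*( d - Ht dx )' R^-1 ( d - Ht dx ),
         # where d = Y - H(Xb) is the innovation computed once at the background.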
1439     #
1440     # Initializations
1441     # ---------------
1442     #
1443     # Non-linear operator for the outer loop
1444     Hm = HO["Direct"].appliedTo
1445     #
1446     # Precomputation of the inverses of B and R
1447     BI = B.getI()
1448     RI = R.getI()
1449     #
1450     # Starting point of the optimization
1451     Xini = selfA._parameters["InitializationPoint"]
1452     #
1453     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1454     Innovation = Y - HXb
1455     #
1456     # Outer Loop
1457     # ----------
1458     iOuter = 0
1459     J      = 1./mpr
1460     DeltaJ = 1./mpr
1461     Xr     = Xini.reshape((-1,1))
1462     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1463         #
1464         # Inner Loop
1465         # ----------
1466         Ht = HO["Tangent"].asMatrix(Xr)
1467         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1468         #
1469         # Definition of the cost function
1470         # ------------------------------
1471         def CostFunction(dx):
1472             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1473             if selfA._parameters["StoreInternalVariables"] or \
1474                 selfA._toStore("CurrentState") or \
1475                 selfA._toStore("CurrentOptimum"):
1476                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1477             _HdX = Ht * _dX
1478             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1479             _dInnovation = Innovation - _HdX
1480             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1481                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1482                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1483             if selfA._toStore("InnovationAtCurrentState"):
1484                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1485             #
1486             Jb  = float( 0.5 * _dX.T * BI * _dX )
1487             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1488             J   = Jb + Jo
1489             #
1490             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1491             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1492             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1493             selfA.StoredVariables["CostFunctionJ" ].store( J )
1494             if selfA._toStore("IndexOfOptimum") or \
1495                 selfA._toStore("CurrentOptimum") or \
1496                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1497                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1498                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1499                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1500                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1501             if selfA._toStore("IndexOfOptimum"):
1502                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1503             if selfA._toStore("CurrentOptimum"):
1504                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1505             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1506                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1507             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1508                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1509             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1510                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1511             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1512                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1513             return J
1514         #
1515         def GradientOfCostFunction(dx):
1516             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1517             _HdX         = Ht * _dX
1518             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1519             _dInnovation = Innovation - _HdX
1520             GradJb       = BI * _dX
1521             GradJo       = - Ht.T @ (RI * _dInnovation)
1522             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1523             return GradJ
1524         #
1525         # Minimization of the functional
1526         # --------------------------------
1527         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1528         #
1529         if selfA._parameters["Minimizer"] == "LBFGSB":
1530             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1531             if "0.19" <= scipy.version.version <= "1.1.0":
1532                 import lbfgsbhlt as optimiseur
1533             else:
1534                 import scipy.optimize as optimiseur
1535             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1536                 func        = CostFunction,
1537                 x0          = numpy.zeros(Xini.size),
1538                 fprime      = GradientOfCostFunction,
1539                 args        = (),
1540                 bounds      = selfA._parameters["Bounds"],
1541                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1542                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1543                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1544                 iprint      = selfA._parameters["optiprint"],
1545                 )
1546             nfeval = Informations['funcalls']
1547             rc     = Informations['warnflag']
1548         elif selfA._parameters["Minimizer"] == "TNC":
1549             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1550                 func        = CostFunction,
1551                 x0          = numpy.zeros(Xini.size),
1552                 fprime      = GradientOfCostFunction,
1553                 args        = (),
1554                 bounds      = selfA._parameters["Bounds"],
1555                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1556                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1557                 ftol        = selfA._parameters["CostDecrementTolerance"],
1558                 messages    = selfA._parameters["optmessages"],
1559                 )
1560         elif selfA._parameters["Minimizer"] == "CG":
1561             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1562                 f           = CostFunction,
1563                 x0          = numpy.zeros(Xini.size),
1564                 fprime      = GradientOfCostFunction,
1565                 args        = (),
1566                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1567                 gtol        = selfA._parameters["GradientNormTolerance"],
1568                 disp        = selfA._parameters["optdisp"],
1569                 full_output = True,
1570                 )
1571         elif selfA._parameters["Minimizer"] == "NCG":
1572             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1573                 f           = CostFunction,
1574                 x0          = numpy.zeros(Xini.size),
1575                 fprime      = GradientOfCostFunction,
1576                 args        = (),
1577                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1578                 avextol     = selfA._parameters["CostDecrementTolerance"],
1579                 disp        = selfA._parameters["optdisp"],
1580                 full_output = True,
1581                 )
1582         elif selfA._parameters["Minimizer"] == "BFGS":
1583             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1584                 f           = CostFunction,
1585                 x0          = numpy.zeros(Xini.size),
1586                 fprime      = GradientOfCostFunction,
1587                 args        = (),
1588                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1589                 gtol        = selfA._parameters["GradientNormTolerance"],
1590                 disp        = selfA._parameters["optdisp"],
1591                 full_output = True,
1592                 )
1593         else:
1594             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1595         #
1596         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1597         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1598         #
1599         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1600             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1601             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1602         else:
1603             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1604         #
1605         Xr     = Minimum
1606         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
             J      = selfA.StoredVariables["CostFunctionJ" ][-1] # Refresh the outer-loop cost so that DeltaJ measures the decrement between successive outer iterations
1607         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1608     #
1609     # Obtaining the analysis
1610     # ----------------------
1611     Xa = Xr
1612     #
1613     selfA.StoredVariables["Analysis"].store( Xa )
1614     #
1615     if selfA._toStore("OMA") or \
1616         selfA._toStore("SigmaObs2") or \
1617         selfA._toStore("SimulationQuantiles") or \
1618         selfA._toStore("SimulatedObservationAtOptimum"):
1619         if selfA._toStore("SimulatedObservationAtCurrentState"):
1620             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1621         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1622             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1623         else:
1624             HXa = Hm( Xa )
1625     #
1626     # Computation of the analysis covariance
1627     # ---------------------------------
1628     if selfA._toStore("APosterioriCovariance") or \
1629         selfA._toStore("SimulationQuantiles") or \
1630         selfA._toStore("JacobianMatrixAtOptimum") or \
1631         selfA._toStore("KalmanGainAtOptimum"):
1632         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1633         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1634     if selfA._toStore("APosterioriCovariance") or \
1635         selfA._toStore("SimulationQuantiles") or \
1636         selfA._toStore("KalmanGainAtOptimum"):
1637         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1638         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1639     if selfA._toStore("APosterioriCovariance") or \
1640         selfA._toStore("SimulationQuantiles"):
1641         HessienneI = []
1642         nb = Xa.size
1643         for i in range(nb):
1644             _ee    = numpy.matrix(numpy.zeros(nb)).T
1645             _ee[i] = 1.
1646             _HtEE  = numpy.dot(HtM,_ee)
1647             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1648             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1649         HessienneI = numpy.matrix( HessienneI )
1650         A = HessienneI.I
1651         if min(A.shape) != max(A.shape):
1652             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1653         if (numpy.diag(A) < 0).any():
1654             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1655         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1656             try:
1657                 L = numpy.linalg.cholesky( A )
1658             except:
1659                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1660     if selfA._toStore("APosterioriCovariance"):
1661         selfA.StoredVariables["APosterioriCovariance"].store( A )
1662     if selfA._toStore("JacobianMatrixAtOptimum"):
1663         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1664     if selfA._toStore("KalmanGainAtOptimum"):
1665         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1666         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1667         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1668     #
1669     # Additional computations and/or storage
1670     # ---------------------------------------
1671     if selfA._toStore("Innovation") or \
1672         selfA._toStore("SigmaObs2") or \
1673         selfA._toStore("MahalanobisConsistency") or \
1674         selfA._toStore("OMB"):
1675         d  = Y - HXb
1676     if selfA._toStore("Innovation"):
1677         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1678     if selfA._toStore("BMA"):
1679         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1680     if selfA._toStore("OMA"):
1681         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1682     if selfA._toStore("OMB"):
1683         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1684     if selfA._toStore("SigmaObs2"):
1685         TraceR = R.trace(Y.size)
1686         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1687     if selfA._toStore("MahalanobisConsistency"):
1688         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1689     if selfA._toStore("SimulationQuantiles"):
1690         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
1691     if selfA._toStore("SimulatedObservationAtBackground"):
1692         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1693     if selfA._toStore("SimulatedObservationAtOptimum"):
1694         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1695     #
1696     return 0
1697
1698 # ==============================================================================
1699 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1700     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1701     """
1702     Maximum Likelihood Ensemble Filter
1703     """
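         # Hedged summary of the "MLEF13" variant below (an illustrative reading,
         # not a specification): after a single forecast of the ensemble, a
         # Gauss-Newton loop updates the ensemble coordinates w so as to
         # approximately minimize
         #     J(w) = 0.5*w'w + 0.5*( y - H(xf_mean + EaX w) )' R^-1 ( ... ),
         # and the analysis ensemble is rebuilt with the transform Ta = sqrtm(mH^-1).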
1704     if selfA._parameters["EstimationOf"] == "Parameters":
1705         selfA._parameters["StoreInternalVariables"] = True
1706     #
1707     # Operators
1708     # ----------
1709     H = HO["Direct"].appliedControledFormTo
1710     #
1711     if selfA._parameters["EstimationOf"] == "State":
1712         M = EM["Direct"].appliedControledFormTo
1713     #
1714     if CM is not None and "Tangent" in CM and U is not None:
1715         Cm = CM["Tangent"].asMatrix(Xb)
1716     else:
1717         Cm = None
1718     #
1719     # Number of steps equal to the number of observation steps
1720     # -------------------------------------------------------
1721     if hasattr(Y,"stepnumber"):
1722         duration = Y.stepnumber()
1723         __p = numpy.cumprod(Y.shape())[-1]
1724     else:
1725         duration = 2
1726         __p = numpy.array(Y).size
1727     #
1728     # Precomputation of the inverses of B and R
1729     # ----------------------------------
1730     if selfA._parameters["StoreInternalVariables"] \
1731         or selfA._toStore("CostFunctionJ") \
1732         or selfA._toStore("CostFunctionJb") \
1733         or selfA._toStore("CostFunctionJo") \
1734         or selfA._toStore("CurrentOptimum") \
1735         or selfA._toStore("APosterioriCovariance"):
1736         BI = B.getI()
1737     RI = R.getI()
1738     #
1739     # Initialization
1740     # --------------
1741     __n = Xb.size
1742     __m = selfA._parameters["NumberOfMembers"]
1743     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1744     else:                         Pn = B
1745     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1746     else:                         Rn = R
1747     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1748     #
1749     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1750         selfA.StoredVariables["Analysis"].store( Xb )
1751         if selfA._toStore("APosterioriCovariance"):
1752             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1753             covarianceXa = Pn
1754     #
1755     previousJMinimum = numpy.finfo(float).max
         nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Needed below for the optimum bookkeeping, as in the variational algorithms
1756     #
1757     for step in range(duration-1):
1758         if hasattr(Y,"store"):
1759             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1760         else:
1761             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1762         #
1763         if U is not None:
1764             if hasattr(U,"store") and len(U)>1:
1765                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1766             elif hasattr(U,"store") and len(U)==1:
1767                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1768             else:
1769                 Un = numpy.asmatrix(numpy.ravel( U )).T
1770         else:
1771             Un = None
1772         #
1773         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1774             Xn = CovarianceInflation( Xn,
1775                 selfA._parameters["InflationType"],
1776                 selfA._parameters["InflationFactor"],
1777                 )
1778         #
1779         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1780             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1781                 argsAsSerie = True,
1782                 returnSerieAsArrayMatrix = True )
1783             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1784             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1785                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1786                 Xn_predicted = Xn_predicted + Cm * Un
1787         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1788             # --- > By principle, M = Id, Q = 0
1789             Xn_predicted = Xn
1790         #
1791         #--------------------------
1792         if VariantM == "MLEF13":
1793             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1794             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1795             Ua  = numpy.identity(__m)
1796             __j = 0
1797             Deltaw = 1
1798             if not BnotT:
1799                 Ta  = numpy.identity(__m)
1800             vw  = numpy.zeros(__m)
1801             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1802                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1803                 #
1804                 if BnotT:
1805                     E1 = vx1 + _epsilon * EaX
1806                 else:
1807                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1808                 #
1809                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1810                     argsAsSerie = True,
1811                     returnSerieAsArrayMatrix = True )
1812                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1813                 #
1814                 if BnotT:
1815                     EaY = (HE2 - vy2) / _epsilon
1816                 else:
1817                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1818                 #
1819                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1820                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1821                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1822                 #
1823                 vw = vw + Deltaw
1824                 #
1825                 if not BnotT:
1826                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1827                 #
1828                 __j = __j + 1
1829             #
1830             if BnotT:
1831                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1832             #
1833             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1834         #--------------------------
1835         else:
1836             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1837         #
1838         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1839             Xn = CovarianceInflation( Xn,
1840                 selfA._parameters["InflationType"],
1841                 selfA._parameters["InflationFactor"],
1842                 )
1843         #
1844         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1845         #--------------------------
1846         #
1847         if selfA._parameters["StoreInternalVariables"] \
1848             or selfA._toStore("CostFunctionJ") \
1849             or selfA._toStore("CostFunctionJb") \
1850             or selfA._toStore("CostFunctionJo") \
1851             or selfA._toStore("APosterioriCovariance") \
1852             or selfA._toStore("InnovationAtCurrentAnalysis") \
1853             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1854             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1855             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1856             _Innovation = Ynpu - _HXa
1857         #
1858         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1859         # ---> with the analysis
1860         selfA.StoredVariables["Analysis"].store( Xa )
1861         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1862             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1863         if selfA._toStore("InnovationAtCurrentAnalysis"):
1864             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1865         # ---> with the current state
1866         if selfA._parameters["StoreInternalVariables"] \
1867             or selfA._toStore("CurrentState"):
1868             selfA.StoredVariables["CurrentState"].store( Xn )
1869         if selfA._toStore("ForecastState"):
1870             selfA.StoredVariables["ForecastState"].store( EMX )
1871         if selfA._toStore("BMA"):
1872             selfA.StoredVariables["BMA"].store( EMX - Xa )
1873         if selfA._toStore("InnovationAtCurrentState"):
1874             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1875         if selfA._toStore("SimulatedObservationAtCurrentState") \
1876             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1877             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1878         # ---> others
1879         if selfA._parameters["StoreInternalVariables"] \
1880             or selfA._toStore("CostFunctionJ") \
1881             or selfA._toStore("CostFunctionJb") \
1882             or selfA._toStore("CostFunctionJo") \
1883             or selfA._toStore("CurrentOptimum") \
1884             or selfA._toStore("APosterioriCovariance"):
1885             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1886             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1887             J   = Jb + Jo
1888             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1889             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1890             selfA.StoredVariables["CostFunctionJ" ].store( J )
1891             #
1892             if selfA._toStore("IndexOfOptimum") \
1893                 or selfA._toStore("CurrentOptimum") \
1894                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1895                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1896                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1897                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1898                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1899             if selfA._toStore("IndexOfOptimum"):
1900                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1901             if selfA._toStore("CurrentOptimum"):
1902                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1903             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1904                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1905             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1906                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1907             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1908                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1909             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1910                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1911         if selfA._toStore("APosterioriCovariance"):
1912             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1913         if selfA._parameters["EstimationOf"] == "Parameters" \
1914             and J < previousJMinimum:
1915             previousJMinimum    = J
1916             XaMin               = Xa
1917             if selfA._toStore("APosterioriCovariance"):
1918                 covarianceXaMin = Pn
1919     #
1920     # Final additional storage of the optimum for parameter estimation
1921     # ----------------------------------------------------------------------
1922     if selfA._parameters["EstimationOf"] == "Parameters":
1923         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1924         selfA.StoredVariables["Analysis"].store( XaMin )
1925         if selfA._toStore("APosterioriCovariance"):
1926             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1927         if selfA._toStore("BMA"):
1928             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1929     #
1930     return 0
1931
1932 # ==============================================================================
1933 def mmqr(
1934         func     = None,
1935         x0       = None,
1936         fprime   = None,
1937         bounds   = None,
1938         quantile = 0.5,
1939         maxfun   = 15000,
1940         toler    = 1.e-06,
1941         y        = None,
1942         ):
1943     """
1944     Computer implementation of the MMQR algorithm, based on the publication:
1945     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1946     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1947     """
1948     #
1949     # Retrieval of the initial data and information
1950     # --------------------------------------------------
1951     variables = numpy.ravel( x0 )
1952     mesures   = numpy.ravel( y )
1953     increment = sys.float_info[0]
1954     p         = variables.size
1955     n         = mesures.size
1956     quantile  = float(quantile)
1957     #
1958     # Computation of the MM parameters
1959     # ---------------------------
1960     tn      = float(toler) / n
1961     e0      = -tn / math.log(tn)
1962     epsilon = (e0-tn)/(1+math.log(e0))
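     # Hedged note on the three quantities above (an interpretation based on the
     # reference cited in the docstring, not a statement from the original code):
     # epsilon is the perturbation parameter smoothing the non-differentiable
     # quantile loss in the MM majorization, and it appears to be sized from the
     # requested tolerance "toler" so that the induced perturbation of the
     # objective stays small.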
1963     #
1964     # Initialization computations
1965     # ------------------------
1966     residus  = mesures - numpy.ravel( func( variables ) )
1967     poids    = 1./(epsilon+numpy.abs(residus))
1968     veps     = 1. - 2. * quantile - residus * poids
1969     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1970     iteration = 0
1971     #
1972     # Iterative search
1973     # -------------------
1974     while (increment > toler) and (iteration < maxfun) :
1975         iteration += 1
1976         #
1977         Derivees  = numpy.array(fprime(variables))
1978         Derivees  = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it goes through YACS pipes
1979         DeriveesT = Derivees.transpose()
1980         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1981         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
1982         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1983         #
1984         variables = variables + step
1985         if bounds is not None:
1986             # Warning: avoid an infinite loop if an interval is too small
1987             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1988                 step      = step/2.
1989                 variables = variables - step
1990         residus   = mesures - numpy.ravel( func(variables) )
1991         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1992         #
1993         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1994             step      = step/2.
1995             variables = variables - step
1996             residus   = mesures - numpy.ravel( func(variables) )
1997             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1998         #
1999         increment     = lastsurrogate-surrogate
2000         poids         = 1./(epsilon+numpy.abs(residus))
2001         veps          = 1. - 2. * quantile - residus * poids
2002         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2003     #
2004     # Discrepancy measure
2005     # --------------
2006     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2007     #
2008     return variables, Ecart, [n,p,iteration,increment,0]
2009
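     # Hedged usage sketch for mmqr (kept commented out; the names and data are
     # purely illustrative assumptions, not part of the library). If uncommented,
     # it would fit a straight line to noisy data in the median (quantile=0.5) sense:
     #
     #   import numpy
     #   _t    = numpy.arange(10.)
     #   _func = lambda x: x[0] + x[1] * _t                      # model at the abscissas
     #   _jac  = lambda x: numpy.array([numpy.ones(10), _t]).T   # its (10 x 2) Jacobian
     #   _y    = _func([1., 2.]) + 0.1 * numpy.random.randn(10)  # synthetic observations
     #   _xhat, _gap, _info = mmqr(func=_func, x0=[0., 0.], fprime=_jac, quantile=0.5, y=_y)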
2010 # ==============================================================================
2011 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2012     """
2013     Multi-step and multi-method 3DVAR
2014     """
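         # Hedged summary of the driver below (an illustrative reading, not a
         # specification): one assimilation cycle "oneCycle" is run per observation
         # step; in "State" estimation the last stored analysis is propagated by the
         # evolution model M to provide the next background, while in "Parameters"
         # estimation the state is simply carried over.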
2015     #
2016     # Initialization
2017     # --------------
2018     Xn = numpy.ravel(Xb).reshape((-1,1))
2019     #
2020     if selfA._parameters["EstimationOf"] == "State":
2021         M = EM["Direct"].appliedTo
2022         #
2023         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2024             selfA.StoredVariables["Analysis"].store( Xn )
2025             if selfA._toStore("APosterioriCovariance"):
2026                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
2027                 else:                         Pn = B
2028                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2029             if selfA._toStore("ForecastState"):
2030                 selfA.StoredVariables["ForecastState"].store( Xn )
2031     #
2032     if hasattr(Y,"stepnumber"):
2033         duration = Y.stepnumber()
2034     else:
2035         duration = 2
2036     #
2037     # Multi-step
2038     # ---------
2039     for step in range(duration-1):
2040         if hasattr(Y,"store"):
2041             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2042         else:
2043             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2044         #
2045         if selfA._parameters["EstimationOf"] == "State": # Forecast
2046             Xn = selfA.StoredVariables["Analysis"][-1]
2047             Xn_predicted = M( Xn )
2048             if selfA._toStore("ForecastState"):
2049                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2050         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2051             # --- > By principle, M = Id, Q = 0
2052             Xn_predicted = Xn
2053         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2054         #
2055         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2056     #
2057     return 0
2058
2059 # ==============================================================================
2060 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2061     """
2062     3DVAR PSAS
2063     """
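         # Hedged summary of the PSAS formulation below (an illustrative reading,
         # not a specification): the minimization is carried out in observation
         # space over w, with innovation d = Y - H(Xb),
         #     J(w) = 0.5*w'(H B H' + R) w - w'd,
         # and the state analysis is then recovered as Xa = Xb + B H' w.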
2064     #
2065     # Initializations
2066     # ---------------
2067     #
2068     # Operators
2069     Hm = HO["Direct"].appliedTo
2070     #
2071     # Possible use of a precomputed H(Xb) vector
2072     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2073         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2074     else:
2075         HXb = Hm( Xb )
2076     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2077     if Y.size != HXb.size:
2078         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2079     if max(Y.shape) != max(HXb.shape):
2080         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2081     #
2082     if selfA._toStore("JacobianMatrixAtBackground"):
2083         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2084         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2085         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2086     #
2087     Ht = HO["Tangent"].asMatrix(Xb)
2088     BHT = B * Ht.T
2089     HBHTpR = R + Ht * BHT
2090     Innovation = Y - HXb
2091     #
2092     # Starting point of the optimization
2093     Xini = numpy.zeros(Xb.shape)
2094     #
2095     # Definition of the cost function
2096     # ------------------------------
2097     def CostFunction(w):
2098         _W = numpy.asmatrix(numpy.ravel( w )).T
2099         if selfA._parameters["StoreInternalVariables"] or \
2100             selfA._toStore("CurrentState") or \
2101             selfA._toStore("CurrentOptimum"):
2102             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2103         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2104             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2105             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2106         if selfA._toStore("InnovationAtCurrentState"):
2107             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2108         #
2109         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2110         Jo  = float( - _W.T * Innovation )
2111         J   = Jb + Jo
2112         #
2113         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2114         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2115         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2116         selfA.StoredVariables["CostFunctionJ" ].store( J )
2117         if selfA._toStore("IndexOfOptimum") or \
2118             selfA._toStore("CurrentOptimum") or \
2119             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2120             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2121             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2122             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2123             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2124         if selfA._toStore("IndexOfOptimum"):
2125             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2126         if selfA._toStore("CurrentOptimum"):
2127             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2128         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2129             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2130         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2131             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2132         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2133             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2134         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2135             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2136         return J
2137     #
2138     def GradientOfCostFunction(w):
2139         _W = numpy.asmatrix(numpy.ravel( w )).T
2140         GradJb  = HBHTpR * _W
2141         GradJo  = - Innovation
2142         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2143         return GradJ
2144     #
2145     # Minimization of the functional
2146     # --------------------------------
2147     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2148     #
2149     if selfA._parameters["Minimizer"] == "LBFGSB":
2150         if "0.19" <= scipy.version.version <= "1.1.0":
2151             import lbfgsbhlt as optimiseur
2152         else:
2153             import scipy.optimize as optimiseur
2154         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2155             func        = CostFunction,
2156             x0          = Xini,
2157             fprime      = GradientOfCostFunction,
2158             args        = (),
2159             bounds      = selfA._parameters["Bounds"],
2160             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2161             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2162             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2163             iprint      = selfA._parameters["optiprint"],
2164             )
2165         nfeval = Informations['funcalls']
2166         rc     = Informations['warnflag']
2167     elif selfA._parameters["Minimizer"] == "TNC":
2168         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2169             func        = CostFunction,
2170             x0          = Xini,
2171             fprime      = GradientOfCostFunction,
2172             args        = (),
2173             bounds      = selfA._parameters["Bounds"],
2174             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2175             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2176             ftol        = selfA._parameters["CostDecrementTolerance"],
2177             messages    = selfA._parameters["optmessages"],
2178             )
2179     elif selfA._parameters["Minimizer"] == "CG":
2180         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2181             f           = CostFunction,
2182             x0          = Xini,
2183             fprime      = GradientOfCostFunction,
2184             args        = (),
2185             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2186             gtol        = selfA._parameters["GradientNormTolerance"],
2187             disp        = selfA._parameters["optdisp"],
2188             full_output = True,
2189             )
2190     elif selfA._parameters["Minimizer"] == "NCG":
2191         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2192             f           = CostFunction,
2193             x0          = Xini,
2194             fprime      = GradientOfCostFunction,
2195             args        = (),
2196             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2197             avextol     = selfA._parameters["CostDecrementTolerance"],
2198             disp        = selfA._parameters["optdisp"],
2199             full_output = True,
2200             )
2201     elif selfA._parameters["Minimizer"] == "BFGS":
2202         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2203             f           = CostFunction,
2204             x0          = Xini,
2205             fprime      = GradientOfCostFunction,
2206             args        = (),
2207             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2208             gtol        = selfA._parameters["GradientNormTolerance"],
2209             disp        = selfA._parameters["optdisp"],
2210             full_output = True,
2211             )
2212     else:
2213         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2214     #
2215     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2216     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2217     #
2218     # Workaround for a TNC bug on the returned Minimum
2219     # ------------------------------------------------
2220     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2221         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2222         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2223     else:
2224         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2225     #
2226     # Retrieval of the analysis
2227     # -------------------------
2228     Xa = Minimum
2229     #
2230     selfA.StoredVariables["Analysis"].store( Xa )
2231     #
2232     if selfA._toStore("OMA") or \
2233         selfA._toStore("SigmaObs2") or \
2234         selfA._toStore("SimulationQuantiles") or \
2235         selfA._toStore("SimulatedObservationAtOptimum"):
2236         if selfA._toStore("SimulatedObservationAtCurrentState"):
2237             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2238         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2239             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2240         else:
2241             HXa = Hm( Xa )
2242     #
2243     # Computation of the analysis error covariance
2244     # ---------------------------------------------
2245     if selfA._toStore("APosterioriCovariance") or \
2246         selfA._toStore("SimulationQuantiles") or \
2247         selfA._toStore("JacobianMatrixAtOptimum") or \
2248         selfA._toStore("KalmanGainAtOptimum"):
2249         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2250         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2251     if selfA._toStore("APosterioriCovariance") or \
2252         selfA._toStore("SimulationQuantiles") or \
2253         selfA._toStore("KalmanGainAtOptimum"):
2254         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2255         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2256     if selfA._toStore("APosterioriCovariance") or \
2257         selfA._toStore("SimulationQuantiles"):
2258         BI = B.getI()
2259         RI = R.getI()
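             # The a posteriori covariance A is the inverse of the Hessian of J,
             # assembled column by column by applying (BI + HaM * RI * HtM) to each unit vector _ee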
2260         HessienneI = []
2261         nb = Xa.size
2262         for i in range(nb):
2263             _ee    = numpy.matrix(numpy.zeros(nb)).T
2264             _ee[i] = 1.
2265             _HtEE  = numpy.dot(HtM,_ee)
2266             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2267             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2268         HessienneI = numpy.matrix( HessienneI )
2269         A = HessienneI.I
2270         if min(A.shape) != max(A.shape):
2271             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, although it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2272         if (numpy.diag(A) < 0).any():
2273             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2274         if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
2275             try:
2276                 L = numpy.linalg.cholesky( A )
2277             except numpy.linalg.LinAlgError:
2278                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2279     if selfA._toStore("APosterioriCovariance"):
2280         selfA.StoredVariables["APosterioriCovariance"].store( A )
2281     if selfA._toStore("JacobianMatrixAtOptimum"):
2282         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2283     if selfA._toStore("KalmanGainAtOptimum"):
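             # Two equivalent expressions of the Kalman gain: the first one inverts a matrix
             # of observation size, the second one a matrix of state size, whichever is smaller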
2284         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2285         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2286         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2287     #
2288     # Additional computations and/or storage
2289     # ---------------------------------------
2290     if selfA._toStore("Innovation") or \
2291         selfA._toStore("SigmaObs2") or \
2292         selfA._toStore("MahalanobisConsistency") or \
2293         selfA._toStore("OMB"):
2294         d  = Y - HXb
2295     if selfA._toStore("Innovation"):
2296         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2297     if selfA._toStore("BMA"):
2298         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2299     if selfA._toStore("OMA"):
2300         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2301     if selfA._toStore("OMB"):
2302         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2303     if selfA._toStore("SigmaObs2"):
2304         TraceR = R.trace(Y.size)
2305         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2306     if selfA._toStore("MahalanobisConsistency"):
2307         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2308     if selfA._toStore("SimulationQuantiles"):
2309         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2310     if selfA._toStore("SimulatedObservationAtBackground"):
2311         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2312     if selfA._toStore("SimulatedObservationAtOptimum"):
2313         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2314     #
2315     return 0
2316
2317 # ==============================================================================
2318 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2319     """
2320     Stochastic EnKF
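
         The analysis update is implemented in two selectable forms (VariantM):
         - "KalmanFilterFormula05" : gain built from the empirical covariances Pf*Ht and
           H*Pf*Ht of the forecast ensemble, each member being updated with a randomly
           perturbed observation ;
         - "KalmanFilterFormula16" : gain built from the normalized anomalies of the
           forecast ensemble and of a perturbed observation ensemble.
         Any other value raises a ValueError.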
2321     """
2322     if selfA._parameters["EstimationOf"] == "Parameters":
2323         selfA._parameters["StoreInternalVariables"] = True
2324     #
2325     # Operators
2326     H = HO["Direct"].appliedControledFormTo
2327     #
2328     if selfA._parameters["EstimationOf"] == "State":
2329         M = EM["Direct"].appliedControledFormTo
2330     #
2331     if CM is not None and "Tangent" in CM and U is not None:
2332         Cm = CM["Tangent"].asMatrix(Xb)
2333     else:
2334         Cm = None
2335     #
2336     # Observation duration and sizes
2337     if hasattr(Y,"stepnumber"):
2338         duration = Y.stepnumber()
2339         __p = numpy.cumprod(Y.shape())[-1]
2340     else:
2341         duration = 2
2342         __p = numpy.array(Y).size
2343     #
2344     # Precompute the inverses of B and R
2345     if selfA._parameters["StoreInternalVariables"] \
2346         or selfA._toStore("CostFunctionJ") \
2347         or selfA._toStore("CostFunctionJb") \
2348         or selfA._toStore("CostFunctionJo") \
2349         or selfA._toStore("CurrentOptimum") \
2350         or selfA._toStore("APosterioriCovariance"):
2351         BI = B.getI()
2352         RI = R.getI()
2353     #
2354     __n = Xb.size
2355     __m = selfA._parameters["NumberOfMembers"]
2356     #
2357     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2358     else:                         Pn = B
2359     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2360     else:                         Rn = R
2361     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2362     #
2363     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2364         selfA.StoredVariables["Analysis"].store( Xb )
2365         if selfA._toStore("APosterioriCovariance"):
2366             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2367             covarianceXa = Pn
2368     #
2369     previousJMinimum = numpy.finfo(float).max
         nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # number of cost values already stored, used for the optimum bookkeeping below
2370     #
2371     for step in range(duration-1):
2372         if hasattr(Y,"store"):
2373             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2374         else:
2375             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2376         #
2377         if U is not None:
2378             if hasattr(U,"store") and len(U)>1:
2379                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2380             elif hasattr(U,"store") and len(U)==1:
2381                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2382             else:
2383                 Un = numpy.asmatrix(numpy.ravel( U )).T
2384         else:
2385             Un = None
2386         #
2387         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2388             Xn = CovarianceInflation( Xn,
2389                 selfA._parameters["InflationType"],
2390                 selfA._parameters["InflationFactor"],
2391                 )
2392         #
2393         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2394             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2395                 argsAsSerie = True,
2396                 returnSerieAsArrayMatrix = True )
2397             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2398             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2399                 argsAsSerie = True,
2400                 returnSerieAsArrayMatrix = True )
2401             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it would be counted twice!
2402                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2403                 Xn_predicted = Xn_predicted + Cm * Un
2404         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2405             # ---> By principle, M = Id and Q = 0
2406             Xn_predicted = Xn
2407             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2408                 argsAsSerie = True,
2409                 returnSerieAsArrayMatrix = True )
2410         #
2411         # Mean of forecast and observation of forecast
2412         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2413         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2414         #
2415         #--------------------------
2416         if VariantM == "KalmanFilterFormula05":
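                 # Empirical covariances Pf*Ht and H*Pf*Ht from the forecast anomalies,
                 # then Kalman gain Kn = Pf*Ht * ( R + H*Pf*Ht )^-1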
2417             PfHT, HPfHT = 0., 0.
2418             for i in range(__m):
2419                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2420                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2421                 PfHT  += Exfi * Eyfi.T
2422                 HPfHT += Eyfi * Eyfi.T
2423             PfHT  = (1./(__m-1)) * PfHT
2424             HPfHT = (1./(__m-1)) * HPfHT
2425             Kn     = PfHT * ( R + HPfHT ).I
2426             del PfHT, HPfHT
2427             #
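                 # Stochastic update of each member, using an observation perturbed by a
                 # random draw ri ~ N(0, Rn)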
2428             for i in range(__m):
2429                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2430                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2431         #--------------------------
2432         elif VariantM == "KalmanFilterFormula16":
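                 # Perturbed observation ensemble EpY, then gain built from the normalized
                 # state anomalies EaX and observation anomalies EaY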
2433             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2434             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2435             #
2436             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2437             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2438             #
2439             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2440             #
2441             for i in range(__m):
2442                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2443         #--------------------------
2444         else:
2445             raise ValueError("VariantM has to be chosen from the authorized methods list.")
2446         #
2447         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2448             Xn = CovarianceInflation( Xn,
2449                 selfA._parameters["InflationType"],
2450                 selfA._parameters["InflationFactor"],
2451                 )
2452         #
2453         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2454         #--------------------------
2455         #
2456         if selfA._parameters["StoreInternalVariables"] \
2457             or selfA._toStore("CostFunctionJ") \
2458             or selfA._toStore("CostFunctionJb") \
2459             or selfA._toStore("CostFunctionJo") \
2460             or selfA._toStore("APosterioriCovariance") \
2461             or selfA._toStore("InnovationAtCurrentAnalysis") \
2462             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2463             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2464             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2465             _Innovation = Ynpu - _HXa
2466         #
2467         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2468         # ---> with the analysis
2469         selfA.StoredVariables["Analysis"].store( Xa )
2470         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2471             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2472         if selfA._toStore("InnovationAtCurrentAnalysis"):
2473             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2474         # ---> with the current state
2475         if selfA._parameters["StoreInternalVariables"] \
2476             or selfA._toStore("CurrentState"):
2477             selfA.StoredVariables["CurrentState"].store( Xn )
2478         if selfA._toStore("ForecastState"):
2479             selfA.StoredVariables["ForecastState"].store( EMX )
2480         if selfA._toStore("BMA"):
2481             selfA.StoredVariables["BMA"].store( EMX - Xa )
2482         if selfA._toStore("InnovationAtCurrentState"):
2483             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2484         if selfA._toStore("SimulatedObservationAtCurrentState") \
2485             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2486             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2487         # ---> others
2488         if selfA._parameters["StoreInternalVariables"] \
2489             or selfA._toStore("CostFunctionJ") \
2490             or selfA._toStore("CostFunctionJb") \
2491             or selfA._toStore("CostFunctionJo") \
2492             or selfA._toStore("CurrentOptimum") \
2493             or selfA._toStore("APosterioriCovariance"):
2494             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2495             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2496             J   = Jb + Jo
2497             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2498             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2499             selfA.StoredVariables["CostFunctionJ" ].store( J )
2500             #
2501             if selfA._toStore("IndexOfOptimum") \
2502                 or selfA._toStore("CurrentOptimum") \
2503                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2504                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2505                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2506                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2507                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2508             if selfA._toStore("IndexOfOptimum"):
2509                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2510             if selfA._toStore("CurrentOptimum"):
2511                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2512             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2513                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2514             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2515                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2516             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2517                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2518             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2519                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2520         if selfA._toStore("APosterioriCovariance"):
2521             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2522         if selfA._parameters["EstimationOf"] == "Parameters" \
2523             and J < previousJMinimum:
2524             previousJMinimum    = J
2525             XaMin               = Xa
2526             if selfA._toStore("APosterioriCovariance"):
2527                 covarianceXaMin = Pn
2528     #
2529     # Final additional storage of the optimum for parameter estimation
2530     # -----------------------------------------------------------------
2531     if selfA._parameters["EstimationOf"] == "Parameters":
2532         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2533         selfA.StoredVariables["Analysis"].store( XaMin )
2534         if selfA._toStore("APosterioriCovariance"):
2535             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2536         if selfA._toStore("BMA"):
2537             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2538     #
2539     return 0
2540
2541 # ==============================================================================
2542 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2543     """
2544     3DVAR
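
         Minimizes the classical 3D variational cost function
             J(x) = 0.5*(x-Xb).T*B^-1*(x-Xb) + 0.5*(Y-H(x)).T*R^-1*(Y-H(x))
         using its exact gradient B^-1*(x-Xb) - H'.T*R^-1*(Y-H(x)), where H' is the
         tangent linear of the observation operator H.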
2545     """
2546     #
2547     # Initializations
2548     # ---------------
2549     #
2550     # Operators
2551     Hm = HO["Direct"].appliedTo
2552     Ha = HO["Adjoint"].appliedInXTo
2553     #
2554     # Optional use of a precomputed H(Xb) vector
2555     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2556         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2557     else:
2558         HXb = Hm( Xb )
2559     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2560     if Y.size != HXb.size:
2561         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2562     if max(Y.shape) != max(HXb.shape):
2563         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2564     #
2565     if selfA._toStore("JacobianMatrixAtBackground"):
2566         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2567         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2568         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2569     #
2570     # Precompute the inverses of B and R
2571     BI = B.getI()
2572     RI = R.getI()
2573     #
2574     # Starting point of the optimization
2575     Xini = selfA._parameters["InitializationPoint"]
2576     #
2577     # Definition of the cost function
2578     # -------------------------------
2579     def CostFunction(x):
2580         _X  = numpy.asmatrix(numpy.ravel( x )).T
2581         if selfA._parameters["StoreInternalVariables"] or \
2582             selfA._toStore("CurrentState") or \
2583             selfA._toStore("CurrentOptimum"):
2584             selfA.StoredVariables["CurrentState"].store( _X )
2585         _HX = Hm( _X )
2586         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2587         _Innovation = Y - _HX
2588         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2589             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2590             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2591         if selfA._toStore("InnovationAtCurrentState"):
2592             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2593         #
2594         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2595         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2596         J   = Jb + Jo
2597         #
2598         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2599         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2600         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2601         selfA.StoredVariables["CostFunctionJ" ].store( J )
2602         if selfA._toStore("IndexOfOptimum") or \
2603             selfA._toStore("CurrentOptimum") or \
2604             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2605             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2606             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2607             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2608             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2609         if selfA._toStore("IndexOfOptimum"):
2610             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2611         if selfA._toStore("CurrentOptimum"):
2612             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2613         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2614             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2615         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2616             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2617         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2618             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2619         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2620             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2621         return J
2622     #
2623     def GradientOfCostFunction(x):
2624         _X      = numpy.asmatrix(numpy.ravel( x )).T
2625         _HX     = Hm( _X )
2626         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
2627         GradJb  = BI * (_X - Xb)
2628         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
2629         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2630         return GradJ
2631     #
2632     # Minimization of the cost function
2633     # ----------------------------------
2634     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2635     #
2636     if selfA._parameters["Minimizer"] == "LBFGSB":
2637         if "0.19" <= scipy.version.version <= "1.1.0":
2638             import lbfgsbhlt as optimiseur
2639         else:
2640             import scipy.optimize as optimiseur
2641         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2642             func        = CostFunction,
2643             x0          = Xini,
2644             fprime      = GradientOfCostFunction,
2645             args        = (),
2646             bounds      = selfA._parameters["Bounds"],
2647             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2648             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2649             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2650             iprint      = selfA._parameters["optiprint"],
2651             )
2652         nfeval = Informations['funcalls']
2653         rc     = Informations['warnflag']
2654     elif selfA._parameters["Minimizer"] == "TNC":
2655         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2656             func        = CostFunction,
2657             x0          = Xini,
2658             fprime      = GradientOfCostFunction,
2659             args        = (),
2660             bounds      = selfA._parameters["Bounds"],
2661             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2662             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2663             ftol        = selfA._parameters["CostDecrementTolerance"],
2664             messages    = selfA._parameters["optmessages"],
2665             )
2666     elif selfA._parameters["Minimizer"] == "CG":
2667         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2668             f           = CostFunction,
2669             x0          = Xini,
2670             fprime      = GradientOfCostFunction,
2671             args        = (),
2672             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2673             gtol        = selfA._parameters["GradientNormTolerance"],
2674             disp        = selfA._parameters["optdisp"],
2675             full_output = True,
2676             )
2677     elif selfA._parameters["Minimizer"] == "NCG":
2678         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2679             f           = CostFunction,
2680             x0          = Xini,
2681             fprime      = GradientOfCostFunction,
2682             args        = (),
2683             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2684             avextol     = selfA._parameters["CostDecrementTolerance"],
2685             disp        = selfA._parameters["optdisp"],
2686             full_output = True,
2687             )
2688     elif selfA._parameters["Minimizer"] == "BFGS":
2689         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2690             f           = CostFunction,
2691             x0          = Xini,
2692             fprime      = GradientOfCostFunction,
2693             args        = (),
2694             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2695             gtol        = selfA._parameters["GradientNormTolerance"],
2696             disp        = selfA._parameters["optdisp"],
2697             full_output = True,
2698             )
2699     else:
2700         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2701     #
2702     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2703     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2704     #
2705     # Workaround for a TNC bug on the returned Minimum
2706     # ------------------------------------------------
2707     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2708         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2709     #
2710     # Retrieval of the analysis
2711     # -------------------------
2712     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2713     #
2714     selfA.StoredVariables["Analysis"].store( Xa )
2715     #
2716     if selfA._toStore("OMA") or \
2717         selfA._toStore("SigmaObs2") or \
2718         selfA._toStore("SimulationQuantiles") or \
2719         selfA._toStore("SimulatedObservationAtOptimum"):
2720         if selfA._toStore("SimulatedObservationAtCurrentState"):
2721             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2722         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2723             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2724         else:
2725             HXa = Hm( Xa )
2726     #
2727     # Computation of the analysis error covariance
2728     # ---------------------------------------------
2729     if selfA._toStore("APosterioriCovariance") or \
2730         selfA._toStore("SimulationQuantiles") or \
2731         selfA._toStore("JacobianMatrixAtOptimum") or \
2732         selfA._toStore("KalmanGainAtOptimum"):
2733         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2734         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2735     if selfA._toStore("APosterioriCovariance") or \
2736         selfA._toStore("SimulationQuantiles") or \
2737         selfA._toStore("KalmanGainAtOptimum"):
2738         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2739         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2740     if selfA._toStore("APosterioriCovariance") or \
2741         selfA._toStore("SimulationQuantiles"):
2742         HessienneI = []
2743         nb = Xa.size
2744         for i in range(nb):
2745             _ee    = numpy.matrix(numpy.zeros(nb)).T
2746             _ee[i] = 1.
2747             _HtEE  = numpy.dot(HtM,_ee)
2748             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2749             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2750         HessienneI = numpy.matrix( HessienneI )
2751         A = HessienneI.I
2752         if min(A.shape) != max(A.shape):
2753             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, although it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2754         if (numpy.diag(A) < 0).any():
2755             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2756         if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
2757             try:
2758                 L = numpy.linalg.cholesky( A )
2759             except numpy.linalg.LinAlgError:
2760                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2761     if selfA._toStore("APosterioriCovariance"):
2762         selfA.StoredVariables["APosterioriCovariance"].store( A )
2763     if selfA._toStore("JacobianMatrixAtOptimum"):
2764         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2765     if selfA._toStore("KalmanGainAtOptimum"):
2766         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2767         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2768         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2769     #
2770     # Additional computations and/or storage
2771     # ---------------------------------------
2772     if selfA._toStore("Innovation") or \
2773         selfA._toStore("SigmaObs2") or \
2774         selfA._toStore("MahalanobisConsistency") or \
2775         selfA._toStore("OMB"):
2776         d  = Y - HXb
2777     if selfA._toStore("Innovation"):
2778         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2779     if selfA._toStore("BMA"):
2780         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2781     if selfA._toStore("OMA"):
2782         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2783     if selfA._toStore("OMB"):
2784         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2785     if selfA._toStore("SigmaObs2"):
2786         TraceR = R.trace(Y.size)
2787         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2788     if selfA._toStore("MahalanobisConsistency"):
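             # 2*Jmin divided by the number of observations, classically expected to be
             # close to 1 when the background and observation error statistics are consistent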
2789         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2790     if selfA._toStore("SimulationQuantiles"):
2791         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2792     if selfA._toStore("SimulatedObservationAtBackground"):
2793         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2794     if selfA._toStore("SimulatedObservationAtOptimum"):
2795         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2796     #
2797     return 0
2798
2799 # ==============================================================================
2800 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2801     """
2802     4DVAR
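
         Minimizes, with respect to the initial state x0, the cost function
             J(x0) = 0.5*(x0-Xb).T*B^-1*(x0-Xb)
                     + 0.5 * sum_k (Y_k - H(x_k)).T*R^-1*(Y_k - H(x_k))
         with x_(k+1) = M(x_k, u_k) over the assimilation window; the gradient is
         obtained by a backward integration of the adjoint model.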
2803     """
2804     #
2805     # Initializations
2806     # ---------------
2807     #
2808     # Operators
2809     Hm = HO["Direct"].appliedControledFormTo
2810     Mm = EM["Direct"].appliedControledFormTo
2811     #
2812     if CM is not None and "Tangent" in CM and U is not None:
2813         Cm = CM["Tangent"].asMatrix(Xb)
2814     else:
2815         Cm = None
2816     #
2817     def Un(_step):
2818         if U is not None:
2819             if hasattr(U,"store") and 1<=_step<len(U) :
2820                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2821             elif hasattr(U,"store") and len(U)==1:
2822                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2823             else:
2824                 _Un = numpy.asmatrix(numpy.ravel( U )).T
2825         else:
2826             _Un = None
2827         return _Un
2828     def CmUn(_xn,_un):
2829         if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it would be counted twice!
2830             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2831             _CmUn = _Cm * _un
2832         else:
2833             _CmUn = 0.
2834         return _CmUn
2835     #
2836     # Remark: the observations are used starting from time step number 1, and are
2837     # taken from Yo according to these indices. Step 0 is therefore never used,
2838     # since the first assimilation stage starts with the observation of step 1.
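     # For example, with duration == 3, the cost function loop below uses Y[1] and
     # then Y[2], and Y[0] is never read.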
2840     #
2841     # Number of steps identical to the number of observation steps
2842     if hasattr(Y,"stepnumber"):
2843         duration = Y.stepnumber()
2844     else:
2845         duration = 2
2846     #
2847     # Precompute the inverses of B and R
2848     BI = B.getI()
2849     RI = R.getI()
2850     #
2851     # Starting point of the optimization
2852     Xini = selfA._parameters["InitializationPoint"]
2853     #
2854     # Definition of the cost function
2855     # -------------------------------
2856     selfA.DirectCalculation = [None,] # Step 0 is not observed
2857     selfA.DirectInnovation  = [None,] # Step 0 is not observed
2858     def CostFunction(x):
2859         _X  = numpy.asmatrix(numpy.ravel( x )).T
2860         if selfA._parameters["StoreInternalVariables"] or \
2861             selfA._toStore("CurrentState") or \
2862             selfA._toStore("CurrentOptimum"):
2863             selfA.StoredVariables["CurrentState"].store( _X )
2864         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2865         selfA.DirectCalculation = [None,]
2866         selfA.DirectInnovation  = [None,]
2867         Jo  = 0.
2868         _Xn = _X
2869         for step in range(0,duration-1):
2870             if hasattr(Y,"store"):
2871                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2872             else:
2873                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2874             _Un = Un(step)
2875             #
2876             # Evolution step
2877             if selfA._parameters["EstimationOf"] == "State":
2878                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2879             elif selfA._parameters["EstimationOf"] == "Parameters":
2880                 pass
2881             #
2882             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2883                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2884                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2885             #
2886             # Discrepancy with the observations
2887             if selfA._parameters["EstimationOf"] == "State":
2888                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2889             elif selfA._parameters["EstimationOf"] == "Parameters":
2890                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2891             #
2892             # Storage of the state
2893             selfA.DirectCalculation.append( _Xn )
2894             selfA.DirectInnovation.append( _YmHMX )
2895             #
2896             # Contribution to the observation term of the cost function
2897             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2898         J = Jb + Jo
2899         #
2900         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2901         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2902         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2903         selfA.StoredVariables["CostFunctionJ" ].store( J )
2904         if selfA._toStore("IndexOfOptimum") or \
2905             selfA._toStore("CurrentOptimum") or \
2906             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2907             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2908             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2909             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2910         if selfA._toStore("IndexOfOptimum"):
2911             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2912         if selfA._toStore("CurrentOptimum"):
2913             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2914         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2915             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2916         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2917             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2918         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2919             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2920         return J
2921     #
2922     def GradientOfCostFunction(x):
2923         _X      = numpy.asmatrix(numpy.ravel( x )).T
2924         GradJb  = BI * (_X - Xb)
2925         GradJo  = 0.
2926         for step in range(duration-1,0,-1):
2927             # Retrieve the last stored evolution state
2928             _Xn = selfA.DirectCalculation.pop()
2929             # Retrieve the last stored innovation
2930             _YmHMX = selfA.DirectInnovation.pop()
2931             # Computation of the adjoints
2932             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2933             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2934             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2935             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2936             # Computation of the gradient by the adjoint state
2937             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, this is equivalent to: Ha( (_Xn, RI * _YmHMX) )
2938             GradJo = Ma * GradJo               # For a linear Ma, this is equivalent to: Ma( (_Xn, GradJo) )
2939         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2940         return GradJ
2941     #
2942     # Minimization of the cost function
2943     # ----------------------------------
2944     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2945     #
2946     if selfA._parameters["Minimizer"] == "LBFGSB":
2947         if "0.19" <= scipy.version.version <= "1.1.0":
2948             import lbfgsbhlt as optimiseur
2949         else:
2950             import scipy.optimize as optimiseur
2951         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2952             func        = CostFunction,
2953             x0          = Xini,
2954             fprime      = GradientOfCostFunction,
2955             args        = (),
2956             bounds      = selfA._parameters["Bounds"],
2957             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2958             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2959             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2960             iprint      = selfA._parameters["optiprint"],
2961             )
2962         nfeval = Informations['funcalls']
2963         rc     = Informations['warnflag']
2964     elif selfA._parameters["Minimizer"] == "TNC":
2965         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2966             func        = CostFunction,
2967             x0          = Xini,
2968             fprime      = GradientOfCostFunction,
2969             args        = (),
2970             bounds      = selfA._parameters["Bounds"],
2971             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2972             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2973             ftol        = selfA._parameters["CostDecrementTolerance"],
2974             messages    = selfA._parameters["optmessages"],
2975             )
2976     elif selfA._parameters["Minimizer"] == "CG":
2977         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2978             f           = CostFunction,
2979             x0          = Xini,
2980             fprime      = GradientOfCostFunction,
2981             args        = (),
2982             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2983             gtol        = selfA._parameters["GradientNormTolerance"],
2984             disp        = selfA._parameters["optdisp"],
2985             full_output = True,
2986             )
2987     elif selfA._parameters["Minimizer"] == "NCG":
2988         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2989             f           = CostFunction,
2990             x0          = Xini,
2991             fprime      = GradientOfCostFunction,
2992             args        = (),
2993             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2994             avextol     = selfA._parameters["CostDecrementTolerance"],
2995             disp        = selfA._parameters["optdisp"],
2996             full_output = True,
2997             )
2998     elif selfA._parameters["Minimizer"] == "BFGS":
2999         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3000             f           = CostFunction,
3001             x0          = Xini,
3002             fprime      = GradientOfCostFunction,
3003             args        = (),
3004             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3005             gtol        = selfA._parameters["GradientNormTolerance"],
3006             disp        = selfA._parameters["optdisp"],
3007             full_output = True,
3008             )
3009     else:
3010         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3011     #
3012     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3013     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3014     #
3015     # Workaround for a TNC bug on the returned Minimum
3016     # ------------------------------------------------
3017     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3018         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3019     #
3020     # Retrieval of the analysis
3021     # -------------------------
3022     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3023     #
3024     selfA.StoredVariables["Analysis"].store( Xa )
3025     #
3026     # Additional computations and/or storage
3027     # ---------------------------------------
3028     if selfA._toStore("BMA"):
3029         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3030     #
3031     return 0
3032
3033 # ==============================================================================
3034 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3035     """
3036     3DVAR variational analysis with no inversion of B
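
         The minimization is carried out on the control variable v, with the change of
         variable x = Xb + B*v, so that Jb = 0.5*v.T*B.T*v and neither the cost
         function nor its gradient requires the inverse of B.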
3037     """
3038     #
3039     # Initializations
3040     # ---------------
3041     #
3042     # Operators
3043     Hm = HO["Direct"].appliedTo
3044     Ha = HO["Adjoint"].appliedInXTo
3045     #
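     # Optional use of a precomputed H(Xb) vector, mirroring std3dvar above; HXb is
     # required further below for the innovation d = Y - HXb and its related storages
     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
     else:
         HXb = Hm( Xb )
     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
     #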
3046     # Precompute the transpose of B and the inverse of R
3047     BT = B.getT()
3048     RI = R.getI()
3049     #
3050     # Starting point of the optimization
3051     Xini = numpy.zeros(Xb.shape)
3052     #
3053     # Definition of the cost function
3054     # -------------------------------
3055     def CostFunction(v):
3056         _V = numpy.asmatrix(numpy.ravel( v )).T
3057         _X = Xb + B * _V
3058         if selfA._parameters["StoreInternalVariables"] or \
3059             selfA._toStore("CurrentState") or \
3060             selfA._toStore("CurrentOptimum"):
3061             selfA.StoredVariables["CurrentState"].store( _X )
3062         _HX = Hm( _X )
3063         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3064         _Innovation = Y - _HX
3065         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3066             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3067             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3068         if selfA._toStore("InnovationAtCurrentState"):
3069             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3070         #
3071         Jb  = float( 0.5 * _V.T * BT * _V )
3072         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3073         J   = Jb + Jo
3074         #
3075         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3076         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3077         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3078         selfA.StoredVariables["CostFunctionJ" ].store( J )
3079         if selfA._toStore("IndexOfOptimum") or \
3080             selfA._toStore("CurrentOptimum") or \
3081             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3082             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3083             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3084             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3085             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3086         if selfA._toStore("IndexOfOptimum"):
3087             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3088         if selfA._toStore("CurrentOptimum"):
3089             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3090         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3091             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3092         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3093             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3094         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3095             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3096         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3097             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3098         return J
3099     #
3100     def GradientOfCostFunction(v):
3101         _V = numpy.asmatrix(numpy.ravel( v )).T
3102         _X = Xb + B * _V
3103         _HX     = Hm( _X )
3104         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3105         GradJb  = BT * _V
3106         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3107         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3108         return GradJ
3109     #
3110     # Minimization of the cost function
3111     # ----------------------------------
3112     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3113     #
3114     if selfA._parameters["Minimizer"] == "LBFGSB":
3115         if "0.19" <= scipy.version.version <= "1.1.0":
3116             import lbfgsbhlt as optimiseur
3117         else:
3118             import scipy.optimize as optimiseur
3119         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3120             func        = CostFunction,
3121             x0          = Xini,
3122             fprime      = GradientOfCostFunction,
3123             args        = (),
3124             bounds      = selfA._parameters["Bounds"],
3125             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3126             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3127             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3128             iprint      = selfA._parameters["optiprint"],
3129             )
3130         nfeval = Informations['funcalls']
3131         rc     = Informations['warnflag']
3132     elif selfA._parameters["Minimizer"] == "TNC":
3133         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3134             func        = CostFunction,
3135             x0          = Xini,
3136             fprime      = GradientOfCostFunction,
3137             args        = (),
3138             bounds      = selfA._parameters["Bounds"],
3139             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3140             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3141             ftol        = selfA._parameters["CostDecrementTolerance"],
3142             messages    = selfA._parameters["optmessages"],
3143             )
3144     elif selfA._parameters["Minimizer"] == "CG":
3145         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3146             f           = CostFunction,
3147             x0          = Xini,
3148             fprime      = GradientOfCostFunction,
3149             args        = (),
3150             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3151             gtol        = selfA._parameters["GradientNormTolerance"],
3152             disp        = selfA._parameters["optdisp"],
3153             full_output = True,
3154             )
3155     elif selfA._parameters["Minimizer"] == "NCG":
3156         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3157             f           = CostFunction,
3158             x0          = Xini,
3159             fprime      = GradientOfCostFunction,
3160             args        = (),
3161             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3162             avextol     = selfA._parameters["CostDecrementTolerance"],
3163             disp        = selfA._parameters["optdisp"],
3164             full_output = True,
3165             )
3166     elif selfA._parameters["Minimizer"] == "BFGS":
3167         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3168             f           = CostFunction,
3169             x0          = Xini,
3170             fprime      = GradientOfCostFunction,
3171             args        = (),
3172             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3173             gtol        = selfA._parameters["GradientNormTolerance"],
3174             disp        = selfA._parameters["optdisp"],
3175             full_output = True,
3176             )
3177     else:
3178         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3179     #
3180     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3181     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3182     #
3183     # Workaround for a TNC bug on the returned Minimum
3184     # ------------------------------------------------
3185     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3186         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3187         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3188     else:
3189         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3190     #
3191     # Retrieval of the analysis
3192     # -------------------------
3193     Xa = Minimum
3194     #
3195     selfA.StoredVariables["Analysis"].store( Xa )
3196     #
3197     if selfA._toStore("OMA") or \
3198         selfA._toStore("SigmaObs2") or \
3199         selfA._toStore("SimulationQuantiles") or \
3200         selfA._toStore("SimulatedObservationAtOptimum"):
3201         if selfA._toStore("SimulatedObservationAtCurrentState"):
3202             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3203         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3204             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3205         else:
3206             HXa = Hm( Xa )
3207     #
3208     # Computation of the analysis error covariance
3209     # ---------------------------------------------
3210     if selfA._toStore("APosterioriCovariance") or \
3211         selfA._toStore("SimulationQuantiles") or \
3212         selfA._toStore("JacobianMatrixAtOptimum") or \
3213         selfA._toStore("KalmanGainAtOptimum"):
3214         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3215         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3216     if selfA._toStore("APosterioriCovariance") or \
3217         selfA._toStore("SimulationQuantiles") or \
3218         selfA._toStore("KalmanGainAtOptimum"):
3219         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3220         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3221     if selfA._toStore("APosterioriCovariance") or \
3222         selfA._toStore("SimulationQuantiles"):
3223         BI = B.getI()
3224         HessienneI = []
3225         nb = Xa.size
3226         for i in range(nb):
3227             _ee    = numpy.matrix(numpy.zeros(nb)).T
3228             _ee[i] = 1.
3229             _HtEE  = numpy.dot(HtM,_ee)
3230             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3231             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3232         HessienneI = numpy.matrix( HessienneI )
3233         A = HessienneI.I
3234         if min(A.shape) != max(A.shape):
3235             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3236         if (numpy.diag(A) < 0).any():
3237             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3238         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
3239             try:
3240                 numpy.linalg.cholesky( A )
3241             except numpy.linalg.LinAlgError:
3242                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3243     if selfA._toStore("APosterioriCovariance"):
3244         selfA.StoredVariables["APosterioriCovariance"].store( A )
3245     if selfA._toStore("JacobianMatrixAtOptimum"):
3246         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
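    # The Kalman gain at the optimum is computed in the form best suited to
    # the problem dimensions: K = B H^T (R + H B H^T)^{-1} when there are no
    # more observations than state variables, and the equivalent information
    # form K = (B^{-1} + H^T R^{-1} H)^{-1} H^T R^{-1} otherwise.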
3247     if selfA._toStore("KalmanGainAtOptimum"):
3248         if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3249         elif (Y.size >  Xb.size): KG = (B.getI() + numpy.dot(HaM, RI * HtM)).I * HaM * RI # B.getI() used directly so this branch does not depend on BI defined in the covariance block above
3250         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3251     #
3252     # Additional calculations and/or storage
3253     # ---------------------------------------
3254     if selfA._toStore("Innovation") or \
3255         selfA._toStore("SigmaObs2") or \
3256         selfA._toStore("MahalanobisConsistency") or \
3257         selfA._toStore("OMB"):
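        # Innovation: departure of the observations from the simulated
        # background, d = Y - H(Xb).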
3258         d  = Y - HXb
3259     if selfA._toStore("Innovation"):
3260         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3261     if selfA._toStore("BMA"):
3262         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3263     if selfA._toStore("OMA"):
3264         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3265     if selfA._toStore("OMB"):
3266         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3267     if selfA._toStore("SigmaObs2"):
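        # A posteriori estimate of the observation error variance scaling,
        # SigmaObs2 = (Y - H(Xb))^T (Y - H(Xa)) / Tr(R), in the spirit of
        # Desroziers-type diagnostics.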
3268         TraceR = R.trace(Y.size)
3269         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3270     if selfA._toStore("MahalanobisConsistency"):
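        # Consistency indicator 2 J(Xa) / p, with p the number of observations;
        # it is expected to be close to 1 when the error statistics B and R are
        # consistent with the data.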
3271         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3272     if selfA._toStore("SimulationQuantiles"):
3273         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3274     if selfA._toStore("SimulatedObservationAtBackground"):
3275         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3276     if selfA._toStore("SimulatedObservationAtOptimum"):
3277         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3278     #
3279     return 0
3280
3281 # ==============================================================================
3282 if __name__ == "__main__":
3283     print('\n AUTODIAGNOSTIC\n')