]> SALOME platform Git repositories - modules/adao.git/blob - src/daComposant/daCore/NumericObjects.py
Salome HOME
d57b020d2545fb44ad1f5a3a94605fa49d8f7184
[modules/adao.git] / src / daComposant / daCore / NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Définit les objets numériques génériques.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     Cette classe sert d'interface pour définir les opérateurs approximés. A la
55     création d'un objet, en fournissant une fonction "Function", on obtient un
56     objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57     "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58     multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59     "dX" qui sera multiplié par "increment" (donc en %), et on effectue de DF
60     centrées si le booléen "centeredDF" est vrai.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Calcul du direct à l'aide de la fonction fournie.
171
172         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
173         ne doivent pas être données ici à la fonction utilisateur.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
188         c'est-à-dire le gradient de H en X. On utilise des différences finies
189         directionnelles autour du point X. X est un numpy.matrix.
190
191         Différences finies centrées (approximation d'ordre 2):
192         1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
193            dX[i] à la  composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
194            on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
195            H( X_moins_dXi )
196         2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
197            le pas 2*dXi
198         3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
199
200         Différences finies non centrées (approximation d'ordre 1):
201         1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
202            composante X[i] pour composer X_plus_dXi, et on calcule la réponse
203            HX_plus_dXi = H( X_plus_dXi )
204         2/ On calcule la valeur centrale HX = H(X)
205         3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
206            le pas dXi
207         4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
208
209         """
210         logging.debug("FDA Début du calcul de la Jacobienne")
211         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Calcul Jacobienne (explicite)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                      #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                    #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
377         #
378         logging.debug("FDA Fin du calcul de la Jacobienne")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Calcul du tangent à l'aide de la Jacobienne.
386
387         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
388         ne doivent pas être données ici à la fonction utilisateur.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect lenght of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Calcul de la forme matricielle si le second argument est None
402             # -------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Calcul de la valeur linéarisée de H en X appliqué à dX
408             # ------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Calcul de l'adjoint à l'aide de la Jacobienne.
418
419         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
420         ne doivent pas être données ici à la fonction utilisateur.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect lenght of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Calcul de la forme matricielle si le second argument est None
434             # -------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Calcul de la valeur de l'adjoint en X appliqué à Y
440             # --------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
449     #
450     _bgcenter = numpy.ravel(_bgcenter)[:,None]
451     if _nbmembers < 1:
452         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
453     #
454     if _bgcovariance is None:
455         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
456     else:
457         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
459     #
460     return BackgroundEnsemble
461
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
465     def __CenteredRandomAnomalies(Zr, N):
466         """
467         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
468         notes manuscrites de MB et conforme au code de PS avec eps = -1
469         """
470         eps = -1
471         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
474         Q = numpy.dot(Q,R)
475         Zr = numpy.dot(Q,Zr)
476         return Zr.T
477     #
478     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
479     if _nbmembers < 1:
480         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
481     if _bgcovariance is None:
482         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
483     else:
484         if _withSVD:
485             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486             _nbctl = _bgcenter.size
487             if _nbmembers > _nbctl:
488                 _Z = numpy.concatenate((numpy.dot(
489                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
491             else:
492                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494             BackgroundEnsemble = _bgcenter + _Zca
495         else:
496             if max(abs(_bgcovariance.flatten())) > 0:
497                 _nbctl = _bgcenter.size
498                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500                 BackgroundEnsemble = _bgcenter + _Zca
501             else:
502                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
503     #
504     return BackgroundEnsemble
505
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
508     "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
509     if OptMean is None:
510         __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
511     else:
512         __Em = numpy.ravel(OptMean).reshape((-1,1))
513     #
514     return Normalisation * (numpy.asarray(Ensemble) - __Em)
515
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble, __quick = False ):
518     "Renvoie l'estimation empirique de la covariance d'ensemble"
519     if __quick:
520         # Covariance rapide mais rarement définie positive
521         __Covariance = numpy.cov(Ensemble)
522     else:
523         # Résultat souvent identique à numpy.cov, mais plus robuste
524         __n, __m = numpy.asarray(Ensemble).shape
525         __Anomalies = EnsembleOfAnomalies( Ensemble )
526         # Estimation empirique
527         __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
528         # Assure la symétrie
529         __Covariance = (__Covariance + __Covariance.T) * 0.5
530         # Assure la positivité
531         __epsilon    = mpr*numpy.trace(__Covariance)
532         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
533     #
534     return __Covariance
535
536 # ==============================================================================
537 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
538     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
539     if hasattr(__Covariance,"assparsematrix"):
540         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
541             # Traitement d'une covariance nulle ou presque
542             return __Ensemble
543         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
544             # Traitement d'une covariance nulle ou presque
545             return __Ensemble
546     else:
547         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
548             # Traitement d'une covariance nulle ou presque
549             return __Ensemble
550         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
551             # Traitement d'une covariance nulle ou presque
552             return __Ensemble
553     #
554     __n, __m = __Ensemble.shape
555     if __Seed is not None: numpy.random.seed(__Seed)
556     #
557     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
558         # Traitement d'une covariance multiple de l'identité
559         __zero = 0.
560         __std  = numpy.sqrt(__Covariance.assparsematrix())
561         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
562     #
563     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
564         # Traitement d'une covariance diagonale avec variances non identiques
565         __zero = numpy.zeros(__n)
566         __std  = numpy.sqrt(__Covariance.assparsematrix())
567         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
568     #
569     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
570         # Traitement d'une covariance pleine
571         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
572     #
573     elif isinstance(__Covariance, numpy.ndarray):
574         # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
575         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
576     #
577     else:
578         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
579     #
580     return __Ensemble
581
582 # ==============================================================================
583 def CovarianceInflation(
584         InputCovOrEns,
585         InflationType   = None,
586         InflationFactor = None,
587         BackgroundCov   = None,
588         ):
589     """
590     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
591
592     Synthèse : Hunt 2007, section 2.3.5
593     """
594     if InflationFactor is None:
595         return InputCovOrEns
596     else:
597         InflationFactor = float(InflationFactor)
598     #
599     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
600         if InflationFactor < 1.:
601             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
602         if InflationFactor < 1.+mpr:
603             return InputCovOrEns
604         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
605     #
606     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
607         if InflationFactor < 1.:
608             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
609         if InflationFactor < 1.+mpr:
610             return InputCovOrEns
611         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
612         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
613             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
614     #
615     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
616         if InflationFactor < 0.:
617             raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
618         if InflationFactor < mpr:
619             return InputCovOrEns
620         __n, __m = numpy.asarray(InputCovOrEns).shape
621         if __n != __m:
622             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
623         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
624     #
625     elif InflationType == "HybridOnBackgroundCovariance":
626         if InflationFactor < 0.:
627             raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
628         if InflationFactor < mpr:
629             return InputCovOrEns
630         __n, __m = numpy.asarray(InputCovOrEns).shape
631         if __n != __m:
632             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
633         if BackgroundCov is None:
634             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
635         if InputCovOrEns.shape != BackgroundCov.shape:
636             raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
637         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
638     #
639     elif InflationType == "Relaxation":
640         raise NotImplementedError("InflationType Relaxation")
641     #
642     else:
643         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
644     #
645     return OutputCovOrEns
646
647 # ==============================================================================
648 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
649     "Estimation des quantiles a posteriori (selfA est modifié)"
650     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
651     #
652     # Traitement des bornes
653     if "StateBoundsForQuantiles" in selfA._parameters:
654         LBounds = selfA._parameters["StateBoundsForQuantiles"] # Prioritaire
655     elif "Bounds" in selfA._parameters:
656         LBounds = selfA._parameters["Bounds"]  # Défaut raisonnable
657     else:
658         LBounds = None
659     if LBounds is not None:
660         def NoneRemove(paire):
661             bmin, bmax = paire
662             if bmin is None: bmin = numpy.finfo('float').min
663             if bmax is None: bmax = numpy.finfo('float').max
664             return [bmin, bmax]
665         LBounds = numpy.matrix( [NoneRemove(paire) for paire in LBounds] )
666     #
667     # Échantillonnage des états
668     YfQ  = None
669     EXr  = None
670     if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
671         HXa  = numpy.matrix(numpy.ravel( HXa )).T
672     for i in range(nbsamples):
673         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
674             dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
675             if LBounds is not None: # "EstimateProjection" par défaut
676                 dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0]) - Xa),axis=1)
677                 dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1]) - Xa),axis=1)
678             dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
679             Yr = HXa + dYr
680             if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
681         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
682             Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
683             if LBounds is not None: # "EstimateProjection" par défaut
684                 Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
685                 Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
686             Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
687         else:
688             raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
689         #
690         if YfQ is None:
691             YfQ = Yr
692             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
693         else:
694             YfQ = numpy.hstack((YfQ,Yr))
695             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
696     #
697     # Extraction des quantiles
698     YfQ.sort(axis=-1)
699     YQ = None
700     for quantile in selfA._parameters["Quantiles"]:
701         if not (0. <= float(quantile) <= 1.): continue
702         indice = int(nbsamples * float(quantile) - 1./nbsamples)
703         if YQ is None: YQ = YfQ[:,indice]
704         else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
705     selfA.StoredVariables["SimulationQuantiles"].store( YQ )
706     if selfA._toStore("SampledStateForQuantiles"):
707         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
708     #
709     return 0
710
711 # ==============================================================================
712 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
713     """
714     EnKS
715     """
716     #
717     # Opérateurs
718     H = HO["Direct"].appliedControledFormTo
719     #
720     if selfA._parameters["EstimationOf"] == "State":
721         M = EM["Direct"].appliedControledFormTo
722     #
723     if CM is not None and "Tangent" in CM and U is not None:
724         Cm = CM["Tangent"].asMatrix(Xb)
725     else:
726         Cm = None
727     #
728     # Précalcul des inversions de B et R
729     RIdemi = R.sqrtmI()
730     #
731     # Durée d'observation et tailles
732     LagL = selfA._parameters["SmootherLagL"]
733     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
734         raise ValueError("Fixed-lag smoother requires a series of observation")
735     if Y.stepnumber() < LagL:
736         raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
737     duration = Y.stepnumber()
738     __p = numpy.cumprod(Y.shape())[-1]
739     __n = Xb.size
740     __m = selfA._parameters["NumberOfMembers"]
741     #
742     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
743     else:                         Pn = B
744     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
745         selfA.StoredVariables["Analysis"].store( Xb )
746         if selfA._toStore("APosterioriCovariance"):
747             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
748             covarianceXa = Pn
749     #
750     # Calcul direct initial (on privilégie la mémorisation au recalcul)
751     __seed = numpy.random.get_state()
752     selfB = copy.deepcopy(selfA)
753     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
754     if VariantM == "EnKS16-KalmanFilterFormula":
755         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
756     else:
757         raise ValueError("VariantM has to be chosen in the authorized methods list.")
758     if LagL > 0:
759         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
760     else:
761         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
762     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
763     #
764     for step in range(LagL,duration-1):
765         #
766         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
767         sEL.append(None)
768         #
769         if hasattr(Y,"store"):
770             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
771         else:
772             Ynpu = numpy.ravel( Y ).reshape((__p,1))
773         #
774         if U is not None:
775             if hasattr(U,"store") and len(U)>1:
776                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
777             elif hasattr(U,"store") and len(U)==1:
778                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
779             else:
780                 Un = numpy.asmatrix(numpy.ravel( U )).T
781         else:
782             Un = None
783         #
784         #--------------------------
785         if VariantM == "EnKS16-KalmanFilterFormula":
786             if selfA._parameters["EstimationOf"] == "State": # Forecast
787                 EL = M( [(EL[:,i], Un) for i in range(__m)],
788                     argsAsSerie = True,
789                     returnSerieAsArrayMatrix = True )
790                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
791                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
792                     argsAsSerie = True,
793                     returnSerieAsArrayMatrix = True )
794                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
795                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
796                     EZ = EZ + Cm * Un
797             elif selfA._parameters["EstimationOf"] == "Parameters":
798                 # --- > Par principe, M = Id, Q = 0
799                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
800                     argsAsSerie = True,
801                     returnSerieAsArrayMatrix = True )
802             #
803             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
804             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
805             #
806             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
807             mS    = mS.reshape((-1,__m)) # Pour dimension 1
808             delta = RIdemi @ ( Ynpu - vZm )
809             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
810             vw    = mT @ mS.T @ delta
811             #
812             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
813             mU    = numpy.identity(__m)
814             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
815             #
816             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
817             EL    = vEm + EX @ wTU
818             #
819             sEL[LagL] = EL
820             for irl in range(LagL): # Lissage des L précédentes analysis
821                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
822                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
823                 sEL[irl] = vEm + EX @ wTU
824             #
825             # Conservation de l'analyse retrospective d'ordre 0 avant rotation
826             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
827             if selfA._toStore("APosterioriCovariance"):
828                 EXn = sEL[0]
829             #
830             for irl in range(LagL):
831                 sEL[irl] = sEL[irl+1]
832             sEL[LagL] = None
833         #--------------------------
834         else:
835             raise ValueError("VariantM has to be chosen in the authorized methods list.")
836         #
837         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
838         # ---> avec analysis
839         selfA.StoredVariables["Analysis"].store( Xa )
840         if selfA._toStore("APosterioriCovariance"):
841             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
842     #
843     # Stockage des dernières analyses incomplètement remises à jour
844     for irl in range(LagL):
845         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
846         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
847         selfA.StoredVariables["Analysis"].store( Xa )
848     #
849     return 0
850
851 # ==============================================================================
852 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
853     """
854     Ensemble-Transform EnKF
855     """
856     if selfA._parameters["EstimationOf"] == "Parameters":
857         selfA._parameters["StoreInternalVariables"] = True
858     #
859     # Opérateurs
860     # ----------
861     H = HO["Direct"].appliedControledFormTo
862     #
863     if selfA._parameters["EstimationOf"] == "State":
864         M = EM["Direct"].appliedControledFormTo
865     #
866     if CM is not None and "Tangent" in CM and U is not None:
867         Cm = CM["Tangent"].asMatrix(Xb)
868     else:
869         Cm = None
870     #
871     # Nombre de pas identique au nombre de pas d'observations
872     # -------------------------------------------------------
873     if hasattr(Y,"stepnumber"):
874         duration = Y.stepnumber()
875         __p = numpy.cumprod(Y.shape())[-1]
876     else:
877         duration = 2
878         __p = numpy.array(Y).size
879     #
880     # Précalcul des inversions de B et R
881     # ----------------------------------
882     if selfA._parameters["StoreInternalVariables"] \
883         or selfA._toStore("CostFunctionJ") \
884         or selfA._toStore("CostFunctionJb") \
885         or selfA._toStore("CostFunctionJo") \
886         or selfA._toStore("CurrentOptimum") \
887         or selfA._toStore("APosterioriCovariance"):
888         BI = B.getI()
889         RI = R.getI()
890     elif VariantM != "KalmanFilterFormula":
891         RI = R.getI()
892     if VariantM == "KalmanFilterFormula":
893         RIdemi = R.sqrtmI()
894     #
895     # Initialisation
896     # --------------
897     __n = Xb.size
898     __m = selfA._parameters["NumberOfMembers"]
899     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
900     else:                         Pn = B
901     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
902     #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
903     #
904     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
905         selfA.StoredVariables["Analysis"].store( Xb )
906         if selfA._toStore("APosterioriCovariance"):
907             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
908             covarianceXa = Pn
909     #
910     previousJMinimum = numpy.finfo(float).max
911     #
912     for step in range(duration-1):
913         if hasattr(Y,"store"):
914             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
915         else:
916             Ynpu = numpy.ravel( Y ).reshape((__p,1))
917         #
918         if U is not None:
919             if hasattr(U,"store") and len(U)>1:
920                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
921             elif hasattr(U,"store") and len(U)==1:
922                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
923             else:
924                 Un = numpy.asmatrix(numpy.ravel( U )).T
925         else:
926             Un = None
927         #
928         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
929             Xn = CovarianceInflation( Xn,
930                 selfA._parameters["InflationType"],
931                 selfA._parameters["InflationFactor"],
932                 )
933         #
934         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
935             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
936                 argsAsSerie = True,
937                 returnSerieAsArrayMatrix = True )
938             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
939             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
940                 argsAsSerie = True,
941                 returnSerieAsArrayMatrix = True )
942             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
943                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
944                 Xn_predicted = Xn_predicted + Cm * Un
945         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
946             # --- > Par principe, M = Id, Q = 0
947             Xn_predicted = Xn
948             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
949                 argsAsSerie = True,
950                 returnSerieAsArrayMatrix = True )
951         #
952         # Mean of forecast and observation of forecast
953         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
954         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
955         #
956         # Anomalies
957         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
958         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
959         #
960         #--------------------------
961         if VariantM == "KalmanFilterFormula":
962             mS    = RIdemi * EaHX / math.sqrt(__m-1)
963             mS    = mS.reshape((-1,__m)) # Pour dimension 1
964             delta = RIdemi * ( Ynpu - Hfm )
965             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
966             vw    = mT @ mS.T @ delta
967             #
968             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
969             mU    = numpy.identity(__m)
970             #
971             EaX   = EaX / math.sqrt(__m-1)
972             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
973         #--------------------------
974         elif VariantM == "Variational":
975             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
976             def CostFunction(w):
977                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
978                 _Jo = 0.5 * _A.T @ (RI * _A)
979                 _Jb = 0.5 * (__m-1) * w.T @ w
980                 _J  = _Jo + _Jb
981                 return float(_J)
982             def GradientOfCostFunction(w):
983                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
984                 _GardJo = - EaHX.T @ (RI * _A)
985                 _GradJb = (__m-1) * w.reshape((__m,1))
986                 _GradJ  = _GardJo + _GradJb
987                 return numpy.ravel(_GradJ)
988             vw = scipy.optimize.fmin_cg(
989                 f           = CostFunction,
990                 x0          = numpy.zeros(__m),
991                 fprime      = GradientOfCostFunction,
992                 args        = (),
993                 disp        = False,
994                 )
995             #
996             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
997             Htb = (__m-1) * numpy.identity(__m)
998             Hta = Hto + Htb
999             #
1000             Pta = numpy.linalg.inv( Hta )
1001             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1002             #
1003             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1004         #--------------------------
1005         elif VariantM == "FiniteSize11": # Jauge Boc2011
1006             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1007             def CostFunction(w):
1008                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1009                 _Jo = 0.5 * _A.T @ (RI * _A)
1010                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1011                 _J  = _Jo + _Jb
1012                 return float(_J)
1013             def GradientOfCostFunction(w):
1014                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1015                 _GardJo = - EaHX.T @ (RI * _A)
1016                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1017                 _GradJ  = _GardJo + _GradJb
1018                 return numpy.ravel(_GradJ)
1019             vw = scipy.optimize.fmin_cg(
1020                 f           = CostFunction,
1021                 x0          = numpy.zeros(__m),
1022                 fprime      = GradientOfCostFunction,
1023                 args        = (),
1024                 disp        = False,
1025                 )
1026             #
1027             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1028             Htb = __m * \
1029                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1030                 / (1 + 1/__m + vw.T @ vw)**2
1031             Hta = Hto + Htb
1032             #
1033             Pta = numpy.linalg.inv( Hta )
1034             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1035             #
1036             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1037         #--------------------------
1038         elif VariantM == "FiniteSize15": # Jauge Boc2015
1039             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1040             def CostFunction(w):
1041                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1042                 _Jo = 0.5 * _A.T * RI * _A
1043                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1044                 _J  = _Jo + _Jb
1045                 return float(_J)
1046             def GradientOfCostFunction(w):
1047                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1048                 _GardJo = - EaHX.T @ (RI * _A)
1049                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1050                 _GradJ  = _GardJo + _GradJb
1051                 return numpy.ravel(_GradJ)
1052             vw = scipy.optimize.fmin_cg(
1053                 f           = CostFunction,
1054                 x0          = numpy.zeros(__m),
1055                 fprime      = GradientOfCostFunction,
1056                 args        = (),
1057                 disp        = False,
1058                 )
1059             #
1060             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1061             Htb = (__m+1) * \
1062                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1063                 / (1 + 1/__m + vw.T @ vw)**2
1064             Hta = Hto + Htb
1065             #
1066             Pta = numpy.linalg.inv( Hta )
1067             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1068             #
1069             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1070         #--------------------------
1071         elif VariantM == "FiniteSize16": # Jauge Boc2016
1072             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1073             def CostFunction(w):
1074                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1075                 _Jo = 0.5 * _A.T @ (RI * _A)
1076                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1077                 _J  = _Jo + _Jb
1078                 return float(_J)
1079             def GradientOfCostFunction(w):
1080                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1081                 _GardJo = - EaHX.T @ (RI * _A)
1082                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1083                 _GradJ  = _GardJo + _GradJb
1084                 return numpy.ravel(_GradJ)
1085             vw = scipy.optimize.fmin_cg(
1086                 f           = CostFunction,
1087                 x0          = numpy.zeros(__m),
1088                 fprime      = GradientOfCostFunction,
1089                 args        = (),
1090                 disp        = False,
1091                 )
1092             #
1093             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1094             Htb = ((__m+1) / (__m-1)) * \
1095                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1096                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1097             Hta = Hto + Htb
1098             #
1099             Pta = numpy.linalg.inv( Hta )
1100             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1101             #
1102             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1103         #--------------------------
1104         else:
1105             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1106         #
1107         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1108             Xn = CovarianceInflation( Xn,
1109                 selfA._parameters["InflationType"],
1110                 selfA._parameters["InflationFactor"],
1111                 )
1112         #
1113         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1114         #--------------------------
1115         #
1116         if selfA._parameters["StoreInternalVariables"] \
1117             or selfA._toStore("CostFunctionJ") \
1118             or selfA._toStore("CostFunctionJb") \
1119             or selfA._toStore("CostFunctionJo") \
1120             or selfA._toStore("APosterioriCovariance") \
1121             or selfA._toStore("InnovationAtCurrentAnalysis") \
1122             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1123             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1124             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1125             _Innovation = Ynpu - _HXa
1126         #
1127         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1128         # ---> avec analysis
1129         selfA.StoredVariables["Analysis"].store( Xa )
1130         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1131             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1132         if selfA._toStore("InnovationAtCurrentAnalysis"):
1133             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1134         # ---> avec current state
1135         if selfA._parameters["StoreInternalVariables"] \
1136             or selfA._toStore("CurrentState"):
1137             selfA.StoredVariables["CurrentState"].store( Xn )
1138         if selfA._toStore("ForecastState"):
1139             selfA.StoredVariables["ForecastState"].store( EMX )
1140         if selfA._toStore("BMA"):
1141             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1142         if selfA._toStore("InnovationAtCurrentState"):
1143             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1144         if selfA._toStore("SimulatedObservationAtCurrentState") \
1145             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1146             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1147         # ---> autres
1148         if selfA._parameters["StoreInternalVariables"] \
1149             or selfA._toStore("CostFunctionJ") \
1150             or selfA._toStore("CostFunctionJb") \
1151             or selfA._toStore("CostFunctionJo") \
1152             or selfA._toStore("CurrentOptimum") \
1153             or selfA._toStore("APosterioriCovariance"):
1154             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1155             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1156             J   = Jb + Jo
1157             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1158             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1159             selfA.StoredVariables["CostFunctionJ" ].store( J )
1160             #
1161             if selfA._toStore("IndexOfOptimum") \
1162                 or selfA._toStore("CurrentOptimum") \
1163                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1164                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1165                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1166                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1167                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1168             if selfA._toStore("IndexOfOptimum"):
1169                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1170             if selfA._toStore("CurrentOptimum"):
1171                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1172             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1173                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1174             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1175                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1176             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1177                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1178             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1179                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1180         if selfA._toStore("APosterioriCovariance"):
1181             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1182         if selfA._parameters["EstimationOf"] == "Parameters" \
1183             and J < previousJMinimum:
1184             previousJMinimum    = J
1185             XaMin               = Xa
1186             if selfA._toStore("APosterioriCovariance"):
1187                 covarianceXaMin = Pn
1188         # ---> Pour les smoothers
1189         if selfA._toStore("CurrentEnsembleState"):
1190             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1191     #
1192     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1193     # ----------------------------------------------------------------------
1194     if selfA._parameters["EstimationOf"] == "Parameters":
1195         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1196         selfA.StoredVariables["Analysis"].store( XaMin )
1197         if selfA._toStore("APosterioriCovariance"):
1198             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1199         if selfA._toStore("BMA"):
1200             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1201     #
1202     return 0
1203
1204 # ==============================================================================
1205 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1206     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1207     """
1208     Iterative EnKF
1209     """
1210     if selfA._parameters["EstimationOf"] == "Parameters":
1211         selfA._parameters["StoreInternalVariables"] = True
1212     #
1213     # Opérateurs
1214     # ----------
1215     H = HO["Direct"].appliedControledFormTo
1216     #
1217     if selfA._parameters["EstimationOf"] == "State":
1218         M = EM["Direct"].appliedControledFormTo
1219     #
1220     if CM is not None and "Tangent" in CM and U is not None:
1221         Cm = CM["Tangent"].asMatrix(Xb)
1222     else:
1223         Cm = None
1224     #
1225     # Nombre de pas identique au nombre de pas d'observations
1226     # -------------------------------------------------------
1227     if hasattr(Y,"stepnumber"):
1228         duration = Y.stepnumber()
1229         __p = numpy.cumprod(Y.shape())[-1]
1230     else:
1231         duration = 2
1232         __p = numpy.array(Y).size
1233     #
1234     # Précalcul des inversions de B et R
1235     # ----------------------------------
1236     if selfA._parameters["StoreInternalVariables"] \
1237         or selfA._toStore("CostFunctionJ") \
1238         or selfA._toStore("CostFunctionJb") \
1239         or selfA._toStore("CostFunctionJo") \
1240         or selfA._toStore("CurrentOptimum") \
1241         or selfA._toStore("APosterioriCovariance"):
1242         BI = B.getI()
1243     RI = R.getI()
1244     #
1245     # Initialisation
1246     # --------------
1247     __n = Xb.size
1248     __m = selfA._parameters["NumberOfMembers"]
1249     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1250     else:                         Pn = B
1251     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1252     else:                         Rn = R
1253     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1254     else:                         Qn = Q
1255     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1256     #
1257     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1258         selfA.StoredVariables["Analysis"].store( Xb )
1259         if selfA._toStore("APosterioriCovariance"):
1260             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1261             covarianceXa = Pn
1262     #
1263     previousJMinimum = numpy.finfo(float).max
1264     #
1265     for step in range(duration-1):
1266         if hasattr(Y,"store"):
1267             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1268         else:
1269             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1270         #
1271         if U is not None:
1272             if hasattr(U,"store") and len(U)>1:
1273                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1274             elif hasattr(U,"store") and len(U)==1:
1275                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1276             else:
1277                 Un = numpy.asmatrix(numpy.ravel( U )).T
1278         else:
1279             Un = None
1280         #
1281         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1282             Xn = CovarianceInflation( Xn,
1283                 selfA._parameters["InflationType"],
1284                 selfA._parameters["InflationFactor"],
1285                 )
1286         #
1287         #--------------------------
1288         if VariantM == "IEnKF12":
1289             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1290             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1291             __j = 0
1292             Deltaw = 1
1293             if not BnotT:
1294                 Ta  = numpy.identity(__m)
1295             vw  = numpy.zeros(__m)
1296             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1297                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1298                 #
1299                 if BnotT:
1300                     E1 = vx1 + _epsilon * EaX
1301                 else:
1302                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1303                 #
1304                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1305                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1306                         argsAsSerie = True,
1307                         returnSerieAsArrayMatrix = True )
1308                 elif selfA._parameters["EstimationOf"] == "Parameters":
1309                     # --- > Par principe, M = Id
1310                     E2 = Xn
1311                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1312                 vy1 = H((vx2, Un)).reshape((__p,1))
1313                 #
1314                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1315                     argsAsSerie = True,
1316                     returnSerieAsArrayMatrix = True )
1317                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1318                 #
1319                 if BnotT:
1320                     EaY = (HE2 - vy2) / _epsilon
1321                 else:
1322                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1323                 #
1324                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1325                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1326                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1327                 #
1328                 vw = vw + Deltaw
1329                 #
1330                 if not BnotT:
1331                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1332                 #
1333                 __j = __j + 1
1334             #
1335             A2 = EnsembleOfAnomalies( E2 )
1336             #
1337             if BnotT:
1338                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1339                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1340             #
1341             Xn = vx2 + A2
1342         #--------------------------
1343         else:
1344             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1345         #
1346         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1347             Xn = CovarianceInflation( Xn,
1348                 selfA._parameters["InflationType"],
1349                 selfA._parameters["InflationFactor"],
1350                 )
1351         #
1352         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1353         #--------------------------
1354         #
1355         if selfA._parameters["StoreInternalVariables"] \
1356             or selfA._toStore("CostFunctionJ") \
1357             or selfA._toStore("CostFunctionJb") \
1358             or selfA._toStore("CostFunctionJo") \
1359             or selfA._toStore("APosterioriCovariance") \
1360             or selfA._toStore("InnovationAtCurrentAnalysis") \
1361             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1362             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1363             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1364             _Innovation = Ynpu - _HXa
1365         #
1366         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1367         # ---> avec analysis
1368         selfA.StoredVariables["Analysis"].store( Xa )
1369         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1370             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1371         if selfA._toStore("InnovationAtCurrentAnalysis"):
1372             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1373         # ---> avec current state
1374         if selfA._parameters["StoreInternalVariables"] \
1375             or selfA._toStore("CurrentState"):
1376             selfA.StoredVariables["CurrentState"].store( Xn )
1377         if selfA._toStore("ForecastState"):
1378             selfA.StoredVariables["ForecastState"].store( E2 )
1379         if selfA._toStore("BMA"):
1380             selfA.StoredVariables["BMA"].store( E2 - Xa )
1381         if selfA._toStore("InnovationAtCurrentState"):
1382             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1383         if selfA._toStore("SimulatedObservationAtCurrentState") \
1384             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1385             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1386         # ---> autres
1387         if selfA._parameters["StoreInternalVariables"] \
1388             or selfA._toStore("CostFunctionJ") \
1389             or selfA._toStore("CostFunctionJb") \
1390             or selfA._toStore("CostFunctionJo") \
1391             or selfA._toStore("CurrentOptimum") \
1392             or selfA._toStore("APosterioriCovariance"):
1393             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1394             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1395             J   = Jb + Jo
1396             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1397             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1398             selfA.StoredVariables["CostFunctionJ" ].store( J )
1399             #
1400             if selfA._toStore("IndexOfOptimum") \
1401                 or selfA._toStore("CurrentOptimum") \
1402                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1403                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1404                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1405                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1406                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1407             if selfA._toStore("IndexOfOptimum"):
1408                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1409             if selfA._toStore("CurrentOptimum"):
1410                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1411             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1412                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1413             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1414                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1415             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1416                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1417             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1418                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1419         if selfA._toStore("APosterioriCovariance"):
1420             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1421         if selfA._parameters["EstimationOf"] == "Parameters" \
1422             and J < previousJMinimum:
1423             previousJMinimum    = J
1424             XaMin               = Xa
1425             if selfA._toStore("APosterioriCovariance"):
1426                 covarianceXaMin = Pn
1427     #
1428     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1429     # ----------------------------------------------------------------------
1430     if selfA._parameters["EstimationOf"] == "Parameters":
1431         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1432         selfA.StoredVariables["Analysis"].store( XaMin )
1433         if selfA._toStore("APosterioriCovariance"):
1434             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1435         if selfA._toStore("BMA"):
1436             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1437     #
1438     return 0
1439
1440 # ==============================================================================
1441 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1442     """
1443     3DVAR incrémental
1444     """
1445     #
1446     # Initialisations
1447     # ---------------
1448     #
1449     # Opérateur non-linéaire pour la boucle externe
1450     Hm = HO["Direct"].appliedTo
1451     #
1452     # Précalcul des inversions de B et R
1453     BI = B.getI()
1454     RI = R.getI()
1455     #
1456     # Point de démarrage de l'optimisation
1457     Xini = selfA._parameters["InitializationPoint"]
1458     #
1459     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1460     Innovation = Y - HXb
1461     #
1462     # Outer Loop
1463     # ----------
1464     iOuter = 0
1465     J      = 1./mpr
1466     DeltaJ = 1./mpr
1467     Xr     = Xini.reshape((-1,1))
1468     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1469         #
1470         # Inner Loop
1471         # ----------
1472         Ht = HO["Tangent"].asMatrix(Xr)
1473         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1474         #
1475         # Définition de la fonction-coût
1476         # ------------------------------
1477         def CostFunction(dx):
1478             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1479             if selfA._parameters["StoreInternalVariables"] or \
1480                 selfA._toStore("CurrentState") or \
1481                 selfA._toStore("CurrentOptimum"):
1482                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1483             _HdX = Ht * _dX
1484             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1485             _dInnovation = Innovation - _HdX
1486             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1487                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1488                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1489             if selfA._toStore("InnovationAtCurrentState"):
1490                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1491             #
1492             Jb  = float( 0.5 * _dX.T * BI * _dX )
1493             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1494             J   = Jb + Jo
1495             #
1496             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1497             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1498             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1499             selfA.StoredVariables["CostFunctionJ" ].store( J )
1500             if selfA._toStore("IndexOfOptimum") or \
1501                 selfA._toStore("CurrentOptimum") or \
1502                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1503                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1504                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1505                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1506                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1507             if selfA._toStore("IndexOfOptimum"):
1508                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1509             if selfA._toStore("CurrentOptimum"):
1510                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1511             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1512                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1513             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1514                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1515             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1516                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1517             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1518                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1519             return J
1520         #
1521         def GradientOfCostFunction(dx):
1522             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1523             _HdX         = Ht * _dX
1524             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1525             _dInnovation = Innovation - _HdX
1526             GradJb       = BI * _dX
1527             GradJo       = - Ht.T @ (RI * _dInnovation)
1528             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1529             return GradJ
1530         #
1531         # Minimisation de la fonctionnelle
1532         # --------------------------------
1533         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1534         #
1535         if selfA._parameters["Minimizer"] == "LBFGSB":
1536             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1537             if "0.19" <= scipy.version.version <= "1.1.0":
1538                 import lbfgsbhlt as optimiseur
1539             else:
1540                 import scipy.optimize as optimiseur
1541             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1542                 func        = CostFunction,
1543                 x0          = numpy.zeros(Xini.size),
1544                 fprime      = GradientOfCostFunction,
1545                 args        = (),
1546                 bounds      = selfA._parameters["Bounds"],
1547                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1548                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1549                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1550                 iprint      = selfA._parameters["optiprint"],
1551                 )
1552             nfeval = Informations['funcalls']
1553             rc     = Informations['warnflag']
1554         elif selfA._parameters["Minimizer"] == "TNC":
1555             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1556                 func        = CostFunction,
1557                 x0          = numpy.zeros(Xini.size),
1558                 fprime      = GradientOfCostFunction,
1559                 args        = (),
1560                 bounds      = selfA._parameters["Bounds"],
1561                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1562                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1563                 ftol        = selfA._parameters["CostDecrementTolerance"],
1564                 messages    = selfA._parameters["optmessages"],
1565                 )
1566         elif selfA._parameters["Minimizer"] == "CG":
1567             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1568                 f           = CostFunction,
1569                 x0          = numpy.zeros(Xini.size),
1570                 fprime      = GradientOfCostFunction,
1571                 args        = (),
1572                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1573                 gtol        = selfA._parameters["GradientNormTolerance"],
1574                 disp        = selfA._parameters["optdisp"],
1575                 full_output = True,
1576                 )
1577         elif selfA._parameters["Minimizer"] == "NCG":
1578             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1579                 f           = CostFunction,
1580                 x0          = numpy.zeros(Xini.size),
1581                 fprime      = GradientOfCostFunction,
1582                 args        = (),
1583                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1584                 avextol     = selfA._parameters["CostDecrementTolerance"],
1585                 disp        = selfA._parameters["optdisp"],
1586                 full_output = True,
1587                 )
1588         elif selfA._parameters["Minimizer"] == "BFGS":
1589             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1590                 f           = CostFunction,
1591                 x0          = numpy.zeros(Xini.size),
1592                 fprime      = GradientOfCostFunction,
1593                 args        = (),
1594                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1595                 gtol        = selfA._parameters["GradientNormTolerance"],
1596                 disp        = selfA._parameters["optdisp"],
1597                 full_output = True,
1598                 )
1599         else:
1600             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1601         #
1602         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1603         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1604         #
1605         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1606             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1607             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1608         else:
1609             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1610         #
1611         Xr     = Minimum
1612         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1613         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1614     #
1615     # Obtention de l'analyse
1616     # ----------------------
1617     Xa = Xr
1618     #
1619     selfA.StoredVariables["Analysis"].store( Xa )
1620     #
1621     if selfA._toStore("OMA") or \
1622         selfA._toStore("SigmaObs2") or \
1623         selfA._toStore("SimulationQuantiles") or \
1624         selfA._toStore("SimulatedObservationAtOptimum"):
1625         if selfA._toStore("SimulatedObservationAtCurrentState"):
1626             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1627         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1628             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1629         else:
1630             HXa = Hm( Xa )
1631     #
1632     # Calcul de la covariance d'analyse
1633     # ---------------------------------
1634     if selfA._toStore("APosterioriCovariance") or \
1635         selfA._toStore("SimulationQuantiles") or \
1636         selfA._toStore("JacobianMatrixAtOptimum") or \
1637         selfA._toStore("KalmanGainAtOptimum"):
1638         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1639         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1640     if selfA._toStore("APosterioriCovariance") or \
1641         selfA._toStore("SimulationQuantiles") or \
1642         selfA._toStore("KalmanGainAtOptimum"):
1643         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1644         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1645     if selfA._toStore("APosterioriCovariance") or \
1646         selfA._toStore("SimulationQuantiles"):
1647         HessienneI = []
1648         nb = Xa.size
1649         for i in range(nb):
1650             _ee    = numpy.matrix(numpy.zeros(nb)).T
1651             _ee[i] = 1.
1652             _HtEE  = numpy.dot(HtM,_ee)
1653             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1654             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1655         HessienneI = numpy.matrix( HessienneI )
1656         A = HessienneI.I
1657         if min(A.shape) != max(A.shape):
1658             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1659         if (numpy.diag(A) < 0).any():
1660             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1661         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
1662             try:
1663                 L = numpy.linalg.cholesky( A )
1664             except:
1665                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1666     if selfA._toStore("APosterioriCovariance"):
1667         selfA.StoredVariables["APosterioriCovariance"].store( A )
1668     if selfA._toStore("JacobianMatrixAtOptimum"):
1669         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1670     if selfA._toStore("KalmanGainAtOptimum"):
1671         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1672         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1673         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1674     #
1675     # Calculs et/ou stockages supplémentaires
1676     # ---------------------------------------
1677     if selfA._toStore("Innovation") or \
1678         selfA._toStore("SigmaObs2") or \
1679         selfA._toStore("MahalanobisConsistency") or \
1680         selfA._toStore("OMB"):
1681         d  = Y - HXb
1682     if selfA._toStore("Innovation"):
1683         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1684     if selfA._toStore("BMA"):
1685         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1686     if selfA._toStore("OMA"):
1687         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1688     if selfA._toStore("OMB"):
1689         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1690     if selfA._toStore("SigmaObs2"):
1691         TraceR = R.trace(Y.size)
1692         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1693     if selfA._toStore("MahalanobisConsistency"):
1694         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1695     if selfA._toStore("SimulationQuantiles"):
1696         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
1697     if selfA._toStore("SimulatedObservationAtBackground"):
1698         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1699     if selfA._toStore("SimulatedObservationAtOptimum"):
1700         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1701     #
1702     return 0
1703
1704 # ==============================================================================
1705 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1706     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1707     """
1708     Maximum Likelihood Ensemble Filter
1709     """
1710     if selfA._parameters["EstimationOf"] == "Parameters":
1711         selfA._parameters["StoreInternalVariables"] = True
1712     #
1713     # Opérateurs
1714     # ----------
1715     H = HO["Direct"].appliedControledFormTo
1716     #
1717     if selfA._parameters["EstimationOf"] == "State":
1718         M = EM["Direct"].appliedControledFormTo
1719     #
1720     if CM is not None and "Tangent" in CM and U is not None:
1721         Cm = CM["Tangent"].asMatrix(Xb)
1722     else:
1723         Cm = None
1724     #
1725     # Nombre de pas identique au nombre de pas d'observations
1726     # -------------------------------------------------------
1727     if hasattr(Y,"stepnumber"):
1728         duration = Y.stepnumber()
1729         __p = numpy.cumprod(Y.shape())[-1]
1730     else:
1731         duration = 2
1732         __p = numpy.array(Y).size
1733     #
1734     # Précalcul des inversions de B et R
1735     # ----------------------------------
1736     if selfA._parameters["StoreInternalVariables"] \
1737         or selfA._toStore("CostFunctionJ") \
1738         or selfA._toStore("CostFunctionJb") \
1739         or selfA._toStore("CostFunctionJo") \
1740         or selfA._toStore("CurrentOptimum") \
1741         or selfA._toStore("APosterioriCovariance"):
1742         BI = B.getI()
1743     RI = R.getI()
1744     #
1745     # Initialisation
1746     # --------------
1747     __n = Xb.size
1748     __m = selfA._parameters["NumberOfMembers"]
1749     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1750     else:                         Pn = B
1751     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1752     else:                         Rn = R
1753     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1754     #
1755     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1756         selfA.StoredVariables["Analysis"].store( Xb )
1757         if selfA._toStore("APosterioriCovariance"):
1758             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1759             covarianceXa = Pn
1760     #
1761     previousJMinimum = numpy.finfo(float).max
1762     #
1763     for step in range(duration-1):
1764         if hasattr(Y,"store"):
1765             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1766         else:
1767             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1768         #
1769         if U is not None:
1770             if hasattr(U,"store") and len(U)>1:
1771                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1772             elif hasattr(U,"store") and len(U)==1:
1773                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1774             else:
1775                 Un = numpy.asmatrix(numpy.ravel( U )).T
1776         else:
1777             Un = None
1778         #
1779         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1780             Xn = CovarianceInflation( Xn,
1781                 selfA._parameters["InflationType"],
1782                 selfA._parameters["InflationFactor"],
1783                 )
1784         #
1785         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1786             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1787                 argsAsSerie = True,
1788                 returnSerieAsArrayMatrix = True )
1789             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1790             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1791                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1792                 Xn_predicted = Xn_predicted + Cm * Un
1793         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1794             # --- > Par principe, M = Id, Q = 0
1795             Xn_predicted = Xn
1796         #
1797         #--------------------------
1798         if VariantM == "MLEF13":
1799             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1800             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1801             Ua  = numpy.identity(__m)
1802             __j = 0
1803             Deltaw = 1
1804             if not BnotT:
1805                 Ta  = numpy.identity(__m)
1806             vw  = numpy.zeros(__m)
1807             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1808                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1809                 #
1810                 if BnotT:
1811                     E1 = vx1 + _epsilon * EaX
1812                 else:
1813                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1814                 #
1815                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1816                     argsAsSerie = True,
1817                     returnSerieAsArrayMatrix = True )
1818                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1819                 #
1820                 if BnotT:
1821                     EaY = (HE2 - vy2) / _epsilon
1822                 else:
1823                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1824                 #
1825                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1826                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1827                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1828                 #
1829                 vw = vw + Deltaw
1830                 #
1831                 if not BnotT:
1832                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1833                 #
1834                 __j = __j + 1
1835             #
1836             if BnotT:
1837                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1838             #
1839             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1840         #--------------------------
1841         else:
1842             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1843         #
1844         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1845             Xn = CovarianceInflation( Xn,
1846                 selfA._parameters["InflationType"],
1847                 selfA._parameters["InflationFactor"],
1848                 )
1849         #
1850         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1851         #--------------------------
1852         #
1853         if selfA._parameters["StoreInternalVariables"] \
1854             or selfA._toStore("CostFunctionJ") \
1855             or selfA._toStore("CostFunctionJb") \
1856             or selfA._toStore("CostFunctionJo") \
1857             or selfA._toStore("APosterioriCovariance") \
1858             or selfA._toStore("InnovationAtCurrentAnalysis") \
1859             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1860             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1861             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1862             _Innovation = Ynpu - _HXa
1863         #
1864         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1865         # ---> avec analysis
1866         selfA.StoredVariables["Analysis"].store( Xa )
1867         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1868             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1869         if selfA._toStore("InnovationAtCurrentAnalysis"):
1870             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1871         # ---> avec current state
1872         if selfA._parameters["StoreInternalVariables"] \
1873             or selfA._toStore("CurrentState"):
1874             selfA.StoredVariables["CurrentState"].store( Xn )
1875         if selfA._toStore("ForecastState"):
1876             selfA.StoredVariables["ForecastState"].store( EMX )
1877         if selfA._toStore("BMA"):
1878             selfA.StoredVariables["BMA"].store( EMX - Xa )
1879         if selfA._toStore("InnovationAtCurrentState"):
1880             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1881         if selfA._toStore("SimulatedObservationAtCurrentState") \
1882             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1883             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1884         # ---> autres
1885         if selfA._parameters["StoreInternalVariables"] \
1886             or selfA._toStore("CostFunctionJ") \
1887             or selfA._toStore("CostFunctionJb") \
1888             or selfA._toStore("CostFunctionJo") \
1889             or selfA._toStore("CurrentOptimum") \
1890             or selfA._toStore("APosterioriCovariance"):
1891             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1892             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1893             J   = Jb + Jo
1894             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1895             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1896             selfA.StoredVariables["CostFunctionJ" ].store( J )
1897             #
1898             if selfA._toStore("IndexOfOptimum") \
1899                 or selfA._toStore("CurrentOptimum") \
1900                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1901                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1902                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1903                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1904                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1905             if selfA._toStore("IndexOfOptimum"):
1906                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1907             if selfA._toStore("CurrentOptimum"):
1908                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1909             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1910                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1911             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1912                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1913             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1914                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1915             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1916                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1917         if selfA._toStore("APosterioriCovariance"):
1918             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1919         if selfA._parameters["EstimationOf"] == "Parameters" \
1920             and J < previousJMinimum:
1921             previousJMinimum    = J
1922             XaMin               = Xa
1923             if selfA._toStore("APosterioriCovariance"):
1924                 covarianceXaMin = Pn
1925     #
1926     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1927     # ----------------------------------------------------------------------
1928     if selfA._parameters["EstimationOf"] == "Parameters":
1929         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1930         selfA.StoredVariables["Analysis"].store( XaMin )
1931         if selfA._toStore("APosterioriCovariance"):
1932             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1933         if selfA._toStore("BMA"):
1934             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1935     #
1936     return 0
1937
1938 # ==============================================================================
1939 def mmqr(
1940         func     = None,
1941         x0       = None,
1942         fprime   = None,
1943         bounds   = None,
1944         quantile = 0.5,
1945         maxfun   = 15000,
1946         toler    = 1.e-06,
1947         y        = None,
1948         ):
1949     """
1950     Implémentation informatique de l'algorithme MMQR, basée sur la publication :
1951     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1952     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1953     """
1954     #
1955     # Recuperation des donnees et informations initiales
1956     # --------------------------------------------------
1957     variables = numpy.ravel( x0 )
1958     mesures   = numpy.ravel( y )
1959     increment = sys.float_info[0]
1960     p         = variables.size
1961     n         = mesures.size
1962     quantile  = float(quantile)
1963     #
1964     # Calcul des parametres du MM
1965     # ---------------------------
1966     tn      = float(toler) / n
1967     e0      = -tn / math.log(tn)
1968     epsilon = (e0-tn)/(1+math.log(e0))
1969     #
1970     # Calculs d'initialisation
1971     # ------------------------
1972     residus  = mesures - numpy.ravel( func( variables ) )
1973     poids    = 1./(epsilon+numpy.abs(residus))
1974     veps     = 1. - 2. * quantile - residus * poids
1975     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1976     iteration = 0
1977     #
1978     # Recherche iterative
1979     # -------------------
1980     while (increment > toler) and (iteration < maxfun) :
1981         iteration += 1
1982         #
1983         Derivees  = numpy.array(fprime(variables))
1984         Derivees  = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
1985         DeriveesT = Derivees.transpose()
1986         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1987         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
1988         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1989         #
1990         variables = variables + step
1991         if bounds is not None:
1992             # Attention : boucle infinie à éviter si un intervalle est trop petit
1993             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1994                 step      = step/2.
1995                 variables = variables - step
1996         residus   = mesures - numpy.ravel( func(variables) )
1997         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1998         #
1999         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2000             step      = step/2.
2001             variables = variables - step
2002             residus   = mesures - numpy.ravel( func(variables) )
2003             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2004         #
2005         increment     = lastsurrogate-surrogate
2006         poids         = 1./(epsilon+numpy.abs(residus))
2007         veps          = 1. - 2. * quantile - residus * poids
2008         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2009     #
2010     # Mesure d'écart
2011     # --------------
2012     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2013     #
2014     return variables, Ecart, [n,p,iteration,increment,0]
2015
2016 # ==============================================================================
2017 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2018     """
2019     3DVAR multi-pas et multi-méthodes
2020     """
2021     #
2022     # Initialisation
2023     # --------------
2024     Xn = numpy.ravel(Xb).reshape((-1,1))
2025     #
2026     if selfA._parameters["EstimationOf"] == "State":
2027         M = EM["Direct"].appliedTo
2028         #
2029         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2030             selfA.StoredVariables["Analysis"].store( Xn )
2031             if selfA._toStore("APosterioriCovariance"):
2032                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
2033                 else:                         Pn = B
2034                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2035             if selfA._toStore("ForecastState"):
2036                 selfA.StoredVariables["ForecastState"].store( Xn )
2037     #
2038     if hasattr(Y,"stepnumber"):
2039         duration = Y.stepnumber()
2040     else:
2041         duration = 2
2042     #
2043     # Multi-pas
2044     # ---------
2045     for step in range(duration-1):
2046         if hasattr(Y,"store"):
2047             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2048         else:
2049             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2050         #
2051         if selfA._parameters["EstimationOf"] == "State": # Forecast
2052             Xn = selfA.StoredVariables["Analysis"][-1]
2053             Xn_predicted = M( Xn )
2054             if selfA._toStore("ForecastState"):
2055                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2056         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2057             # --- > Par principe, M = Id, Q = 0
2058             Xn_predicted = Xn
2059         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2060         #
2061         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2062     #
2063     return 0
2064
2065 # ==============================================================================
2066 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2067     """
2068     3DVAR PSAS
2069     """
2070     #
2071     # Initialisations
2072     # ---------------
2073     #
2074     # Opérateurs
2075     Hm = HO["Direct"].appliedTo
2076     #
2077     # Utilisation éventuelle d'un vecteur H(Xb) précalculé
2078     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2079         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2080     else:
2081         HXb = Hm( Xb )
2082     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2083     if Y.size != HXb.size:
2084         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2085     if max(Y.shape) != max(HXb.shape):
2086         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2087     #
2088     if selfA._toStore("JacobianMatrixAtBackground"):
2089         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2090         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2091         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2092     #
2093     Ht = HO["Tangent"].asMatrix(Xb)
2094     BHT = B * Ht.T
2095     HBHTpR = R + Ht * BHT
2096     Innovation = Y - HXb
2097     #
2098     # Point de démarrage de l'optimisation
2099     Xini = numpy.zeros(Xb.shape)
2100     #
2101     # Définition de la fonction-coût
2102     # ------------------------------
2103     def CostFunction(w):
2104         _W = numpy.asmatrix(numpy.ravel( w )).T
2105         if selfA._parameters["StoreInternalVariables"] or \
2106             selfA._toStore("CurrentState") or \
2107             selfA._toStore("CurrentOptimum"):
2108             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2109         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2110             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2111             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2112         if selfA._toStore("InnovationAtCurrentState"):
2113             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2114         #
2115         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2116         Jo  = float( - _W.T * Innovation )
2117         J   = Jb + Jo
2118         #
2119         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2120         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2121         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2122         selfA.StoredVariables["CostFunctionJ" ].store( J )
2123         if selfA._toStore("IndexOfOptimum") or \
2124             selfA._toStore("CurrentOptimum") or \
2125             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2126             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2127             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2128             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2129             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2130         if selfA._toStore("IndexOfOptimum"):
2131             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2132         if selfA._toStore("CurrentOptimum"):
2133             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2134         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2135             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2136         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2137             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2138         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2139             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2140         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2141             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2142         return J
2143     #
2144     def GradientOfCostFunction(w):
2145         _W = numpy.asmatrix(numpy.ravel( w )).T
2146         GradJb  = HBHTpR * _W
2147         GradJo  = - Innovation
2148         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2149         return GradJ
2150     #
2151     # Minimisation de la fonctionnelle
2152     # --------------------------------
2153     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2154     #
2155     if selfA._parameters["Minimizer"] == "LBFGSB":
2156         if "0.19" <= scipy.version.version <= "1.1.0":
2157             import lbfgsbhlt as optimiseur
2158         else:
2159             import scipy.optimize as optimiseur
2160         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2161             func        = CostFunction,
2162             x0          = Xini,
2163             fprime      = GradientOfCostFunction,
2164             args        = (),
2165             bounds      = selfA._parameters["Bounds"],
2166             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2167             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2168             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2169             iprint      = selfA._parameters["optiprint"],
2170             )
2171         nfeval = Informations['funcalls']
2172         rc     = Informations['warnflag']
2173     elif selfA._parameters["Minimizer"] == "TNC":
2174         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2175             func        = CostFunction,
2176             x0          = Xini,
2177             fprime      = GradientOfCostFunction,
2178             args        = (),
2179             bounds      = selfA._parameters["Bounds"],
2180             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2181             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2182             ftol        = selfA._parameters["CostDecrementTolerance"],
2183             messages    = selfA._parameters["optmessages"],
2184             )
2185     elif selfA._parameters["Minimizer"] == "CG":
2186         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2187             f           = CostFunction,
2188             x0          = Xini,
2189             fprime      = GradientOfCostFunction,
2190             args        = (),
2191             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2192             gtol        = selfA._parameters["GradientNormTolerance"],
2193             disp        = selfA._parameters["optdisp"],
2194             full_output = True,
2195             )
2196     elif selfA._parameters["Minimizer"] == "NCG":
2197         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2198             f           = CostFunction,
2199             x0          = Xini,
2200             fprime      = GradientOfCostFunction,
2201             args        = (),
2202             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2203             avextol     = selfA._parameters["CostDecrementTolerance"],
2204             disp        = selfA._parameters["optdisp"],
2205             full_output = True,
2206             )
2207     elif selfA._parameters["Minimizer"] == "BFGS":
2208         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2209             f           = CostFunction,
2210             x0          = Xini,
2211             fprime      = GradientOfCostFunction,
2212             args        = (),
2213             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2214             gtol        = selfA._parameters["GradientNormTolerance"],
2215             disp        = selfA._parameters["optdisp"],
2216             full_output = True,
2217             )
2218     else:
2219         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2220     #
2221     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2222     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2223     #
2224     # Correction pour pallier a un bug de TNC sur le retour du Minimum
2225     # ----------------------------------------------------------------
2226     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2227         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2228         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2229     else:
2230         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2231     #
2232     # Obtention de l'analyse
2233     # ----------------------
2234     Xa = Minimum
2235     #
2236     selfA.StoredVariables["Analysis"].store( Xa )
2237     #
2238     if selfA._toStore("OMA") or \
2239         selfA._toStore("SigmaObs2") or \
2240         selfA._toStore("SimulationQuantiles") or \
2241         selfA._toStore("SimulatedObservationAtOptimum"):
2242         if selfA._toStore("SimulatedObservationAtCurrentState"):
2243             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2244         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2245             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2246         else:
2247             HXa = Hm( Xa )
2248     #
2249     # Calcul de la covariance d'analyse
2250     # ---------------------------------
2251     if selfA._toStore("APosterioriCovariance") or \
2252         selfA._toStore("SimulationQuantiles") or \
2253         selfA._toStore("JacobianMatrixAtOptimum") or \
2254         selfA._toStore("KalmanGainAtOptimum"):
2255         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2256         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2257     if selfA._toStore("APosterioriCovariance") or \
2258         selfA._toStore("SimulationQuantiles") or \
2259         selfA._toStore("KalmanGainAtOptimum"):
2260         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2261         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2262     if selfA._toStore("APosterioriCovariance") or \
2263         selfA._toStore("SimulationQuantiles"):
2264         BI = B.getI()
2265         RI = R.getI()
2266         HessienneI = []
2267         nb = Xa.size
2268         for i in range(nb):
2269             _ee    = numpy.matrix(numpy.zeros(nb)).T
2270             _ee[i] = 1.
2271             _HtEE  = numpy.dot(HtM,_ee)
2272             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2273             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2274         HessienneI = numpy.matrix( HessienneI )
2275         A = HessienneI.I
2276         if min(A.shape) != max(A.shape):
2277             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2278         if (numpy.diag(A) < 0).any():
2279             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2280         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
2281             try:
2282                 L = numpy.linalg.cholesky( A )
2283             except:
2284                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2285     if selfA._toStore("APosterioriCovariance"):
2286         selfA.StoredVariables["APosterioriCovariance"].store( A )
2287     if selfA._toStore("JacobianMatrixAtOptimum"):
2288         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2289     if selfA._toStore("KalmanGainAtOptimum"):
2290         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2291         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2292         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2293     #
2294     # Calculs et/ou stockages supplémentaires
2295     # ---------------------------------------
2296     if selfA._toStore("Innovation") or \
2297         selfA._toStore("SigmaObs2") or \
2298         selfA._toStore("MahalanobisConsistency") or \
2299         selfA._toStore("OMB"):
2300         d  = Y - HXb
2301     if selfA._toStore("Innovation"):
2302         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2303     if selfA._toStore("BMA"):
2304         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2305     if selfA._toStore("OMA"):
2306         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2307     if selfA._toStore("OMB"):
2308         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2309     if selfA._toStore("SigmaObs2"):
2310         TraceR = R.trace(Y.size)
2311         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2312     if selfA._toStore("MahalanobisConsistency"):
2313         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2314     if selfA._toStore("SimulationQuantiles"):
2315         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2316     if selfA._toStore("SimulatedObservationAtBackground"):
2317         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2318     if selfA._toStore("SimulatedObservationAtOptimum"):
2319         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2320     #
2321     return 0
2322
2323 # ==============================================================================
2324 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2325     """
2326     Stochastic EnKF
2327     """
2328     if selfA._parameters["EstimationOf"] == "Parameters":
2329         selfA._parameters["StoreInternalVariables"] = True
2330     #
2331     # Opérateurs
2332     H = HO["Direct"].appliedControledFormTo
2333     #
2334     if selfA._parameters["EstimationOf"] == "State":
2335         M = EM["Direct"].appliedControledFormTo
2336     #
2337     if CM is not None and "Tangent" in CM and U is not None:
2338         Cm = CM["Tangent"].asMatrix(Xb)
2339     else:
2340         Cm = None
2341     #
2342     # Durée d'observation et tailles
2343     if hasattr(Y,"stepnumber"):
2344         duration = Y.stepnumber()
2345         __p = numpy.cumprod(Y.shape())[-1]
2346     else:
2347         duration = 2
2348         __p = numpy.array(Y).size
2349     #
2350     # Précalcul des inversions de B et R
2351     if selfA._parameters["StoreInternalVariables"] \
2352         or selfA._toStore("CostFunctionJ") \
2353         or selfA._toStore("CostFunctionJb") \
2354         or selfA._toStore("CostFunctionJo") \
2355         or selfA._toStore("CurrentOptimum") \
2356         or selfA._toStore("APosterioriCovariance"):
2357         BI = B.getI()
2358         RI = R.getI()
2359     #
2360     __n = Xb.size
2361     __m = selfA._parameters["NumberOfMembers"]
2362     #
2363     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2364     else:                         Pn = B
2365     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2366     else:                         Rn = R
2367     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2368     #
2369     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2370         selfA.StoredVariables["Analysis"].store( Xb )
2371         if selfA._toStore("APosterioriCovariance"):
2372             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2373             covarianceXa = Pn
2374     #
2375     previousJMinimum = numpy.finfo(float).max
2376     #
2377     for step in range(duration-1):
2378         if hasattr(Y,"store"):
2379             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2380         else:
2381             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2382         #
2383         if U is not None:
2384             if hasattr(U,"store") and len(U)>1:
2385                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2386             elif hasattr(U,"store") and len(U)==1:
2387                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2388             else:
2389                 Un = numpy.asmatrix(numpy.ravel( U )).T
2390         else:
2391             Un = None
2392         #
2393         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2394             Xn = CovarianceInflation( Xn,
2395                 selfA._parameters["InflationType"],
2396                 selfA._parameters["InflationFactor"],
2397                 )
2398         #
2399         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2400             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2401                 argsAsSerie = True,
2402                 returnSerieAsArrayMatrix = True )
2403             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2404             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2405                 argsAsSerie = True,
2406                 returnSerieAsArrayMatrix = True )
2407             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2408                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2409                 Xn_predicted = Xn_predicted + Cm * Un
2410         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2411             # --- > Par principe, M = Id, Q = 0
2412             Xn_predicted = Xn
2413             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2414                 argsAsSerie = True,
2415                 returnSerieAsArrayMatrix = True )
2416         #
2417         # Mean of forecast and observation of forecast
2418         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2419         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2420         #
2421         #--------------------------
2422         if VariantM == "KalmanFilterFormula05":
2423             PfHT, HPfHT = 0., 0.
2424             for i in range(__m):
2425                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2426                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2427                 PfHT  += Exfi * Eyfi.T
2428                 HPfHT += Eyfi * Eyfi.T
2429             PfHT  = (1./(__m-1)) * PfHT
2430             HPfHT = (1./(__m-1)) * HPfHT
2431             Kn     = PfHT * ( R + HPfHT ).I
2432             del PfHT, HPfHT
2433             #
2434             for i in range(__m):
2435                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2436                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2437         #--------------------------
2438         elif VariantM == "KalmanFilterFormula16":
2439             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2440             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2441             #
2442             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2443             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2444             #
2445             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2446             #
2447             for i in range(__m):
2448                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2449         #--------------------------
2450         else:
2451             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2452         #
2453         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2454             Xn = CovarianceInflation( Xn,
2455                 selfA._parameters["InflationType"],
2456                 selfA._parameters["InflationFactor"],
2457                 )
2458         #
2459         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2460         #--------------------------
2461         #
2462         if selfA._parameters["StoreInternalVariables"] \
2463             or selfA._toStore("CostFunctionJ") \
2464             or selfA._toStore("CostFunctionJb") \
2465             or selfA._toStore("CostFunctionJo") \
2466             or selfA._toStore("APosterioriCovariance") \
2467             or selfA._toStore("InnovationAtCurrentAnalysis") \
2468             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2469             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2470             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2471             _Innovation = Ynpu - _HXa
2472         #
2473         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2474         # ---> avec analysis
2475         selfA.StoredVariables["Analysis"].store( Xa )
2476         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2477             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2478         if selfA._toStore("InnovationAtCurrentAnalysis"):
2479             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2480         # ---> avec current state
2481         if selfA._parameters["StoreInternalVariables"] \
2482             or selfA._toStore("CurrentState"):
2483             selfA.StoredVariables["CurrentState"].store( Xn )
2484         if selfA._toStore("ForecastState"):
2485             selfA.StoredVariables["ForecastState"].store( EMX )
2486         if selfA._toStore("BMA"):
2487             selfA.StoredVariables["BMA"].store( EMX - Xa )
2488         if selfA._toStore("InnovationAtCurrentState"):
2489             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2490         if selfA._toStore("SimulatedObservationAtCurrentState") \
2491             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2492             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2493         # ---> autres
2494         if selfA._parameters["StoreInternalVariables"] \
2495             or selfA._toStore("CostFunctionJ") \
2496             or selfA._toStore("CostFunctionJb") \
2497             or selfA._toStore("CostFunctionJo") \
2498             or selfA._toStore("CurrentOptimum") \
2499             or selfA._toStore("APosterioriCovariance"):
2500             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2501             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2502             J   = Jb + Jo
2503             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2504             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2505             selfA.StoredVariables["CostFunctionJ" ].store( J )
2506             #
2507             if selfA._toStore("IndexOfOptimum") \
2508                 or selfA._toStore("CurrentOptimum") \
2509                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2510                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2511                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2512                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2513                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2514             if selfA._toStore("IndexOfOptimum"):
2515                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2516             if selfA._toStore("CurrentOptimum"):
2517                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2518             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2519                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2520             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2521                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2522             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2523                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2524             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2525                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2526         if selfA._toStore("APosterioriCovariance"):
2527             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2528         if selfA._parameters["EstimationOf"] == "Parameters" \
2529             and J < previousJMinimum:
2530             previousJMinimum    = J
2531             XaMin               = Xa
2532             if selfA._toStore("APosterioriCovariance"):
2533                 covarianceXaMin = Pn
2534     #
2535     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2536     # ----------------------------------------------------------------------
2537     if selfA._parameters["EstimationOf"] == "Parameters":
2538         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2539         selfA.StoredVariables["Analysis"].store( XaMin )
2540         if selfA._toStore("APosterioriCovariance"):
2541             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2542         if selfA._toStore("BMA"):
2543             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2544     #
2545     return 0
2546
2547 # ==============================================================================
2548 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2549     """
2550     3DVAR
2551     """
2552     #
2553     # Initialisations
2554     # ---------------
2555     #
2556     # Opérateurs
2557     Hm = HO["Direct"].appliedTo
2558     Ha = HO["Adjoint"].appliedInXTo
2559     #
2560     # Utilisation éventuelle d'un vecteur H(Xb) précalculé
2561     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2562         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2563     else:
2564         HXb = Hm( Xb )
2565     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2566     if Y.size != HXb.size:
2567         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2568     if max(Y.shape) != max(HXb.shape):
2569         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2570     #
2571     if selfA._toStore("JacobianMatrixAtBackground"):
2572         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2573         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2574         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2575     #
2576     # Précalcul des inversions de B et R
2577     BI = B.getI()
2578     RI = R.getI()
2579     #
2580     # Point de démarrage de l'optimisation
2581     Xini = selfA._parameters["InitializationPoint"]
2582     #
2583     # Définition de la fonction-coût
2584     # ------------------------------
2585     def CostFunction(x):
2586         _X  = numpy.asmatrix(numpy.ravel( x )).T
2587         if selfA._parameters["StoreInternalVariables"] or \
2588             selfA._toStore("CurrentState") or \
2589             selfA._toStore("CurrentOptimum"):
2590             selfA.StoredVariables["CurrentState"].store( _X )
2591         _HX = Hm( _X )
2592         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2593         _Innovation = Y - _HX
2594         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2595             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2596             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2597         if selfA._toStore("InnovationAtCurrentState"):
2598             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2599         #
2600         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2601         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2602         J   = Jb + Jo
2603         #
2604         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2605         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2606         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2607         selfA.StoredVariables["CostFunctionJ" ].store( J )
2608         if selfA._toStore("IndexOfOptimum") or \
2609             selfA._toStore("CurrentOptimum") or \
2610             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2611             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2612             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2613             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2614             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2615         if selfA._toStore("IndexOfOptimum"):
2616             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2617         if selfA._toStore("CurrentOptimum"):
2618             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2619         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2620             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2621         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2622             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2623         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2624             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2625         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2626             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2627         return J
2628     #
2629     def GradientOfCostFunction(x):
2630         _X      = numpy.asmatrix(numpy.ravel( x )).T
2631         _HX     = Hm( _X )
2632         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
2633         GradJb  = BI * (_X - Xb)
2634         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
2635         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2636         return GradJ
2637     #
2638     # Minimisation de la fonctionnelle
2639     # --------------------------------
2640     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2641     #
2642     if selfA._parameters["Minimizer"] == "LBFGSB":
2643         if "0.19" <= scipy.version.version <= "1.1.0":
2644             import lbfgsbhlt as optimiseur
2645         else:
2646             import scipy.optimize as optimiseur
2647         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2648             func        = CostFunction,
2649             x0          = Xini,
2650             fprime      = GradientOfCostFunction,
2651             args        = (),
2652             bounds      = selfA._parameters["Bounds"],
2653             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2654             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2655             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2656             iprint      = selfA._parameters["optiprint"],
2657             )
2658         nfeval = Informations['funcalls']
2659         rc     = Informations['warnflag']
2660     elif selfA._parameters["Minimizer"] == "TNC":
2661         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2662             func        = CostFunction,
2663             x0          = Xini,
2664             fprime      = GradientOfCostFunction,
2665             args        = (),
2666             bounds      = selfA._parameters["Bounds"],
2667             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2668             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2669             ftol        = selfA._parameters["CostDecrementTolerance"],
2670             messages    = selfA._parameters["optmessages"],
2671             )
2672     elif selfA._parameters["Minimizer"] == "CG":
2673         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2674             f           = CostFunction,
2675             x0          = Xini,
2676             fprime      = GradientOfCostFunction,
2677             args        = (),
2678             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2679             gtol        = selfA._parameters["GradientNormTolerance"],
2680             disp        = selfA._parameters["optdisp"],
2681             full_output = True,
2682             )
2683     elif selfA._parameters["Minimizer"] == "NCG":
2684         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2685             f           = CostFunction,
2686             x0          = Xini,
2687             fprime      = GradientOfCostFunction,
2688             args        = (),
2689             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2690             avextol     = selfA._parameters["CostDecrementTolerance"],
2691             disp        = selfA._parameters["optdisp"],
2692             full_output = True,
2693             )
2694     elif selfA._parameters["Minimizer"] == "BFGS":
2695         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2696             f           = CostFunction,
2697             x0          = Xini,
2698             fprime      = GradientOfCostFunction,
2699             args        = (),
2700             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2701             gtol        = selfA._parameters["GradientNormTolerance"],
2702             disp        = selfA._parameters["optdisp"],
2703             full_output = True,
2704             )
2705     else:
2706         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2707     #
2708     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2709     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2710     #
2711     # Correction pour pallier a un bug de TNC sur le retour du Minimum
2712     # ----------------------------------------------------------------
2713     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2714         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2715     #
2716     # Obtention de l'analyse
2717     # ----------------------
2718     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2719     #
2720     selfA.StoredVariables["Analysis"].store( Xa )
2721     #
2722     if selfA._toStore("OMA") or \
2723         selfA._toStore("SigmaObs2") or \
2724         selfA._toStore("SimulationQuantiles") or \
2725         selfA._toStore("SimulatedObservationAtOptimum"):
2726         if selfA._toStore("SimulatedObservationAtCurrentState"):
2727             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2728         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2729             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2730         else:
2731             HXa = Hm( Xa )
2732     #
2733     # Calcul de la covariance d'analyse
2734     # ---------------------------------
2735     if selfA._toStore("APosterioriCovariance") or \
2736         selfA._toStore("SimulationQuantiles") or \
2737         selfA._toStore("JacobianMatrixAtOptimum") or \
2738         selfA._toStore("KalmanGainAtOptimum"):
2739         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2740         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2741     if selfA._toStore("APosterioriCovariance") or \
2742         selfA._toStore("SimulationQuantiles") or \
2743         selfA._toStore("KalmanGainAtOptimum"):
2744         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2745         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2746     if selfA._toStore("APosterioriCovariance") or \
2747         selfA._toStore("SimulationQuantiles"):
2748         HessienneI = []
2749         nb = Xa.size
2750         for i in range(nb):
2751             _ee    = numpy.matrix(numpy.zeros(nb)).T
2752             _ee[i] = 1.
2753             _HtEE  = numpy.dot(HtM,_ee)
2754             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2755             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2756         HessienneI = numpy.matrix( HessienneI )
2757         A = HessienneI.I
2758         if min(A.shape) != max(A.shape):
2759             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2760         if (numpy.diag(A) < 0).any():
2761             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2762         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
2763             try:
2764                 L = numpy.linalg.cholesky( A )
2765             except:
2766                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2767     if selfA._toStore("APosterioriCovariance"):
2768         selfA.StoredVariables["APosterioriCovariance"].store( A )
2769     if selfA._toStore("JacobianMatrixAtOptimum"):
2770         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2771     if selfA._toStore("KalmanGainAtOptimum"):
2772         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2773         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2774         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2775     #
2776     # Calculs et/ou stockages supplémentaires
2777     # ---------------------------------------
2778     if selfA._toStore("Innovation") or \
2779         selfA._toStore("SigmaObs2") or \
2780         selfA._toStore("MahalanobisConsistency") or \
2781         selfA._toStore("OMB"):
2782         d  = Y - HXb
2783     if selfA._toStore("Innovation"):
2784         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2785     if selfA._toStore("BMA"):
2786         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2787     if selfA._toStore("OMA"):
2788         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2789     if selfA._toStore("OMB"):
2790         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2791     if selfA._toStore("SigmaObs2"):
2792         TraceR = R.trace(Y.size)
2793         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2794     if selfA._toStore("MahalanobisConsistency"):
2795         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2796     if selfA._toStore("SimulationQuantiles"):
2797         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2798     if selfA._toStore("SimulatedObservationAtBackground"):
2799         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2800     if selfA._toStore("SimulatedObservationAtOptimum"):
2801         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2802     #
2803     return 0
2804
2805 # ==============================================================================
2806 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2807     """
2808     4DVAR
2809     """
2810     #
2811     # Initialisations
2812     # ---------------
2813     #
2814     # Opérateurs
2815     Hm = HO["Direct"].appliedControledFormTo
2816     Mm = EM["Direct"].appliedControledFormTo
2817     #
2818     if CM is not None and "Tangent" in CM and U is not None:
2819         Cm = CM["Tangent"].asMatrix(Xb)
2820     else:
2821         Cm = None
2822     #
2823     def Un(_step):
2824         if U is not None:
2825             if hasattr(U,"store") and 1<=_step<len(U) :
2826                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2827             elif hasattr(U,"store") and len(U)==1:
2828                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2829             else:
2830                 _Un = numpy.asmatrix(numpy.ravel( U )).T
2831         else:
2832             _Un = None
2833         return _Un
2834     def CmUn(_xn,_un):
2835         if Cm is not None and _un is not None: # Attention : si Cm est aussi dans M, doublon !
2836             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2837             _CmUn = _Cm * _un
2838         else:
2839             _CmUn = 0.
2840         return _CmUn
2841     #
2842     # Remarque : les observations sont exploitées à partir du pas de temps
2843     # numéro 1, et sont utilisées dans Yo comme rangées selon ces indices.
2844     # Donc le pas 0 n'est pas utilisé puisque la première étape commence
2845     # avec l'observation du pas 1.
2846     #
2847     # Nombre de pas identique au nombre de pas d'observations
2848     if hasattr(Y,"stepnumber"):
2849         duration = Y.stepnumber()
2850     else:
2851         duration = 2
2852     #
2853     # Précalcul des inversions de B et R
2854     BI = B.getI()
2855     RI = R.getI()
2856     #
2857     # Point de démarrage de l'optimisation
2858     Xini = selfA._parameters["InitializationPoint"]
2859     #
2860     # Définition de la fonction-coût
2861     # ------------------------------
2862     selfA.DirectCalculation = [None,] # Le pas 0 n'est pas observé
2863     selfA.DirectInnovation  = [None,] # Le pas 0 n'est pas observé
2864     def CostFunction(x):
2865         _X  = numpy.asmatrix(numpy.ravel( x )).T
2866         if selfA._parameters["StoreInternalVariables"] or \
2867             selfA._toStore("CurrentState") or \
2868             selfA._toStore("CurrentOptimum"):
2869             selfA.StoredVariables["CurrentState"].store( _X )
2870         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2871         selfA.DirectCalculation = [None,]
2872         selfA.DirectInnovation  = [None,]
2873         Jo  = 0.
2874         _Xn = _X
2875         for step in range(0,duration-1):
2876             if hasattr(Y,"store"):
2877                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2878             else:
2879                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2880             _Un = Un(step)
2881             #
2882             # Etape d'évolution
2883             if selfA._parameters["EstimationOf"] == "State":
2884                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2885             elif selfA._parameters["EstimationOf"] == "Parameters":
2886                 pass
2887             #
2888             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2889                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2890                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2891             #
2892             # Etape de différence aux observations
2893             if selfA._parameters["EstimationOf"] == "State":
2894                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2895             elif selfA._parameters["EstimationOf"] == "Parameters":
2896                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2897             #
2898             # Stockage de l'état
2899             selfA.DirectCalculation.append( _Xn )
2900             selfA.DirectInnovation.append( _YmHMX )
2901             #
2902             # Ajout dans la fonctionnelle d'observation
2903             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2904         J = Jb + Jo
2905         #
2906         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2907         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2908         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2909         selfA.StoredVariables["CostFunctionJ" ].store( J )
2910         if selfA._toStore("IndexOfOptimum") or \
2911             selfA._toStore("CurrentOptimum") or \
2912             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2913             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2914             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2915             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2916         if selfA._toStore("IndexOfOptimum"):
2917             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2918         if selfA._toStore("CurrentOptimum"):
2919             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2920         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2921             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2922         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2923             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2924         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2925             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2926         return J
2927     #
2928     def GradientOfCostFunction(x):
2929         _X      = numpy.asmatrix(numpy.ravel( x )).T
2930         GradJb  = BI * (_X - Xb)
2931         GradJo  = 0.
2932         for step in range(duration-1,0,-1):
2933             # Étape de récupération du dernier stockage de l'évolution
2934             _Xn = selfA.DirectCalculation.pop()
2935             # Étape de récupération du dernier stockage de l'innovation
2936             _YmHMX = selfA.DirectInnovation.pop()
2937             # Calcul des adjoints
2938             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2939             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2940             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2941             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2942             # Calcul du gradient par état adjoint
2943             GradJo = GradJo + Ha * RI * _YmHMX # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
2944             GradJo = Ma * GradJo               # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
2945         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2946         return GradJ
2947     #
2948     # Minimisation de la fonctionnelle
2949     # --------------------------------
2950     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2951     #
2952     if selfA._parameters["Minimizer"] == "LBFGSB":
2953         if "0.19" <= scipy.version.version <= "1.1.0":
2954             import lbfgsbhlt as optimiseur
2955         else:
2956             import scipy.optimize as optimiseur
2957         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2958             func        = CostFunction,
2959             x0          = Xini,
2960             fprime      = GradientOfCostFunction,
2961             args        = (),
2962             bounds      = selfA._parameters["Bounds"],
2963             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2964             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2965             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2966             iprint      = selfA._parameters["optiprint"],
2967             )
2968         nfeval = Informations['funcalls']
2969         rc     = Informations['warnflag']
2970     elif selfA._parameters["Minimizer"] == "TNC":
2971         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2972             func        = CostFunction,
2973             x0          = Xini,
2974             fprime      = GradientOfCostFunction,
2975             args        = (),
2976             bounds      = selfA._parameters["Bounds"],
2977             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2978             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2979             ftol        = selfA._parameters["CostDecrementTolerance"],
2980             messages    = selfA._parameters["optmessages"],
2981             )
2982     elif selfA._parameters["Minimizer"] == "CG":
2983         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2984             f           = CostFunction,
2985             x0          = Xini,
2986             fprime      = GradientOfCostFunction,
2987             args        = (),
2988             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2989             gtol        = selfA._parameters["GradientNormTolerance"],
2990             disp        = selfA._parameters["optdisp"],
2991             full_output = True,
2992             )
2993     elif selfA._parameters["Minimizer"] == "NCG":
2994         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2995             f           = CostFunction,
2996             x0          = Xini,
2997             fprime      = GradientOfCostFunction,
2998             args        = (),
2999             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3000             avextol     = selfA._parameters["CostDecrementTolerance"],
3001             disp        = selfA._parameters["optdisp"],
3002             full_output = True,
3003             )
3004     elif selfA._parameters["Minimizer"] == "BFGS":
3005         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3006             f           = CostFunction,
3007             x0          = Xini,
3008             fprime      = GradientOfCostFunction,
3009             args        = (),
3010             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3011             gtol        = selfA._parameters["GradientNormTolerance"],
3012             disp        = selfA._parameters["optdisp"],
3013             full_output = True,
3014             )
3015     else:
3016         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3017     #
3018     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3019     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3020     #
3021     # Correction pour pallier a un bug de TNC sur le retour du Minimum
3022     # ----------------------------------------------------------------
3023     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3024         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3025     #
3026     # Obtention de l'analyse
3027     # ----------------------
3028     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3029     #
3030     selfA.StoredVariables["Analysis"].store( Xa )
3031     #
3032     # Calculs et/ou stockages supplémentaires
3033     # ---------------------------------------
3034     if selfA._toStore("BMA"):
3035         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3036     #
3037     return 0
3038
3039 # ==============================================================================
3040 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3041     """
3042     3DVAR variational analysis with no inversion of B
3043     """
3044     #
3045     # Initialisations
3046     # ---------------
3047     #
3048     # Opérateurs
3049     Hm = HO["Direct"].appliedTo
3050     Ha = HO["Adjoint"].appliedInXTo
3051     #
3052     # Précalcul des inversions de B et R
3053     BT = B.getT()
3054     RI = R.getI()
3055     #
3056     # Point de démarrage de l'optimisation
3057     Xini = numpy.zeros(Xb.shape)
3058     #
3059     # Définition de la fonction-coût
3060     # ------------------------------
3061     def CostFunction(v):
3062         _V = numpy.asmatrix(numpy.ravel( v )).T
3063         _X = Xb + B * _V
3064         if selfA._parameters["StoreInternalVariables"] or \
3065             selfA._toStore("CurrentState") or \
3066             selfA._toStore("CurrentOptimum"):
3067             selfA.StoredVariables["CurrentState"].store( _X )
3068         _HX = Hm( _X )
3069         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3070         _Innovation = Y - _HX
3071         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3072             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3073             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3074         if selfA._toStore("InnovationAtCurrentState"):
3075             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3076         #
3077         Jb  = float( 0.5 * _V.T * BT * _V )
3078         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3079         J   = Jb + Jo
3080         #
3081         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3082         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3083         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3084         selfA.StoredVariables["CostFunctionJ" ].store( J )
3085         if selfA._toStore("IndexOfOptimum") or \
3086             selfA._toStore("CurrentOptimum") or \
3087             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3088             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3089             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3090             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3091             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3092         if selfA._toStore("IndexOfOptimum"):
3093             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3094         if selfA._toStore("CurrentOptimum"):
3095             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3096         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3097             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3098         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3099             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3100         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3101             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3102         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3103             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3104         return J
3105     #
3106     def GradientOfCostFunction(v):
3107         _V = numpy.asmatrix(numpy.ravel( v )).T
3108         _X = Xb + B * _V
3109         _HX     = Hm( _X )
3110         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
3111         GradJb  = BT * _V
3112         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3113         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3114         return GradJ
3115     #
3116     # Minimisation de la fonctionnelle
3117     # --------------------------------
3118     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3119     #
3120     if selfA._parameters["Minimizer"] == "LBFGSB":
3121         if "0.19" <= scipy.version.version <= "1.1.0":
3122             import lbfgsbhlt as optimiseur
3123         else:
3124             import scipy.optimize as optimiseur
3125         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3126             func        = CostFunction,
3127             x0          = Xini,
3128             fprime      = GradientOfCostFunction,
3129             args        = (),
3130             bounds      = selfA._parameters["Bounds"],
3131             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3132             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3133             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3134             iprint      = selfA._parameters["optiprint"],
3135             )
3136         nfeval = Informations['funcalls']
3137         rc     = Informations['warnflag']
3138     elif selfA._parameters["Minimizer"] == "TNC":
3139         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3140             func        = CostFunction,
3141             x0          = Xini,
3142             fprime      = GradientOfCostFunction,
3143             args        = (),
3144             bounds      = selfA._parameters["Bounds"],
3145             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3146             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3147             ftol        = selfA._parameters["CostDecrementTolerance"],
3148             messages    = selfA._parameters["optmessages"],
3149             )
3150     elif selfA._parameters["Minimizer"] == "CG":
3151         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3152             f           = CostFunction,
3153             x0          = Xini,
3154             fprime      = GradientOfCostFunction,
3155             args        = (),
3156             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3157             gtol        = selfA._parameters["GradientNormTolerance"],
3158             disp        = selfA._parameters["optdisp"],
3159             full_output = True,
3160             )
3161     elif selfA._parameters["Minimizer"] == "NCG":
3162         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3163             f           = CostFunction,
3164             x0          = Xini,
3165             fprime      = GradientOfCostFunction,
3166             args        = (),
3167             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3168             avextol     = selfA._parameters["CostDecrementTolerance"],
3169             disp        = selfA._parameters["optdisp"],
3170             full_output = True,
3171             )
3172     elif selfA._parameters["Minimizer"] == "BFGS":
3173         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3174             f           = CostFunction,
3175             x0          = Xini,
3176             fprime      = GradientOfCostFunction,
3177             args        = (),
3178             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3179             gtol        = selfA._parameters["GradientNormTolerance"],
3180             disp        = selfA._parameters["optdisp"],
3181             full_output = True,
3182             )
3183     else:
3184         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3185     #
3186     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3187     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3188     #
3189     # Correction pour pallier a un bug de TNC sur le retour du Minimum
3190     # ----------------------------------------------------------------
3191     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3192         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3193         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3194     else:
3195         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3196     #
3197     # Obtention de l'analyse
3198     # ----------------------
3199     Xa = Minimum
3200     #
3201     selfA.StoredVariables["Analysis"].store( Xa )
3202     #
3203     if selfA._toStore("OMA") or \
3204         selfA._toStore("SigmaObs2") or \
3205         selfA._toStore("SimulationQuantiles") or \
3206         selfA._toStore("SimulatedObservationAtOptimum"):
3207         if selfA._toStore("SimulatedObservationAtCurrentState"):
3208             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3209         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3210             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3211         else:
3212             HXa = Hm( Xa )
3213     #
3214     # Calcul de la covariance d'analyse
3215     # ---------------------------------
3216     if selfA._toStore("APosterioriCovariance") or \
3217         selfA._toStore("SimulationQuantiles") or \
3218         selfA._toStore("JacobianMatrixAtOptimum") or \
3219         selfA._toStore("KalmanGainAtOptimum"):
3220         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3221         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3222     if selfA._toStore("APosterioriCovariance") or \
3223         selfA._toStore("SimulationQuantiles") or \
3224         selfA._toStore("KalmanGainAtOptimum"):
3225         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3226         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3227     if selfA._toStore("APosterioriCovariance") or \
3228         selfA._toStore("SimulationQuantiles"):
3229         BI = B.getI()
3230         HessienneI = []
3231         nb = Xa.size
3232         for i in range(nb):
3233             _ee    = numpy.matrix(numpy.zeros(nb)).T
3234             _ee[i] = 1.
3235             _HtEE  = numpy.dot(HtM,_ee)
3236             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
3237             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3238         HessienneI = numpy.matrix( HessienneI )
3239         A = HessienneI.I
3240         if min(A.shape) != max(A.shape):
3241             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3242         if (numpy.diag(A) < 0).any():
3243             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3244         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
3245             try:
3246                 L = numpy.linalg.cholesky( A )
3247             except:
3248                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3249     if selfA._toStore("APosterioriCovariance"):
3250         selfA.StoredVariables["APosterioriCovariance"].store( A )
3251     if selfA._toStore("JacobianMatrixAtOptimum"):
3252         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3253     if selfA._toStore("KalmanGainAtOptimum"):
3254         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3255         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3256         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3257     #
3258     # Calculs et/ou stockages supplémentaires
3259     # ---------------------------------------
3260     if selfA._toStore("Innovation") or \
3261         selfA._toStore("SigmaObs2") or \
3262         selfA._toStore("MahalanobisConsistency") or \
3263         selfA._toStore("OMB"):
3264         d  = Y - HXb
3265     if selfA._toStore("Innovation"):
3266         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3267     if selfA._toStore("BMA"):
3268         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3269     if selfA._toStore("OMA"):
3270         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3271     if selfA._toStore("OMB"):
3272         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3273     if selfA._toStore("SigmaObs2"):
3274         TraceR = R.trace(Y.size)
3275         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3276     if selfA._toStore("MahalanobisConsistency"):
3277         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3278     if selfA._toStore("SimulationQuantiles"):
3279         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3280     if selfA._toStore("SimulatedObservationAtBackground"):
3281         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3282     if selfA._toStore("SimulatedObservationAtOptimum"):
3283         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3284     #
3285     return 0
3286
3287 # ==============================================================================
3288 if __name__ == "__main__":
3289     print('\n AUTODIAGNOSTIC\n')