1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
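# ExecuteFunction below is the helper evaluated inside each multiprocessing
# worker: each job is a triplet (point X, optional dict of extra arguments,
# small dict locating the user function), and the worker re-imports the user
# module there and returns the flattened evaluation H(X).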
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = list(sys.path) ; sys.path.insert(0,funcrepr["__userFunction__path"]) # copy, so that sys.path can be restored below
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     This class is an interface used to define the approximated operators. At
55     the creation of an object, by providing a function "Function", one obtains
56     an object that provides the 3 methods "DirectOperator", "TangentOperator"
57     and "AdjointOperator". The FD (finite difference) approximation is
58     controlled either with the multiplicative increment "increment", which
59     defaults to 1%, or with the fixed increment "dX" which is then multiplied
60     by "increment" (thus in %), and centered FD are used if "centeredDF" is true.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Multiprocessing computations : %s (number of processes : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Multifunction computations : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previously Calculated Points
102             self.__listJPCI = [] # Jacobian Previously Calculated Increments
103             self.__listJPCR = [] # Jacobian Previously Calculated Results
104             self.__listJPPN = [] # Jacobian Previously Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previously Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Multiprocessing computations : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Multiprocessing computations : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction of duplicate computations : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance for duplicate detection : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Case%s already computed, retrieving duplicate %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Computation of the direct operator using the provided function.
171
172         NB: the extraArgs are present to ensure call compatibility, but they
173         must not be passed here to the user function.
174         """
175         logging.debug("FDA DirectOperator computation (explicit)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Computation of the tangent operator as the Jacobian matrix by finite
188         differences, i.e. the gradient of H at the point X. Directional finite
189         differences are used around the point X. X is a numpy.matrix.
190
191         Centered finite differences (2nd order approximation):
192         1/ For each component i of X, the perturbation dX[i] is added to and
193            subtracted from the component X[i] to build X_plus_dXi and
194            X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
195            HX_moins_dXi = H( X_moins_dXi ) are computed
196         2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
197            by the step 2*dXi
198         3/ Each result, component by component, becomes a column of the Jacobian
199
200         Non-centered finite differences (1st order approximation):
201         1/ For each component i of X, the perturbation dX[i] is added to the
202            component X[i] to build X_plus_dXi, and the response
203            HX_plus_dXi = H( X_plus_dXi ) is computed
204         2/ The central value HX = H(X) is computed
205         3/ The differences (HX_plus_dXi-HX) are computed and divided by
206            the step dXi
207         4/ Each result, component by component, becomes a column of the Jacobian
208
209         """
210         logging.debug("FDA Start of the Jacobian computation")
211         logging.debug("FDA   Increment of............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Centered approximation..: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Case J already computed, retrieving duplicate %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Jacobian computation (by retrieving duplicate %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Jacobian computation (explicit)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                     #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                     #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
377         #
378         logging.debug("FDA End of the Jacobian computation")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Computation of the tangent operator using the Jacobian matrix.
386
387         NB: the extraArgs are present to ensure call compatibility, but they
388         must not be passed here to the user function.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect length of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Computation of the matrix form if the second argument is None
402             # -------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Computation of the linearized value of H at X applied to dX
408             # ------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Computation of the adjoint operator using the Jacobian matrix.
418
419         NB: the extraArgs are present to ensure call compatibility, but they
420         must not be passed here to the user function.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect length of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Computation of the matrix form if the second argument is None
434             # -------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Computation of the adjoint value at X applied to Y
440             # --------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
446 # ==============================================================================
447 def mmqr(
448         func     = None,
449         x0       = None,
450         fprime   = None,
451         bounds   = None,
452         quantile = 0.5,
453         maxfun   = 15000,
454         toler    = 1.e-06,
455         y        = None,
456         ):
457     """
458     Computer implementation of the MMQR algorithm, based on the publication:
459     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
460     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
461     """
462     #
463     # Retrieval of the initial data and information
464     # --------------------------------------------------
465     variables = numpy.ravel( x0 )
466     mesures   = numpy.ravel( y )
467     increment = sys.float_info[0]
468     p         = variables.size
469     n         = mesures.size
470     quantile  = float(quantile)
471     #
472     # Computation of the MM parameters
473     # ---------------------------
474     tn      = float(toler) / n
475     e0      = -tn / math.log(tn)
476     epsilon = (e0-tn)/(1+math.log(e0))
477     #
478     # Initialization computations
479     # ------------------------
480     residus  = mesures - numpy.ravel( func( variables ) )
481     poids    = 1./(epsilon+numpy.abs(residus))
482     veps     = 1. - 2. * quantile - residus * poids
483     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
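    # Note: poids = 1/(epsilon+|residus|) are the MM majorization weights of
    # Hunter & Lange, so that each iteration below minimizes a weighted
    # least-squares surrogate of the quantile criterion.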
484     iteration = 0
485     #
486     # Iterative search
487     # -------------------
488     while (increment > toler) and (iteration < maxfun) :
489         iteration += 1
490         #
491         Derivees  = numpy.array(fprime(variables))
492         Derivees  = Derivees.reshape(n,p) # Needed to restore the matrix shape if it goes through YACS pipes
493         DeriveesT = Derivees.transpose()
494         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
495         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
496         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
497         #
498         variables = variables + step
499         if bounds is not None:
500             # Warning: beware of an infinite loop if an interval is too small
501             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
502                 step      = step/2.
503                 variables = variables - step
504         residus   = mesures - numpy.ravel( func(variables) )
505         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
506         #
507         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
508             step      = step/2.
509             variables = variables - step
510             residus   = mesures - numpy.ravel( func(variables) )
511             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
512         #
513         increment     = lastsurrogate-surrogate
514         poids         = 1./(epsilon+numpy.abs(residus))
515         veps          = 1. - 2. * quantile - residus * poids
516         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
517     #
518     # Discrepancy measure
519     # --------------
520     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
521     #
522     return variables, Ecart, [n,p,iteration,increment,0]
523
524 # ==============================================================================
525 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
526     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
527     #
528     _bgcenter = numpy.ravel(_bgcenter)[:,None]
529     if _nbmembers < 1:
530         raise ValueError("Number of members has to be strictly more than 0 (given number: %s)."%(str(_nbmembers),))
531     #
532     if _bgcovariance is None:
533         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
534     else:
535         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
536         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
537     #
538     return BackgroundEnsemble
539
540 # ==============================================================================
541 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
542     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
543     def __CenteredRandomAnomalies(Zr, N):
544         """
545         Generates a matrix of N random anomalies centered on Zr according to
546         the handwritten notes of MB, consistent with the code of PS with eps = -1
547         """
548         eps = -1
549         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
550         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
551         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
552         Q = numpy.dot(Q,R)
553         Zr = numpy.dot(Q,Zr)
554         return Zr.T
555     #
556     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
557     if _nbmembers < 1:
558         raise ValueError("Number of members has to be strictly more than 0 (given number: %s)."%(str(_nbmembers),))
559     if _bgcovariance is None:
560         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
561     else:
562         if _withSVD:
563             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
564             _nbctl = _bgcenter.size
565             if _nbmembers > _nbctl:
566                 _Z = numpy.concatenate((numpy.dot(
567                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
568                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
569             else:
570                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
571             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
572             BackgroundEnsemble = _bgcenter + _Zca
573         else:
574             if max(abs(_bgcovariance.flatten())) > 0:
575                 _nbctl = _bgcenter.size
576                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
577                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
578                 BackgroundEnsemble = _bgcenter + _Zca
579             else:
580                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
581     #
582     return BackgroundEnsemble
583
584 # ==============================================================================
585 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
586     "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
587     if OptMean is None:
588         __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
589     else:
590         __Em = numpy.ravel(OptMean).reshape((-1,1))
591     #
592     return Normalisation * (numpy.asarray(Ensemble) - __Em)
593
594 # ==============================================================================
595 def EnsembleErrorCovariance( Ensemble ):
596     "Renvoie la covariance d'ensemble"
597     __Anomalies = EnsembleOfAnomalies( Ensemble )
598     __n, __m = numpy.asarray(__Anomalies).shape
599     __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
600     # Ensures symmetry
601     __Covariance = (__Covariance + __Covariance.T) * 0.5
602     # Ensures positivity
603     __epsilon    = mpr*numpy.trace(__Covariance)
604     __Covariance = __Covariance + __epsilon * numpy.identity(__n)
605     #
606     return __Covariance
607
608 # ==============================================================================
609 def CovarianceInflation(
610         InputCovOrEns,
611         InflationType   = None,
612         InflationFactor = None,
613         BackgroundCov   = None,
614         ):
615     """
616     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
617
618     Synthesis: Hunt 2007, section 2.3.5
619     """
620     if InflationFactor is None:
621         return InputCovOrEns
622     else:
623         InflationFactor = float(InflationFactor)
624     #
625     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
626         if InflationFactor < 1.:
627             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
628         if InflationFactor < 1.+mpr:
629             return InputCovOrEns
630         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
631     #
632     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
633         if InflationFactor < 1.:
634             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
635         if InflationFactor < 1.+mpr:
636             return InputCovOrEns
637         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
638         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
639             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
640     #
641     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
642         if InflationFactor < 0.:
643             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
644         if InflationFactor < mpr:
645             return InputCovOrEns
646         __n, __m = numpy.asarray(InputCovOrEns).shape
647         if __n != __m:
648             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
649         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
650     #
651     elif InflationType == "HybridOnBackgroundCovariance":
652         if InflationFactor < 0.:
653             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
654         if InflationFactor < mpr:
655             return InputCovOrEns
656         __n, __m = numpy.asarray(InputCovOrEns).shape
657         if __n != __m:
658             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
659         if BackgroundCov is None:
660             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
661         if InputCovOrEns.shape != BackgroundCov.shape:
662             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
663         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
664     #
665     elif InflationType == "Relaxation":
666         raise NotImplementedError("InflationType Relaxation")
667     #
668     else:
669         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
670     #
671     return OutputCovOrEns
672
673 # ==============================================================================
674 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
675     """
676     Multi-step and multi-method 3DVAR
677     """
678     #
679     # Initialization
680     # --------------
681     Xn = numpy.ravel(Xb).reshape((-1,1))
682     #
683     if selfA._parameters["EstimationOf"] == "State":
684         M = EM["Direct"].appliedTo
685         #
686         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
687             selfA.StoredVariables["Analysis"].store( Xn )
688             if selfA._toStore("APosterioriCovariance"):
689                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
690                 else:                         Pn = B
691                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
692             if selfA._toStore("ForecastState"):
693                 selfA.StoredVariables["ForecastState"].store( Xn )
694     #
695     if hasattr(Y,"stepnumber"):
696         duration = Y.stepnumber()
697     else:
698         duration = 2
699     #
700     # Multi-step
701     # ---------
702     for step in range(duration-1):
703         if hasattr(Y,"store"):
704             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
705         else:
706             Ynpu = numpy.ravel( Y ).reshape((-1,1))
707         #
708         if selfA._parameters["EstimationOf"] == "State": # Forecast
709             Xn = selfA.StoredVariables["Analysis"][-1]
710             Xn_predicted = M( Xn )
711             if selfA._toStore("ForecastState"):
712                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
713         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
714             # --- > By principle, M = Id, Q = 0
715             Xn_predicted = Xn
716         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
717         #
718         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
719     #
720     return 0
721
722 # ==============================================================================
723 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
724     """
725     3DVAR
726     """
727     #
728     # Initializations
729     # ---------------
730     #
731     # Operators
732     Hm = HO["Direct"].appliedTo
733     Ha = HO["Adjoint"].appliedInXTo
734     #
735     # Possible use of a precomputed H(Xb) vector
736     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
737         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
738     else:
739         HXb = Hm( Xb )
740     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
741     if Y.size != HXb.size:
742         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
743     if max(Y.shape) != max(HXb.shape):
744         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
745     #
746     if selfA._toStore("JacobianMatrixAtBackground"):
747         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
748         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
749         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
750     #
751     # Precomputation of the inverses of B and R
752     BI = B.getI()
753     RI = R.getI()
754     #
755     # Starting point of the optimization
756     Xini = selfA._parameters["InitializationPoint"]
757     #
758     # Definition of the cost function
759     # ------------------------------
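    # The cost function evaluated below is the classical 3DVAR functional
    #     J(x) = 0.5*(x-Xb)^T B^{-1} (x-Xb) + 0.5*(Y-H(x))^T R^{-1} (Y-H(x))
    # whose two terms are stored separately as Jb and Jo.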
760     def CostFunction(x):
761         _X  = numpy.asmatrix(numpy.ravel( x )).T
762         if selfA._parameters["StoreInternalVariables"] or \
763             selfA._toStore("CurrentState") or \
764             selfA._toStore("CurrentOptimum"):
765             selfA.StoredVariables["CurrentState"].store( _X )
766         _HX = Hm( _X )
767         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
768         _Innovation = Y - _HX
769         if selfA._toStore("SimulatedObservationAtCurrentState") or \
770             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
771             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
772         if selfA._toStore("InnovationAtCurrentState"):
773             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
774         #
775         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
776         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
777         J   = Jb + Jo
778         #
779         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
780         selfA.StoredVariables["CostFunctionJb"].store( Jb )
781         selfA.StoredVariables["CostFunctionJo"].store( Jo )
782         selfA.StoredVariables["CostFunctionJ" ].store( J )
783         if selfA._toStore("IndexOfOptimum") or \
784             selfA._toStore("CurrentOptimum") or \
785             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
786             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
787             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
788             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
789             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
790         if selfA._toStore("IndexOfOptimum"):
791             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
792         if selfA._toStore("CurrentOptimum"):
793             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
794         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
795             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
796         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
797             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
798         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
799             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
800         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
801             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
802         return J
803     #
804     def GradientOfCostFunction(x):
805         _X      = numpy.asmatrix(numpy.ravel( x )).T
806         _HX     = Hm( _X )
807         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
808         GradJb  = BI * (_X - Xb)
809         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
810         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
811         return GradJ
812     #
813     # Minimization of the functional
814     # --------------------------------
815     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
816     #
817     if selfA._parameters["Minimizer"] == "LBFGSB":
818         if "0.19" <= scipy.version.version <= "1.1.0":
819             import lbfgsbhlt as optimiseur
820         else:
821             import scipy.optimize as optimiseur
822         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
823             func        = CostFunction,
824             x0          = Xini,
825             fprime      = GradientOfCostFunction,
826             args        = (),
827             bounds      = selfA._parameters["Bounds"],
828             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
829             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
830             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
831             iprint      = selfA._parameters["optiprint"],
832             )
833         nfeval = Informations['funcalls']
834         rc     = Informations['warnflag']
835     elif selfA._parameters["Minimizer"] == "TNC":
836         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
837             func        = CostFunction,
838             x0          = Xini,
839             fprime      = GradientOfCostFunction,
840             args        = (),
841             bounds      = selfA._parameters["Bounds"],
842             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
843             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
844             ftol        = selfA._parameters["CostDecrementTolerance"],
845             messages    = selfA._parameters["optmessages"],
846             )
847     elif selfA._parameters["Minimizer"] == "CG":
848         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
849             f           = CostFunction,
850             x0          = Xini,
851             fprime      = GradientOfCostFunction,
852             args        = (),
853             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
854             gtol        = selfA._parameters["GradientNormTolerance"],
855             disp        = selfA._parameters["optdisp"],
856             full_output = True,
857             )
858     elif selfA._parameters["Minimizer"] == "NCG":
859         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
860             f           = CostFunction,
861             x0          = Xini,
862             fprime      = GradientOfCostFunction,
863             args        = (),
864             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
865             avextol     = selfA._parameters["CostDecrementTolerance"],
866             disp        = selfA._parameters["optdisp"],
867             full_output = True,
868             )
869     elif selfA._parameters["Minimizer"] == "BFGS":
870         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
871             f           = CostFunction,
872             x0          = Xini,
873             fprime      = GradientOfCostFunction,
874             args        = (),
875             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
876             gtol        = selfA._parameters["GradientNormTolerance"],
877             disp        = selfA._parameters["optdisp"],
878             full_output = True,
879             )
880     else:
881         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
882     #
883     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
884     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
885     #
886     # Workaround for a TNC bug on the returned Minimum
887     # ----------------------------------------------------------------
888     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
889         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
890     #
891     # Obtaining the analysis
892     # ----------------------
893     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
894     #
895     selfA.StoredVariables["Analysis"].store( Xa )
896     #
897     if selfA._toStore("OMA") or \
898         selfA._toStore("SigmaObs2") or \
899         selfA._toStore("SimulationQuantiles") or \
900         selfA._toStore("SimulatedObservationAtOptimum"):
901         if selfA._toStore("SimulatedObservationAtCurrentState"):
902             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
903         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
904             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
905         else:
906             HXa = Hm( Xa )
907     #
908     # Computation of the analysis covariance
909     # ---------------------------------
910     if selfA._toStore("APosterioriCovariance") or \
911         selfA._toStore("SimulationQuantiles") or \
912         selfA._toStore("JacobianMatrixAtOptimum") or \
913         selfA._toStore("KalmanGainAtOptimum"):
914         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
915         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
916     if selfA._toStore("APosterioriCovariance") or \
917         selfA._toStore("SimulationQuantiles") or \
918         selfA._toStore("KalmanGainAtOptimum"):
919         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
920         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
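    # The a posteriori covariance computed below is A = (B^{-1} + H^T R^{-1} H)^{-1}:
    # each column BI*e_i + HaM*(RI*(HtM*e_i)) of the Hessian of J is assembled,
    # and the resulting matrix is then inverted.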
921     if selfA._toStore("APosterioriCovariance") or \
922         selfA._toStore("SimulationQuantiles"):
923         HessienneI = []
924         nb = Xa.size
925         for i in range(nb):
926             _ee    = numpy.matrix(numpy.zeros(nb)).T
927             _ee[i] = 1.
928             _HtEE  = numpy.dot(HtM,_ee)
929             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
930             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
931         HessienneI = numpy.matrix( HessienneI )
932         A = HessienneI.I
933         if min(A.shape) != max(A.shape):
934             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
935         if (numpy.diag(A) < 0).any():
936             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
937         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
938             try:
939                 L = numpy.linalg.cholesky( A )
940             except:
941                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
942     if selfA._toStore("APosterioriCovariance"):
943         selfA.StoredVariables["APosterioriCovariance"].store( A )
944     if selfA._toStore("JacobianMatrixAtOptimum"):
945         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
946     if selfA._toStore("KalmanGainAtOptimum"):
947         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
948         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
949         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
950     #
951     # Additional computations and/or storage
952     # ---------------------------------------
953     if selfA._toStore("Innovation") or \
954         selfA._toStore("SigmaObs2") or \
955         selfA._toStore("MahalanobisConsistency") or \
956         selfA._toStore("OMB"):
957         d  = Y - HXb
958     if selfA._toStore("Innovation"):
959         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
960     if selfA._toStore("BMA"):
961         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
962     if selfA._toStore("OMA"):
963         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
964     if selfA._toStore("OMB"):
965         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
966     if selfA._toStore("SigmaObs2"):
967         TraceR = R.trace(Y.size)
968         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
969     if selfA._toStore("MahalanobisConsistency"):
970         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
971     if selfA._toStore("SimulationQuantiles"):
972         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
973         HXa  = numpy.matrix(numpy.ravel( HXa )).T
974         YfQ  = None
975         for i in range(nech):
976             if selfA._parameters["SimulationForQuantiles"] == "Linear":
977                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
978                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
979                 Yr = HXa + dYr
980             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
981                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
982                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
983             if YfQ is None:
984                 YfQ = Yr
985             else:
986                 YfQ = numpy.hstack((YfQ,Yr))
987         YfQ.sort(axis=-1)
988         YQ = None
989         for quantile in selfA._parameters["Quantiles"]:
990             if not (0. <= float(quantile) <= 1.): continue
991             indice = int(nech * float(quantile) - 1./nech)
992             if YQ is None: YQ = YfQ[:,indice]
993             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
994         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
995     if selfA._toStore("SimulatedObservationAtBackground"):
996         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
997     if selfA._toStore("SimulatedObservationAtOptimum"):
998         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
999     #
1000     return 0
1001
1002 # ==============================================================================
1003 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1004     """
1005     3DVAR variational analysis with no inversion of B
1006     """
1007     #
1008     # Initializations
1009     # ---------------
1010     #
1011     # Operators
1012     Hm = HO["Direct"].appliedTo
1013     Ha = HO["Adjoint"].appliedInXTo
1014     #
1015     # Precomputation of the transpose of B and the inverse of R
1016     BT = B.getT()
1017     RI = R.getI()
1018     #
1019     # Starting point of the optimization
1020     Xini = numpy.zeros(Xb.shape)
1021     #
1022     # Definition of the cost function
1023     # ------------------------------
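    # The minimization below uses the change of variables x = Xb + B*v, so that
    # the background term becomes Jb = 0.5 * v^T B v (B being symmetric) and no
    # inversion of B is required, as indicated by the name of this variant.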
1024     def CostFunction(v):
1025         _V = numpy.asmatrix(numpy.ravel( v )).T
1026         _X = Xb + B * _V
1027         if selfA._parameters["StoreInternalVariables"] or \
1028             selfA._toStore("CurrentState") or \
1029             selfA._toStore("CurrentOptimum"):
1030             selfA.StoredVariables["CurrentState"].store( _X )
1031         _HX = Hm( _X )
1032         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1033         _Innovation = Y - _HX
1034         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1035             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1036             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
1037         if selfA._toStore("InnovationAtCurrentState"):
1038             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1039         #
1040         Jb  = float( 0.5 * _V.T * BT * _V )
1041         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1042         J   = Jb + Jo
1043         #
1044         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1045         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1046         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1047         selfA.StoredVariables["CostFunctionJ" ].store( J )
1048         if selfA._toStore("IndexOfOptimum") or \
1049             selfA._toStore("CurrentOptimum") or \
1050             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1051             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1052             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1053             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1054             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1055         if selfA._toStore("IndexOfOptimum"):
1056             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1057         if selfA._toStore("CurrentOptimum"):
1058             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1059         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1060             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1061         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1062             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1063         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1064             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1065         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1066             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1067         return J
1068     #
1069     def GradientOfCostFunction(v):
1070         _V = numpy.asmatrix(numpy.ravel( v )).T
1071         _X = Xb + B * _V
1072         _HX     = Hm( _X )
1073         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
1074         GradJb  = BT * _V
1075         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
1076         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1077         return GradJ
1078     #
1079     # Minimization of the functional
1080     # --------------------------------
1081     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1082     #
1083     if selfA._parameters["Minimizer"] == "LBFGSB":
1084         if "0.19" <= scipy.version.version <= "1.1.0":
1085             import lbfgsbhlt as optimiseur
1086         else:
1087             import scipy.optimize as optimiseur
1088         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1089             func        = CostFunction,
1090             x0          = Xini,
1091             fprime      = GradientOfCostFunction,
1092             args        = (),
1093             bounds      = selfA._parameters["Bounds"],
1094             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1095             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1096             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1097             iprint      = selfA._parameters["optiprint"],
1098             )
1099         nfeval = Informations['funcalls']
1100         rc     = Informations['warnflag']
1101     elif selfA._parameters["Minimizer"] == "TNC":
1102         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1103             func        = CostFunction,
1104             x0          = Xini,
1105             fprime      = GradientOfCostFunction,
1106             args        = (),
1107             bounds      = selfA._parameters["Bounds"],
1108             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1109             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1110             ftol        = selfA._parameters["CostDecrementTolerance"],
1111             messages    = selfA._parameters["optmessages"],
1112             )
1113     elif selfA._parameters["Minimizer"] == "CG":
1114         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1115             f           = CostFunction,
1116             x0          = Xini,
1117             fprime      = GradientOfCostFunction,
1118             args        = (),
1119             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1120             gtol        = selfA._parameters["GradientNormTolerance"],
1121             disp        = selfA._parameters["optdisp"],
1122             full_output = True,
1123             )
1124     elif selfA._parameters["Minimizer"] == "NCG":
1125         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1126             f           = CostFunction,
1127             x0          = Xini,
1128             fprime      = GradientOfCostFunction,
1129             args        = (),
1130             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1131             avextol     = selfA._parameters["CostDecrementTolerance"],
1132             disp        = selfA._parameters["optdisp"],
1133             full_output = True,
1134             )
1135     elif selfA._parameters["Minimizer"] == "BFGS":
1136         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1137             f           = CostFunction,
1138             x0          = Xini,
1139             fprime      = GradientOfCostFunction,
1140             args        = (),
1141             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1142             gtol        = selfA._parameters["GradientNormTolerance"],
1143             disp        = selfA._parameters["optdisp"],
1144             full_output = True,
1145             )
1146     else:
1147         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1148     #
1149     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1150     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1151     #
1152     # Correction to work around a TNC bug on the returned Minimum
1153     # -----------------------------------------------------------
1154     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1155         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1156         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1157     else:
1158         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
1159     #
1160     # Retrieval of the analysis
1161     # -------------------------
1162     Xa = Minimum
1163     #
1164     selfA.StoredVariables["Analysis"].store( Xa )
1165     #
1166     if selfA._toStore("OMA") or \
1167         selfA._toStore("SigmaObs2") or \
1168         selfA._toStore("SimulationQuantiles") or \
1169         selfA._toStore("SimulatedObservationAtOptimum"):
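        # H(Xa) is taken from already stored simulated observations when
        # available, to avoid an extra evaluation of the observation operator;
        # otherwise it is recomputed from the analysis Xa.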
1170         if selfA._toStore("SimulatedObservationAtCurrentState"):
1171             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1172         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1173             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1174         else:
1175             HXa = Hm( Xa )
1176     #
1177     # Computation of the analysis covariance
1178     # --------------------------------------
1179     if selfA._toStore("APosterioriCovariance") or \
1180         selfA._toStore("SimulationQuantiles") or \
1181         selfA._toStore("JacobianMatrixAtOptimum") or \
1182         selfA._toStore("KalmanGainAtOptimum"):
1183         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1184         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1185     if selfA._toStore("APosterioriCovariance") or \
1186         selfA._toStore("SimulationQuantiles") or \
1187         selfA._toStore("KalmanGainAtOptimum"):
1188         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1189         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1190     if selfA._toStore("APosterioriCovariance") or \
1191         selfA._toStore("SimulationQuantiles"):
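        # The a posteriori covariance A is obtained as the inverse of the
        # Hessian of the cost function, A = (B^-1 + H^T R^-1 H)^-1, built
        # column by column by applying the tangent and adjoint operators to
        # the canonical basis vectors _ee.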
1192         BI = B.getI()
1193         HessienneI = []
1194         nb = Xa.size
1195         for i in range(nb):
1196             _ee    = numpy.matrix(numpy.zeros(nb)).T
1197             _ee[i] = 1.
1198             _HtEE  = numpy.dot(HtM,_ee)
1199             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1200             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1201         HessienneI = numpy.matrix( HessienneI )
1202         A = HessienneI.I
1203         if min(A.shape) != max(A.shape):
1204             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1205         if (numpy.diag(A) < 0).any():
1206             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1207         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1208             try:
1209                 L = numpy.linalg.cholesky( A )
1210             except:
1211                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1212     if selfA._toStore("APosterioriCovariance"):
1213         selfA.StoredVariables["APosterioriCovariance"].store( A )
1214     if selfA._toStore("JacobianMatrixAtOptimum"):
1215         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1216     if selfA._toStore("KalmanGainAtOptimum"):
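        # Two algebraically equivalent expressions of the Kalman gain are used:
        # the observation-space form K = B H^T (R + H B H^T)^-1 when there are
        # no more observations than state variables, and the state-space form
        # K = (B^-1 + H^T R^-1 H)^-1 H^T R^-1 otherwise.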
1217         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1218         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1219         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1220     #
1221     # Additional calculations and/or storage
1222     # --------------------------------------
1223     if selfA._toStore("Innovation") or \
1224         selfA._toStore("SigmaObs2") or \
1225         selfA._toStore("MahalanobisConsistency") or \
1226         selfA._toStore("OMB"):
1227         d  = Y - HXb
1228     if selfA._toStore("Innovation"):
1229         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1230     if selfA._toStore("BMA"):
1231         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1232     if selfA._toStore("OMA"):
1233         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1234     if selfA._toStore("OMB"):
1235         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1236     if selfA._toStore("SigmaObs2"):
1237         TraceR = R.trace(Y.size)
1238         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1239     if selfA._toStore("MahalanobisConsistency"):
1240         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1241     if selfA._toStore("SimulationQuantiles"):
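        # Quantiles are estimated by drawing nech samples from N(Xa, A): each
        # sample is mapped to observation space either with the tangent
        # operator around Xa ("Linear") or with the full non-linear operator
        # Hm ("NonLinear"), then empirical quantiles of the simulated
        # observations are stored.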
1242         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1243         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1244         YfQ  = None
1245         for i in range(nech):
1246             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1247                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1248                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1249                 Yr = HXa + dYr
1250             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1251                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1252                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1253             if YfQ is None:
1254                 YfQ = Yr
1255             else:
1256                 YfQ = numpy.hstack((YfQ,Yr))
1257         YfQ.sort(axis=-1)
1258         YQ = None
1259         for quantile in selfA._parameters["Quantiles"]:
1260             if not (0. <= float(quantile) <= 1.): continue
1261             indice = int(nech * float(quantile) - 1./nech)
1262             if YQ is None: YQ = YfQ[:,indice]
1263             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1264         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1265     if selfA._toStore("SimulatedObservationAtBackground"):
1266         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1267     if selfA._toStore("SimulatedObservationAtOptimum"):
1268         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1269     #
1270     return 0
1271
1272 # ==============================================================================
1273 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1274     """
1275     Incremental 3DVAR
1276     """
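    # Sketch of the algorithm as implemented below: an outer loop re-linearizes
    # the observation operator around the current state Xr, and an inner loop
    # minimizes the quadratic cost in the increment dx,
    #   J(dx) = 1/2 dx^T B^-1 dx + 1/2 (d - H dx)^T R^-1 (d - H dx),
    # with d = Y - H(Xb), until the cost decrement or the iteration budget
    # stops the outer loop.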
1277     #
1278     # Initializations
1279     # ---------------
1280     #
1281     # Non-linear operator for the outer loop
1282     Hm = HO["Direct"].appliedTo
1283     #
1284     # Precomputation of the inverses of B and R
1285     BI = B.getI()
1286     RI = R.getI()
1287     #
1288     # Starting point of the optimization
1289     Xini = selfA._parameters["InitializationPoint"]
1290     #
1291     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1292     Innovation = Y - HXb
1293     #
1294     # Outer Loop
1295     # ----------
1296     iOuter = 0
1297     J      = 1./mpr
1298     DeltaJ = 1./mpr
1299     Xr     = Xini.reshape((-1,1))
1300     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1301         #
1302         # Inner Loop
1303         # ----------
1304         Ht = HO["Tangent"].asMatrix(Xr)
1305         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1306         #
1307         # Definition of the cost function
1308         # -------------------------------
1309         def CostFunction(dx):
1310             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1311             if selfA._parameters["StoreInternalVariables"] or \
1312                 selfA._toStore("CurrentState") or \
1313                 selfA._toStore("CurrentOptimum"):
1314                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1315             _HdX = Ht * _dX
1316             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1317             _dInnovation = Innovation - _HdX
1318             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1319                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1320                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1321             if selfA._toStore("InnovationAtCurrentState"):
1322                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1323             #
1324             Jb  = float( 0.5 * _dX.T * BI * _dX )
1325             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1326             J   = Jb + Jo
1327             #
1328             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1329             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1330             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1331             selfA.StoredVariables["CostFunctionJ" ].store( J )
1332             if selfA._toStore("IndexOfOptimum") or \
1333                 selfA._toStore("CurrentOptimum") or \
1334                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1335                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1336                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1337                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1338                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1339             if selfA._toStore("IndexOfOptimum"):
1340                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1341             if selfA._toStore("CurrentOptimum"):
1342                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1343             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1344                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1345             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1346                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1347             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1348                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1349             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1350                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1351             return J
1352         #
1353         def GradientOfCostFunction(dx):
1354             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1355             _HdX         = Ht * _dX
1356             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1357             _dInnovation = Innovation - _HdX
1358             GradJb       = BI * _dX
1359             GradJo       = - Ht.T @ (RI * _dInnovation)
1360             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1361             return GradJ
1362         #
1363         # Minimization of the functional
1364         # ------------------------------
1365         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1366         #
1367         if selfA._parameters["Minimizer"] == "LBFGSB":
1369             if "0.19" <= scipy.version.version <= "1.1.0":
1370                 import lbfgsbhlt as optimiseur
1371             else:
1372                 import scipy.optimize as optimiseur
1373             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1374                 func        = CostFunction,
1375                 x0          = numpy.zeros(Xini.size),
1376                 fprime      = GradientOfCostFunction,
1377                 args        = (),
1378                 bounds      = selfA._parameters["Bounds"],
1379                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1380                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1381                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1382                 iprint      = selfA._parameters["optiprint"],
1383                 )
1384             nfeval = Informations['funcalls']
1385             rc     = Informations['warnflag']
1386         elif selfA._parameters["Minimizer"] == "TNC":
1387             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1388                 func        = CostFunction,
1389                 x0          = numpy.zeros(Xini.size),
1390                 fprime      = GradientOfCostFunction,
1391                 args        = (),
1392                 bounds      = selfA._parameters["Bounds"],
1393                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1394                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1395                 ftol        = selfA._parameters["CostDecrementTolerance"],
1396                 messages    = selfA._parameters["optmessages"],
1397                 )
1398         elif selfA._parameters["Minimizer"] == "CG":
1399             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1400                 f           = CostFunction,
1401                 x0          = numpy.zeros(Xini.size),
1402                 fprime      = GradientOfCostFunction,
1403                 args        = (),
1404                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1405                 gtol        = selfA._parameters["GradientNormTolerance"],
1406                 disp        = selfA._parameters["optdisp"],
1407                 full_output = True,
1408                 )
1409         elif selfA._parameters["Minimizer"] == "NCG":
1410             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1411                 f           = CostFunction,
1412                 x0          = numpy.zeros(Xini.size),
1413                 fprime      = GradientOfCostFunction,
1414                 args        = (),
1415                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1416                 avextol     = selfA._parameters["CostDecrementTolerance"],
1417                 disp        = selfA._parameters["optdisp"],
1418                 full_output = True,
1419                 )
1420         elif selfA._parameters["Minimizer"] == "BFGS":
1421             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1422                 f           = CostFunction,
1423                 x0          = numpy.zeros(Xini.size),
1424                 fprime      = GradientOfCostFunction,
1425                 args        = (),
1426                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1427                 gtol        = selfA._parameters["GradientNormTolerance"],
1428                 disp        = selfA._parameters["optdisp"],
1429                 full_output = True,
1430                 )
1431         else:
1432             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1433         #
1434         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1435         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1436         #
1437         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1438             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1439             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1440         else:
1441             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1442         #
1443         Xr     = Minimum
1444         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J ; J = selfA.StoredVariables["CostFunctionJ" ][-1] # Update J so that DeltaJ measures the decrement between outer iterations
1445         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1446     #
1447     # Retrieval of the analysis
1448     # -------------------------
1449     Xa = Xr
1450     #
1451     selfA.StoredVariables["Analysis"].store( Xa )
1452     #
1453     if selfA._toStore("OMA") or \
1454         selfA._toStore("SigmaObs2") or \
1455         selfA._toStore("SimulationQuantiles") or \
1456         selfA._toStore("SimulatedObservationAtOptimum"):
1457         if selfA._toStore("SimulatedObservationAtCurrentState"):
1458             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1459         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1460             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1461         else:
1462             HXa = Hm( Xa )
1463     #
1464     # Computation of the analysis covariance
1465     # --------------------------------------
1466     if selfA._toStore("APosterioriCovariance") or \
1467         selfA._toStore("SimulationQuantiles") or \
1468         selfA._toStore("JacobianMatrixAtOptimum") or \
1469         selfA._toStore("KalmanGainAtOptimum"):
1470         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1471         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1472     if selfA._toStore("APosterioriCovariance") or \
1473         selfA._toStore("SimulationQuantiles") or \
1474         selfA._toStore("KalmanGainAtOptimum"):
1475         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1476         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1477     if selfA._toStore("APosterioriCovariance") or \
1478         selfA._toStore("SimulationQuantiles"):
1479         HessienneI = []
1480         nb = Xa.size
1481         for i in range(nb):
1482             _ee    = numpy.matrix(numpy.zeros(nb)).T
1483             _ee[i] = 1.
1484             _HtEE  = numpy.dot(HtM,_ee)
1485             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1486             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1487         HessienneI = numpy.matrix( HessienneI )
1488         A = HessienneI.I
1489         if min(A.shape) != max(A.shape):
1490             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1491         if (numpy.diag(A) < 0).any():
1492             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1493         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1494             try:
1495                 L = numpy.linalg.cholesky( A )
1496             except:
1497                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1498     if selfA._toStore("APosterioriCovariance"):
1499         selfA.StoredVariables["APosterioriCovariance"].store( A )
1500     if selfA._toStore("JacobianMatrixAtOptimum"):
1501         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1502     if selfA._toStore("KalmanGainAtOptimum"):
1503         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1504         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1505         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1506     #
1507     # Additional calculations and/or storage
1508     # --------------------------------------
1509     if selfA._toStore("Innovation") or \
1510         selfA._toStore("SigmaObs2") or \
1511         selfA._toStore("MahalanobisConsistency") or \
1512         selfA._toStore("OMB"):
1513         d  = Y - HXb
1514     if selfA._toStore("Innovation"):
1515         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1516     if selfA._toStore("BMA"):
1517         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1518     if selfA._toStore("OMA"):
1519         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1520     if selfA._toStore("OMB"):
1521         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1522     if selfA._toStore("SigmaObs2"):
1523         TraceR = R.trace(Y.size)
1524         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1525     if selfA._toStore("MahalanobisConsistency"):
1526         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1527     if selfA._toStore("SimulationQuantiles"):
1528         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1529         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1530         YfQ  = None
1531         for i in range(nech):
1532             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1533                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1534                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1535                 Yr = HXa + dYr
1536             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1537                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1538                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1539             if YfQ is None:
1540                 YfQ = Yr
1541             else:
1542                 YfQ = numpy.hstack((YfQ,Yr))
1543         YfQ.sort(axis=-1)
1544         YQ = None
1545         for quantile in selfA._parameters["Quantiles"]:
1546             if not (0. <= float(quantile) <= 1.): continue
1547             indice = int(nech * float(quantile) - 1./nech)
1548             if YQ is None: YQ = YfQ[:,indice]
1549             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1550         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1551     if selfA._toStore("SimulatedObservationAtBackground"):
1552         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1553     if selfA._toStore("SimulatedObservationAtOptimum"):
1554         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1555     #
1556     return 0
1557
1558 # ==============================================================================
1559 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1560     """
1561     3DVAR PSAS
1562     """
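    # Sketch of the algorithm as implemented below: PSAS works in the dual,
    # observation space. With d = Y - H(Xb), the minimized functional is
    #   J(w) = 1/2 w^T (H B H^T + R) w - w^T d,
    # and the analysis is recovered in state space as Xa = Xb + B H^T w.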
1563     #
1564     # Initializations
1565     # ---------------
1566     #
1567     # Operators
1568     Hm = HO["Direct"].appliedTo
1569     #
1570     # Possible use of a precomputed H(Xb) vector
1571     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1572         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1573     else:
1574         HXb = Hm( Xb )
1575     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1576     if Y.size != HXb.size:
1577         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1578     if max(Y.shape) != max(HXb.shape):
1579         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
1580     #
1581     if selfA._toStore("JacobianMatrixAtBackground"):
1582         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1583         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1584         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1585     #
1586     Ht = HO["Tangent"].asMatrix(Xb)
1587     BHT = B * Ht.T
1588     HBHTpR = R + Ht * BHT
1589     Innovation = Y - HXb
1590     #
1591     # Starting point of the optimization
1592     Xini = numpy.zeros(Xb.shape)
1593     #
1594     # Definition of the cost function
1595     # -------------------------------
1596     def CostFunction(w):
1597         _W = numpy.asmatrix(numpy.ravel( w )).T
1598         if selfA._parameters["StoreInternalVariables"] or \
1599             selfA._toStore("CurrentState") or \
1600             selfA._toStore("CurrentOptimum"):
1601             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1602         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1603             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1604             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1605         if selfA._toStore("InnovationAtCurrentState"):
1606             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1607         #
1608         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
1609         Jo  = float( - _W.T * Innovation )
1610         J   = Jb + Jo
1611         #
1612         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1613         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1614         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1615         selfA.StoredVariables["CostFunctionJ" ].store( J )
1616         if selfA._toStore("IndexOfOptimum") or \
1617             selfA._toStore("CurrentOptimum") or \
1618             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1619             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1620             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1621             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1622             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1623         if selfA._toStore("IndexOfOptimum"):
1624             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1625         if selfA._toStore("CurrentOptimum"):
1626             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1627         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1628             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1629         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1630             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1631         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1632             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1633         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1634             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1635         return J
1636     #
1637     def GradientOfCostFunction(w):
1638         _W = numpy.asmatrix(numpy.ravel( w )).T
1639         GradJb  = HBHTpR * _W
1640         GradJo  = - Innovation
1641         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1642         return GradJ
1643     #
1644     # Minimization of the functional
1645     # ------------------------------
1646     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1647     #
1648     if selfA._parameters["Minimizer"] == "LBFGSB":
1649         if "0.19" <= scipy.version.version <= "1.1.0":
1650             import lbfgsbhlt as optimiseur
1651         else:
1652             import scipy.optimize as optimiseur
1653         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1654             func        = CostFunction,
1655             x0          = Xini,
1656             fprime      = GradientOfCostFunction,
1657             args        = (),
1658             bounds      = selfA._parameters["Bounds"],
1659             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1660             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1661             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1662             iprint      = selfA._parameters["optiprint"],
1663             )
1664         nfeval = Informations['funcalls']
1665         rc     = Informations['warnflag']
1666     elif selfA._parameters["Minimizer"] == "TNC":
1667         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1668             func        = CostFunction,
1669             x0          = Xini,
1670             fprime      = GradientOfCostFunction,
1671             args        = (),
1672             bounds      = selfA._parameters["Bounds"],
1673             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1674             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1675             ftol        = selfA._parameters["CostDecrementTolerance"],
1676             messages    = selfA._parameters["optmessages"],
1677             )
1678     elif selfA._parameters["Minimizer"] == "CG":
1679         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1680             f           = CostFunction,
1681             x0          = Xini,
1682             fprime      = GradientOfCostFunction,
1683             args        = (),
1684             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1685             gtol        = selfA._parameters["GradientNormTolerance"],
1686             disp        = selfA._parameters["optdisp"],
1687             full_output = True,
1688             )
1689     elif selfA._parameters["Minimizer"] == "NCG":
1690         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1691             f           = CostFunction,
1692             x0          = Xini,
1693             fprime      = GradientOfCostFunction,
1694             args        = (),
1695             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1696             avextol     = selfA._parameters["CostDecrementTolerance"],
1697             disp        = selfA._parameters["optdisp"],
1698             full_output = True,
1699             )
1700     elif selfA._parameters["Minimizer"] == "BFGS":
1701         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1702             f           = CostFunction,
1703             x0          = Xini,
1704             fprime      = GradientOfCostFunction,
1705             args        = (),
1706             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1707             gtol        = selfA._parameters["GradientNormTolerance"],
1708             disp        = selfA._parameters["optdisp"],
1709             full_output = True,
1710             )
1711     else:
1712         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1713     #
1714     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1715     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1716     #
1717     # Correction to work around a TNC bug on the returned Minimum
1718     # -----------------------------------------------------------
1719     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1720         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1721         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1722     else:
1723         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1724     #
1725     # Retrieval of the analysis
1726     # -------------------------
1727     Xa = Minimum
1728     #
1729     selfA.StoredVariables["Analysis"].store( Xa )
1730     #
1731     if selfA._toStore("OMA") or \
1732         selfA._toStore("SigmaObs2") or \
1733         selfA._toStore("SimulationQuantiles") or \
1734         selfA._toStore("SimulatedObservationAtOptimum"):
1735         if selfA._toStore("SimulatedObservationAtCurrentState"):
1736             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1737         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1738             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1739         else:
1740             HXa = Hm( Xa )
1741     #
1742     # Computation of the analysis covariance
1743     # --------------------------------------
1744     if selfA._toStore("APosterioriCovariance") or \
1745         selfA._toStore("SimulationQuantiles") or \
1746         selfA._toStore("JacobianMatrixAtOptimum") or \
1747         selfA._toStore("KalmanGainAtOptimum"):
1748         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1749         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1750     if selfA._toStore("APosterioriCovariance") or \
1751         selfA._toStore("SimulationQuantiles") or \
1752         selfA._toStore("KalmanGainAtOptimum"):
1753         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1754         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1755     if selfA._toStore("APosterioriCovariance") or \
1756         selfA._toStore("SimulationQuantiles"):
1757         BI = B.getI()
1758         RI = R.getI()
1759         HessienneI = []
1760         nb = Xa.size
1761         for i in range(nb):
1762             _ee    = numpy.matrix(numpy.zeros(nb)).T
1763             _ee[i] = 1.
1764             _HtEE  = numpy.dot(HtM,_ee)
1765             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1766             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1767         HessienneI = numpy.matrix( HessienneI )
1768         A = HessienneI.I
1769         if min(A.shape) != max(A.shape):
1770             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1771         if (numpy.diag(A) < 0).any():
1772             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1773         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1774             try:
1775                 L = numpy.linalg.cholesky( A )
1776             except:
1777                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1778     if selfA._toStore("APosterioriCovariance"):
1779         selfA.StoredVariables["APosterioriCovariance"].store( A )
1780     if selfA._toStore("JacobianMatrixAtOptimum"):
1781         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1782     if selfA._toStore("KalmanGainAtOptimum"):
1783         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1784         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1785         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1786     #
1787     # Additional calculations and/or storage
1788     # --------------------------------------
1789     if selfA._toStore("Innovation") or \
1790         selfA._toStore("SigmaObs2") or \
1791         selfA._toStore("MahalanobisConsistency") or \
1792         selfA._toStore("OMB"):
1793         d  = Y - HXb
1794     if selfA._toStore("Innovation"):
1795         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1796     if selfA._toStore("BMA"):
1797         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1798     if selfA._toStore("OMA"):
1799         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1800     if selfA._toStore("OMB"):
1801         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1802     if selfA._toStore("SigmaObs2"):
1803         TraceR = R.trace(Y.size)
1804         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1805     if selfA._toStore("MahalanobisConsistency"):
1806         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1807     if selfA._toStore("SimulationQuantiles"):
1808         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1809         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1810         YfQ  = None
1811         for i in range(nech):
1812             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1813                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1814                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1815                 Yr = HXa + dYr
1816             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1817                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1818                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1819             if YfQ is None:
1820                 YfQ = Yr
1821             else:
1822                 YfQ = numpy.hstack((YfQ,Yr))
1823         YfQ.sort(axis=-1)
1824         YQ = None
1825         for quantile in selfA._parameters["Quantiles"]:
1826             if not (0. <= float(quantile) <= 1.): continue
1827             indice = int(nech * float(quantile) - 1./nech)
1828             if YQ is None: YQ = YfQ[:,indice]
1829             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1830         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1831     if selfA._toStore("SimulatedObservationAtBackground"):
1832         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1833     if selfA._toStore("SimulatedObservationAtOptimum"):
1834         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1835     #
1836     return 0
1837
1838 # ==============================================================================
1839 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1840     """
1841     4DVAR
1842     """
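    # Sketch of the algorithm as implemented below: the control variable is the
    # initial state of the assimilation window. The cost function propagates it
    # with the evolution model over all observation steps,
    #   J(x) = 1/2 (x-Xb)^T B^-1 (x-Xb) + 1/2 sum_n (Yn - H(Mn(x)))^T R^-1 (Yn - H(Mn(x))),
    # and its gradient is obtained by a backward sweep with the adjoint models.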
1843     #
1844     # Initializations
1845     # ---------------
1846     #
1847     # Operators
1848     Hm = HO["Direct"].appliedControledFormTo
1849     Mm = EM["Direct"].appliedControledFormTo
1850     #
1851     if CM is not None and "Tangent" in CM and U is not None:
1852         Cm = CM["Tangent"].asMatrix(Xb)
1853     else:
1854         Cm = None
1855     #
1856     def Un(_step):
1857         if U is not None:
1858             if hasattr(U,"store") and 1<=_step<len(U) :
1859                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
1860             elif hasattr(U,"store") and len(U)==1:
1861                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1862             else:
1863                 _Un = numpy.asmatrix(numpy.ravel( U )).T
1864         else:
1865             _Un = None
1866         return _Un
1867     def CmUn(_xn,_un):
1868         if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is counted twice!
1869             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
1870             _CmUn = _Cm * _un
1871         else:
1872             _CmUn = 0.
1873         return _CmUn
1874     #
1875     # Remark: the observations are taken into account starting from time
1876     # step number 1, and are used in Yo as stored according to these indices.
1877     # Step 0 is therefore not used, since the first step starts with the
1878     # observation of step 1.
1879     #
1880     # Number of steps identical to the number of observation steps
1881     if hasattr(Y,"stepnumber"):
1882         duration = Y.stepnumber()
1883     else:
1884         duration = 2
1885     #
1886     # Precomputation of the inverses of B and R
1887     BI = B.getI()
1888     RI = R.getI()
1889     #
1890     # Starting point of the optimization
1891     Xini = selfA._parameters["InitializationPoint"]
1892     #
1893     # Definition of the cost function
1894     # -------------------------------
1895     selfA.DirectCalculation = [None,] # Step 0 is not observed
1896     selfA.DirectInnovation  = [None,] # Step 0 is not observed
1897     def CostFunction(x):
1898         _X  = numpy.asmatrix(numpy.ravel( x )).T
1899         if selfA._parameters["StoreInternalVariables"] or \
1900             selfA._toStore("CurrentState") or \
1901             selfA._toStore("CurrentOptimum"):
1902             selfA.StoredVariables["CurrentState"].store( _X )
1903         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
1904         selfA.DirectCalculation = [None,]
1905         selfA.DirectInnovation  = [None,]
1906         Jo  = 0.
1907         _Xn = _X
1908         for step in range(0,duration-1):
1909             if hasattr(Y,"store"):
1910                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
1911             else:
1912                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
1913             _Un = Un(step)
1914             #
1915             # Evolution step
1916             if selfA._parameters["EstimationOf"] == "State":
1917                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
1918             elif selfA._parameters["EstimationOf"] == "Parameters":
1919                 pass
1920             #
1921             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1922                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
1923                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
1924             #
1925             # Observation departure step
1926             if selfA._parameters["EstimationOf"] == "State":
1927                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
1928             elif selfA._parameters["EstimationOf"] == "Parameters":
1929                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
1930             #
1931             # Storage of the state
1932             selfA.DirectCalculation.append( _Xn )
1933             selfA.DirectInnovation.append( _YmHMX )
1934             #
1935             # Addition to the observation cost term
1936             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
1937         J = Jb + Jo
1938         #
1939         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1940         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1941         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1942         selfA.StoredVariables["CostFunctionJ" ].store( J )
1943         if selfA._toStore("IndexOfOptimum") or \
1944             selfA._toStore("CurrentOptimum") or \
1945             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1946             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1947             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1948             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1949         if selfA._toStore("IndexOfOptimum"):
1950             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1951         if selfA._toStore("CurrentOptimum"):
1952             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1953         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1954             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1955         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1956             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1957         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1958             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1959         return J
1960     #
1961     def GradientOfCostFunction(x):
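        # Backward adjoint sweep: the forward trajectory and the innovations
        # stored during CostFunction are popped in reverse time order, each
        # step accumulating Ha R^-1 (Y - H(M(x))) and propagating the result
        # with the adjoint evolution model Ma, so that GradJ = GradJb - GradJo.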
1962         _X      = numpy.asmatrix(numpy.ravel( x )).T
1963         GradJb  = BI * (_X - Xb)
1964         GradJo  = 0.
1965         for step in range(duration-1,0,-1):
1966             # Retrieve the last stored evolution state
1967             _Xn = selfA.DirectCalculation.pop()
1968             # Retrieve the last stored innovation
1969             _YmHMX = selfA.DirectInnovation.pop()
1970             # Computation of the adjoints
1971             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1972             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
1973             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1974             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
1975             # Gradient computation by adjoint state
1976             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
1977             GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
1978         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
1979         return GradJ
1980     #
1981     # Minimization of the functional
1982     # ------------------------------
1983     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1984     #
1985     if selfA._parameters["Minimizer"] == "LBFGSB":
1986         if "0.19" <= scipy.version.version <= "1.1.0":
1987             import lbfgsbhlt as optimiseur
1988         else:
1989             import scipy.optimize as optimiseur
1990         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1991             func        = CostFunction,
1992             x0          = Xini,
1993             fprime      = GradientOfCostFunction,
1994             args        = (),
1995             bounds      = selfA._parameters["Bounds"],
1996             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1997             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1998             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1999             iprint      = selfA._parameters["optiprint"],
2000             )
2001         nfeval = Informations['funcalls']
2002         rc     = Informations['warnflag']
2003     elif selfA._parameters["Minimizer"] == "TNC":
2004         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2005             func        = CostFunction,
2006             x0          = Xini,
2007             fprime      = GradientOfCostFunction,
2008             args        = (),
2009             bounds      = selfA._parameters["Bounds"],
2010             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2011             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2012             ftol        = selfA._parameters["CostDecrementTolerance"],
2013             messages    = selfA._parameters["optmessages"],
2014             )
2015     elif selfA._parameters["Minimizer"] == "CG":
2016         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2017             f           = CostFunction,
2018             x0          = Xini,
2019             fprime      = GradientOfCostFunction,
2020             args        = (),
2021             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2022             gtol        = selfA._parameters["GradientNormTolerance"],
2023             disp        = selfA._parameters["optdisp"],
2024             full_output = True,
2025             )
2026     elif selfA._parameters["Minimizer"] == "NCG":
2027         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2028             f           = CostFunction,
2029             x0          = Xini,
2030             fprime      = GradientOfCostFunction,
2031             args        = (),
2032             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2033             avextol     = selfA._parameters["CostDecrementTolerance"],
2034             disp        = selfA._parameters["optdisp"],
2035             full_output = True,
2036             )
2037     elif selfA._parameters["Minimizer"] == "BFGS":
2038         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2039             f           = CostFunction,
2040             x0          = Xini,
2041             fprime      = GradientOfCostFunction,
2042             args        = (),
2043             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2044             gtol        = selfA._parameters["GradientNormTolerance"],
2045             disp        = selfA._parameters["optdisp"],
2046             full_output = True,
2047             )
2048     else:
2049         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2050     #
2051     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2052     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2053     #
2054     # Correction to work around a TNC bug on the returned Minimum
2055     # -----------------------------------------------------------
2056     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2057         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2058     #
2059     # Retrieval of the analysis
2060     # -------------------------
2061     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2062     #
2063     selfA.StoredVariables["Analysis"].store( Xa )
2064     #
2065     # Additional calculations and/or storage
2066     # --------------------------------------
2067     if selfA._toStore("BMA"):
2068         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2069     #
2070     return 0
2071
2072 # ==============================================================================
2073 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2074     """
2075     Stochastic EnKF
2076     """
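    # Sketch of the algorithm as implemented below: at each time step the
    # ensemble is forecast with the evolution model (plus model noise Q for
    # state estimation), the forecast members are observed, and each member is
    # then updated with an ensemble-based Kalman gain using perturbed
    # observations, following the variant selected by VariantM.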
2077     if selfA._parameters["EstimationOf"] == "Parameters":
2078         selfA._parameters["StoreInternalVariables"] = True
2079     #
2080     # Operators
2081     # ---------
2082     H = HO["Direct"].appliedControledFormTo
2083     #
2084     if selfA._parameters["EstimationOf"] == "State":
2085         M = EM["Direct"].appliedControledFormTo
2086     #
2087     if CM is not None and "Tangent" in CM and U is not None:
2088         Cm = CM["Tangent"].asMatrix(Xb)
2089     else:
2090         Cm = None
2091     #
2092     # Number of steps identical to the number of observation steps
2093     # -------------------------------------------------------------
2094     if hasattr(Y,"stepnumber"):
2095         duration = Y.stepnumber()
2096         __p = numpy.cumprod(Y.shape())[-1]
2097     else:
2098         duration = 2
2099         __p = numpy.array(Y).size
2100     #
2101     # Precomputation of the inverses of B and R
2102     # -----------------------------------------
2103     if selfA._parameters["StoreInternalVariables"] \
2104         or selfA._toStore("CostFunctionJ") \
2105         or selfA._toStore("CostFunctionJb") \
2106         or selfA._toStore("CostFunctionJo") \
2107         or selfA._toStore("CurrentOptimum") \
2108         or selfA._toStore("APosterioriCovariance"):
2109         BI = B.getI()
2110         RI = R.getI()
2111     #
2112     # Initialization
2113     # --------------
2114     __n = Xb.size
2115     __m = selfA._parameters["NumberOfMembers"]
2116     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2117     else:                         Pn = B
2118     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2119     else:                         Rn = R
2120     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2121     else:                         Qn = Q
2122     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2123     #
2124     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2125         selfA.StoredVariables["Analysis"].store( Xb )
2126         if selfA._toStore("APosterioriCovariance"):
2127             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2128             covarianceXa = Pn
2129     #
2130     previousJMinimum = numpy.finfo(float).max
2131     #
2132     for step in range(duration-1):
2133         if hasattr(Y,"store"):
2134             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2135         else:
2136             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2137         #
2138         if U is not None:
2139             if hasattr(U,"store") and len(U)>1:
2140                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2141             elif hasattr(U,"store") and len(U)==1:
2142                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2143             else:
2144                 Un = numpy.asmatrix(numpy.ravel( U )).T
2145         else:
2146             Un = None
2147         #
2148         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2149             Xn = CovarianceInflation( Xn,
2150                 selfA._parameters["InflationType"],
2151                 selfA._parameters["InflationFactor"],
2152                 )
2153         #
2154         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2155             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2156                 argsAsSerie = True,
2157                 returnSerieAsArrayMatrix = True )
2158             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2159             Xn_predicted = EMX + qi
2160             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2161                 argsAsSerie = True,
2162                 returnSerieAsArrayMatrix = True )
2163             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2164                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2165                 Xn_predicted = Xn_predicted + Cm * Un
2166         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2167             # --- > By principle, M = Id, Q = 0
2168             Xn_predicted = Xn
2169             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2170                 argsAsSerie = True,
2171                 returnSerieAsArrayMatrix = True )
2172         #
2173         # Mean of forecast and observation of forecast
2174         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2175         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2176         #
2177         #--------------------------
2178         if VariantM == "KalmanFilterFormula05":
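            # Gain estimated from the forecast anomalies, K = Pf.Ht * (H.Pf.Ht + R)^-1,
            # with Pf.Ht and H.Pf.Ht accumulated member by member below; each member is
            # then updated towards its own perturbed observation y + ri.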
2179             PfHT, HPfHT = 0., 0.
2180             for i in range(__m):
2181                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2182                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2183                 PfHT  += Exfi * Eyfi.T
2184                 HPfHT += Eyfi * Eyfi.T
2185             PfHT  = (1./(__m-1)) * PfHT
2186             HPfHT = (1./(__m-1)) * HPfHT
2187             Kn     = PfHT * ( R + HPfHT ).I
2188             del PfHT, HPfHT
2189             #
2190             for i in range(__m):
2191                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2192                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2193         #--------------------------
2194         elif VariantM == "KalmanFilterFormula16":
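            # Same stochastic update written with anomalies: EpY is an ensemble of
            # observations perturbed with R, Ax the normalized state anomalies, Ay the
            # normalized anomalies of (H(Xf) - perturbed observations); the gain is
            # K = Ax.Ayt * (Ay.Ayt)^-1 and each member is pulled towards its own
            # perturbed observation.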
2195             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2196             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2197             #
2198             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2199             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2200             #
2201             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2202             #
2203             for i in range(__m):
2204                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2205         #--------------------------
2206         else:
2207             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2208         #
2209         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2210             Xn = CovarianceInflation( Xn,
2211                 selfA._parameters["InflationType"],
2212                 selfA._parameters["InflationFactor"],
2213                 )
2214         #
2215         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2216         #--------------------------
2217         #
2218         if selfA._parameters["StoreInternalVariables"] \
2219             or selfA._toStore("CostFunctionJ") \
2220             or selfA._toStore("CostFunctionJb") \
2221             or selfA._toStore("CostFunctionJo") \
2222             or selfA._toStore("APosterioriCovariance") \
2223             or selfA._toStore("InnovationAtCurrentAnalysis") \
2224             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2225             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2226             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2227             _Innovation = Ynpu - _HXa
2228         #
2229         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2230         # ---> with analysis
2231         selfA.StoredVariables["Analysis"].store( Xa )
2232         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2233             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2234         if selfA._toStore("InnovationAtCurrentAnalysis"):
2235             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2236         # ---> with current state
2237         if selfA._parameters["StoreInternalVariables"] \
2238             or selfA._toStore("CurrentState"):
2239             selfA.StoredVariables["CurrentState"].store( Xn )
2240         if selfA._toStore("ForecastState"):
2241             selfA.StoredVariables["ForecastState"].store( EMX )
2242         if selfA._toStore("BMA"):
2243             selfA.StoredVariables["BMA"].store( EMX - Xa )
2244         if selfA._toStore("InnovationAtCurrentState"):
2245             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2246         if selfA._toStore("SimulatedObservationAtCurrentState") \
2247             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2248             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2249         # ---> others
2250         if selfA._parameters["StoreInternalVariables"] \
2251             or selfA._toStore("CostFunctionJ") \
2252             or selfA._toStore("CostFunctionJb") \
2253             or selfA._toStore("CostFunctionJo") \
2254             or selfA._toStore("CurrentOptimum") \
2255             or selfA._toStore("APosterioriCovariance"):
2256             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2257             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2258             J   = Jb + Jo
2259             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2260             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2261             selfA.StoredVariables["CostFunctionJ" ].store( J )
2262             #
2263             if selfA._toStore("IndexOfOptimum") \
2264                 or selfA._toStore("CurrentOptimum") \
2265                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2266                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2267                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2268                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2269                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2270             if selfA._toStore("IndexOfOptimum"):
2271                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2272             if selfA._toStore("CurrentOptimum"):
2273                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2274             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2275                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2276             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2277                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2278             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2279                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2280             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2281                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2282         if selfA._toStore("APosterioriCovariance"):
2283             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2284         if selfA._parameters["EstimationOf"] == "Parameters" \
2285             and J < previousJMinimum:
2286             previousJMinimum    = J
2287             XaMin               = Xa
2288             if selfA._toStore("APosterioriCovariance"):
2289                 covarianceXaMin = Pn
2290     #
2291     # Additional final storage of the optimum in parameter estimation
2292     # ----------------------------------------------------------------
2293     if selfA._parameters["EstimationOf"] == "Parameters":
2294         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2295         selfA.StoredVariables["Analysis"].store( XaMin )
2296         if selfA._toStore("APosterioriCovariance"):
2297             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2298         if selfA._toStore("BMA"):
2299             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2300     #
2301     return 0
2302
2303 # ==============================================================================
2304 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2305     """
2306     Ensemble-Transform EnKF
2307     """
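    # Principle (summary): deterministic ETKF. The analysis is sought in the space
    # spanned by the forecast anomalies: a weight vector and a transform matrix are
    # computed from the observation anomalies and R, and the analysis ensemble is
    # rebuilt as mean + anomalies * (weights + transform), without perturbing the
    # observations. The VariantM switch below selects between several equivalent
    # formulations.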
2308     if selfA._parameters["EstimationOf"] == "Parameters":
2309         selfA._parameters["StoreInternalVariables"] = True
2310     #
2311     # Operators
2312     # ---------
2313     H = HO["Direct"].appliedControledFormTo
2314     #
2315     if selfA._parameters["EstimationOf"] == "State":
2316         M = EM["Direct"].appliedControledFormTo
2317     #
2318     if CM is not None and "Tangent" in CM and U is not None:
2319         Cm = CM["Tangent"].asMatrix(Xb)
2320     else:
2321         Cm = None
2322     #
2323     # Number of steps equal to the number of observation steps
2324     # ---------------------------------------------------------
2325     if hasattr(Y,"stepnumber"):
2326         duration = Y.stepnumber()
2327         __p = numpy.cumprod(Y.shape())[-1]
2328     else:
2329         duration = 2
2330         __p = numpy.array(Y).size
2331     #
2332     # Precomputation of the inverses of B and R
2333     # ------------------------------------------
2334     if selfA._parameters["StoreInternalVariables"] \
2335         or selfA._toStore("CostFunctionJ") \
2336         or selfA._toStore("CostFunctionJb") \
2337         or selfA._toStore("CostFunctionJo") \
2338         or selfA._toStore("CurrentOptimum") \
2339         or selfA._toStore("APosterioriCovariance"):
2340         BI = B.getI()
2341         RI = R.getI()
2342     elif VariantM != "KalmanFilterFormula":
2343         RI = R.getI()
2344     if VariantM == "KalmanFilterFormula":
2345         RIdemi = R.sqrtmI()
2346     #
2347     # Initialisation
2348     # --------------
2349     __n = Xb.size
2350     __m = selfA._parameters["NumberOfMembers"]
2351     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2352     else:                         Pn = B
2353     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2354     else:                         Rn = R
2355     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2356     else:                         Qn = Q
2357     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2358     #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2359     #
2360     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2361         selfA.StoredVariables["Analysis"].store( Xb )
2362         if selfA._toStore("APosterioriCovariance"):
2363             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2364             covarianceXa = Pn
2365     #
2366     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Needed by the IndexMin search below
2367     #
2368     for step in range(duration-1):
2369         if hasattr(Y,"store"):
2370             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2371         else:
2372             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2373         #
2374         if U is not None:
2375             if hasattr(U,"store") and len(U)>1:
2376                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2377             elif hasattr(U,"store") and len(U)==1:
2378                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2379             else:
2380                 Un = numpy.asmatrix(numpy.ravel( U )).T
2381         else:
2382             Un = None
2383         #
2384         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2385             Xn = CovarianceInflation( Xn,
2386                 selfA._parameters["InflationType"],
2387                 selfA._parameters["InflationFactor"],
2388                 )
2389         #
2390         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2391             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2392                 argsAsSerie = True,
2393                 returnSerieAsArrayMatrix = True )
2394             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2395             Xn_predicted = EMX + qi
2396             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2397                 argsAsSerie = True,
2398                 returnSerieAsArrayMatrix = True )
2399             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2400                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2401                 Xn_predicted = Xn_predicted + Cm * Un
2402         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2403             # --- > By principle, M = Id, Q = 0
2404             Xn_predicted = Xn
2405             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2406                 argsAsSerie = True,
2407                 returnSerieAsArrayMatrix = True )
2408         #
2409         # Mean of forecast and observation of forecast
2410         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2411         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2412         #
2413         # Anomalies
2414         EaX   = EnsembleOfAnomalies( Xn_predicted )
2415         EaHX  = numpy.array(HX_predicted - Hfm)
2416         #
2417         #--------------------------
2418         if VariantM == "KalmanFilterFormula":
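            # Square root formulation: with S = R^(-1/2).HA/sqrt(m-1) and
            # d = R^(-1/2).(y - Hxf), the weights are w = (I + St.S)^-1.St.d and the
            # normalized anomalies are transformed by sqrt(m-1) * (I + St.S)^(-1/2).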
2419             mS    = RIdemi * EaHX / math.sqrt(__m-1)
2420             delta = RIdemi * ( Ynpu - Hfm )
2421             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
2422             vw    = mT @ mS.T @ delta
2423             #
2424             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2425             mU    = numpy.identity(__m)
2426             #
2427             EaX   = EaX / math.sqrt(__m-1)
2428             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
2429         #--------------------------
2430         elif VariantM == "Variational":
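            # Variational formulation: the weights minimize
            #   J(w) = 1/2 (d - HA.w)t.R^-1.(d - HA.w) + 1/2 (m-1) wt.w
            # by conjugate gradient, and the analysis anomalies are obtained from the
            # square root of (m-1) times the inverse Hessian of J.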
2431             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm instead
2432             def CostFunction(w):
2433                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2434                 _Jo = 0.5 * _A.T @ (RI * _A)
2435                 _Jb = 0.5 * (__m-1) * w.T @ w
2436                 _J  = _Jo + _Jb
2437                 return float(_J)
2438             def GradientOfCostFunction(w):
2439                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2440                 _GradJo = - EaHX.T @ (RI * _A)
2441                 _GradJb = (__m-1) * w.reshape((__m,1))
2442                 _GradJ  = _GradJo + _GradJb
2443                 return numpy.ravel(_GradJ)
2444             vw = scipy.optimize.fmin_cg(
2445                 f           = CostFunction,
2446                 x0          = numpy.zeros(__m),
2447                 fprime      = GradientOfCostFunction,
2448                 args        = (),
2449                 disp        = False,
2450                 )
2451             #
2452             Hto = EaHX.T @ (RI * EaHX)
2453             Htb = (__m-1) * numpy.identity(__m)
2454             Hta = Hto + Htb
2455             #
2456             Pta = numpy.linalg.inv( Hta )
2457             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2458             #
2459             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2460         #--------------------------
2461         elif VariantM == "FiniteSize11": # Boc2011 gauge
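            # Finite-size (EnKF-N) variant: the Gaussian background term on the
            # weights is replaced by 1/2 * m * ln(1 + 1/m + wt.w), which accounts for
            # the sampling error of the ensemble mean and plays the role of an
            # adaptive inflation. The FiniteSize15/16 branches below differ only in
            # the gauge factors applied to this term.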
2462             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm instead
2463             def CostFunction(w):
2464                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2465                 _Jo = 0.5 * _A.T @ (RI * _A)
2466                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2467                 _J  = _Jo + _Jb
2468                 return float(_J)
2469             def GradientOfCostFunction(w):
2470                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2471                 _GradJo = - EaHX.T @ (RI * _A)
2472                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2473                 _GradJ  = _GradJo + _GradJb
2474                 return numpy.ravel(_GradJ)
2475             vw = scipy.optimize.fmin_cg(
2476                 f           = CostFunction,
2477                 x0          = numpy.zeros(__m),
2478                 fprime      = GradientOfCostFunction,
2479                 args        = (),
2480                 disp        = False,
2481                 )
2482             #
2483             Hto = EaHX.T @ (RI * EaHX)
2484             Htb = __m * \
2485                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * numpy.outer(vw, vw) ) \
2486                 / (1 + 1/__m + vw.T @ vw)**2
2487             Hta = Hto + Htb
2488             #
2489             Pta = numpy.linalg.inv( Hta )
2490             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2491             #
2492             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
2493         #--------------------------
2494         elif VariantM == "FiniteSize15": # Boc2015 gauge
2495             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm instead
2496             def CostFunction(w):
2497                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2498                 _Jo = 0.5 * _A.T @ (RI * _A)
2499                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2500                 _J  = _Jo + _Jb
2501                 return float(_J)
2502             def GradientOfCostFunction(w):
2503                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2504                 _GradJo = - EaHX.T @ (RI * _A)
2505                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2506                 _GradJ  = _GradJo + _GradJb
2507                 return numpy.ravel(_GradJ)
2508             vw = scipy.optimize.fmin_cg(
2509                 f           = CostFunction,
2510                 x0          = numpy.zeros(__m),
2511                 fprime      = GradientOfCostFunction,
2512                 args        = (),
2513                 disp        = False,
2514                 )
2515             #
2516             Hto = EaHX.T @ (RI * EaHX)
2517             Htb = (__m+1) * \
2518                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * numpy.outer(vw, vw) ) \
2519                 / (1 + 1/__m + vw.T @ vw)**2
2520             Hta = Hto + Htb
2521             #
2522             Pta = numpy.linalg.inv( Hta )
2523             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2524             #
2525             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
2526         #--------------------------
2527         elif VariantM == "FiniteSize16": # Boc2016 gauge
2528             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm instead
2529             def CostFunction(w):
2530                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2531                 _Jo = 0.5 * _A.T @ (RI * _A)
2532                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2533                 _J  = _Jo + _Jb
2534                 return float(_J)
2535             def GradientOfCostFunction(w):
2536                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2537                 _GradJo = - EaHX.T @ (RI * _A)
2538                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2539                 _GradJ  = _GradJo + _GradJb
2540                 return numpy.ravel(_GradJ)
2541             vw = scipy.optimize.fmin_cg(
2542                 f           = CostFunction,
2543                 x0          = numpy.zeros(__m),
2544                 fprime      = GradientOfCostFunction,
2545                 args        = (),
2546                 disp        = False,
2547                 )
2548             #
2549             Hto = EaHX.T @ (RI * EaHX)
2550             Htb = ((__m+1) / (__m-1)) * \
2551                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * numpy.outer(vw, vw) / (__m-1) ) \
2552                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2553             Hta = Hto + Htb
2554             #
2555             Pta = numpy.linalg.inv( Hta )
2556             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2557             #
2558             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2559         #--------------------------
2560         else:
2561             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2562         #
2563         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2564             Xn = CovarianceInflation( Xn,
2565                 selfA._parameters["InflationType"],
2566                 selfA._parameters["InflationFactor"],
2567                 )
2568         #
2569         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2570         #--------------------------
2571         #
2572         if selfA._parameters["StoreInternalVariables"] \
2573             or selfA._toStore("CostFunctionJ") \
2574             or selfA._toStore("CostFunctionJb") \
2575             or selfA._toStore("CostFunctionJo") \
2576             or selfA._toStore("APosterioriCovariance") \
2577             or selfA._toStore("InnovationAtCurrentAnalysis") \
2578             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2579             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2580             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2581             _Innovation = Ynpu - _HXa
2582         #
2583         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2584         # ---> with analysis
2585         selfA.StoredVariables["Analysis"].store( Xa )
2586         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2587             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2588         if selfA._toStore("InnovationAtCurrentAnalysis"):
2589             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2590         # ---> with current state
2591         if selfA._parameters["StoreInternalVariables"] \
2592             or selfA._toStore("CurrentState"):
2593             selfA.StoredVariables["CurrentState"].store( Xn )
2594         if selfA._toStore("ForecastState"):
2595             selfA.StoredVariables["ForecastState"].store( EMX )
2596         if selfA._toStore("BMA"):
2597             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2598         if selfA._toStore("InnovationAtCurrentState"):
2599             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2600         if selfA._toStore("SimulatedObservationAtCurrentState") \
2601             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2602             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2603         # ---> others
2604         if selfA._parameters["StoreInternalVariables"] \
2605             or selfA._toStore("CostFunctionJ") \
2606             or selfA._toStore("CostFunctionJb") \
2607             or selfA._toStore("CostFunctionJo") \
2608             or selfA._toStore("CurrentOptimum") \
2609             or selfA._toStore("APosterioriCovariance"):
2610             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2611             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2612             J   = Jb + Jo
2613             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2614             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2615             selfA.StoredVariables["CostFunctionJ" ].store( J )
2616             #
2617             if selfA._toStore("IndexOfOptimum") \
2618                 or selfA._toStore("CurrentOptimum") \
2619                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2620                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2621                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2622                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2623                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2624             if selfA._toStore("IndexOfOptimum"):
2625                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2626             if selfA._toStore("CurrentOptimum"):
2627                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2628             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2629                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2630             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2631                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2632             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2633                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2634             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2635                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2636         if selfA._toStore("APosterioriCovariance"):
2637             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2638         if selfA._parameters["EstimationOf"] == "Parameters" \
2639             and J < previousJMinimum:
2640             previousJMinimum    = J
2641             XaMin               = Xa
2642             if selfA._toStore("APosterioriCovariance"):
2643                 covarianceXaMin = Pn
2644         # ---> For the smoothers
2645         if selfA._toStore("CurrentEnsembleState"):
2646             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2647     if selfA._toStore("LastEnsembleForecastState"):
2648         selfA.StoredVariables["LastEnsembleForecastState"].store( EMX )
2649     #
2650     # Additional final storage of the optimum in parameter estimation
2651     # ----------------------------------------------------------------
2652     if selfA._parameters["EstimationOf"] == "Parameters":
2653         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2654         selfA.StoredVariables["Analysis"].store( XaMin )
2655         if selfA._toStore("APosterioriCovariance"):
2656             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2657         if selfA._toStore("BMA"):
2658             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2659     #
2660     return 0
2661
2662 # ==============================================================================
2663 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2664     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2665     """
2666     Maximum Likelihood Ensemble Filter
2667     """
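    # Principle (summary): the analysis weights are obtained by an iterative,
    # Gauss-Newton like, minimization in the ensemble space, re-applying the
    # possibly non-linear observation operator H to a re-centred ensemble at each
    # inner iteration, until the weight increment norm falls below _e or _jmax
    # iterations are reached.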
2668     if selfA._parameters["EstimationOf"] == "Parameters":
2669         selfA._parameters["StoreInternalVariables"] = True
2670     #
2671     # Operators
2672     # ---------
2673     H = HO["Direct"].appliedControledFormTo
2674     #
2675     if selfA._parameters["EstimationOf"] == "State":
2676         M = EM["Direct"].appliedControledFormTo
2677     #
2678     if CM is not None and "Tangent" in CM and U is not None:
2679         Cm = CM["Tangent"].asMatrix(Xb)
2680     else:
2681         Cm = None
2682     #
2683     # Number of steps equal to the number of observation steps
2684     # ---------------------------------------------------------
2685     if hasattr(Y,"stepnumber"):
2686         duration = Y.stepnumber()
2687         __p = numpy.cumprod(Y.shape())[-1]
2688     else:
2689         duration = 2
2690         __p = numpy.array(Y).size
2691     #
2692     # Precomputation of the inverses of B and R
2693     # ------------------------------------------
2694     if selfA._parameters["StoreInternalVariables"] \
2695         or selfA._toStore("CostFunctionJ") \
2696         or selfA._toStore("CostFunctionJb") \
2697         or selfA._toStore("CostFunctionJo") \
2698         or selfA._toStore("CurrentOptimum") \
2699         or selfA._toStore("APosterioriCovariance"):
2700         BI = B.getI()
2701     RI = R.getI()
2702     #
2703     # Initialisation
2704     # --------------
2705     __n = Xb.size
2706     __m = selfA._parameters["NumberOfMembers"]
2707     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2708     else:                         Pn = B
2709     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2710     else:                         Rn = R
2711     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2712     else:                         Qn = Q
2713     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2714     #
2715     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2716         selfA.StoredVariables["Analysis"].store( Xb )
2717         if selfA._toStore("APosterioriCovariance"):
2718             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2719             covarianceXa = Pn
2720     #
2721     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Needed by the IndexMin search below
2722     #
2723     for step in range(duration-1):
2724         if hasattr(Y,"store"):
2725             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2726         else:
2727             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2728         #
2729         if U is not None:
2730             if hasattr(U,"store") and len(U)>1:
2731                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2732             elif hasattr(U,"store") and len(U)==1:
2733                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2734             else:
2735                 Un = numpy.asmatrix(numpy.ravel( U )).T
2736         else:
2737             Un = None
2738         #
2739         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2740             Xn = CovarianceInflation( Xn,
2741                 selfA._parameters["InflationType"],
2742                 selfA._parameters["InflationFactor"],
2743                 )
2744         #
2745         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2746             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2747                 argsAsSerie = True,
2748                 returnSerieAsArrayMatrix = True )
2749             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2750             Xn_predicted = EMX + qi
2751             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2752                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2753                 Xn_predicted = Xn_predicted + Cm * Un
2754         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2755             # --- > By principle, M = Id, Q = 0
2756             Xn_predicted = Xn
2757         #
2758         #--------------------------
2759         if VariantM == "MLEF13":
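            # Inner loop: the ensemble is re-centred on the current estimate, H is
            # re-applied, the observation anomalies EaY are recomputed, and the
            # Newton step solves (I + EaYt.R^-1.EaY).Deltaw = -GradJ, the transform
            # Ta being the inverse square root of that same matrix.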
2760             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2761             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2762             Ua  = numpy.identity(__m)
2763             __j = 0
2764             Deltaw = 1
2765             if not BnotT:
2766                 Ta  = numpy.identity(__m)
2767             vw  = numpy.zeros(__m)
2768             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2769                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2770                 #
2771                 if BnotT:
2772                     E1 = vx1 + _epsilon * EaX
2773                 else:
2774                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2775                 #
2776                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2777                     argsAsSerie = True,
2778                     returnSerieAsArrayMatrix = True )
2779                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2780                 #
2781                 if BnotT:
2782                     EaY = (HE2 - vy2) / _epsilon
2783                 else:
2784                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2785                 #
2786                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2787                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
2788                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2789                 #
2790                 vw = vw + Deltaw
2791                 #
2792                 if not BnotT:
2793                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2794                 #
2795                 __j = __j + 1
2796             #
2797             if BnotT:
2798                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2799             #
2800             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2801         #--------------------------
2802         else:
2803             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2804         #
2805         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2806             Xn = CovarianceInflation( Xn,
2807                 selfA._parameters["InflationType"],
2808                 selfA._parameters["InflationFactor"],
2809                 )
2810         #
2811         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2812         #--------------------------
2813         #
2814         if selfA._parameters["StoreInternalVariables"] \
2815             or selfA._toStore("CostFunctionJ") \
2816             or selfA._toStore("CostFunctionJb") \
2817             or selfA._toStore("CostFunctionJo") \
2818             or selfA._toStore("APosterioriCovariance") \
2819             or selfA._toStore("InnovationAtCurrentAnalysis") \
2820             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2821             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2822             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2823             _Innovation = Ynpu - _HXa
2824         #
2825         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2826         # ---> with analysis
2827         selfA.StoredVariables["Analysis"].store( Xa )
2828         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2829             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2830         if selfA._toStore("InnovationAtCurrentAnalysis"):
2831             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2832         # ---> with current state
2833         if selfA._parameters["StoreInternalVariables"] \
2834             or selfA._toStore("CurrentState"):
2835             selfA.StoredVariables["CurrentState"].store( Xn )
2836         if selfA._toStore("ForecastState"):
2837             selfA.StoredVariables["ForecastState"].store( EMX )
2838         if selfA._toStore("BMA"):
2839             selfA.StoredVariables["BMA"].store( EMX - Xa )
2840         if selfA._toStore("InnovationAtCurrentState"):
2841             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2842         if selfA._toStore("SimulatedObservationAtCurrentState") \
2843             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2844             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2845         # ---> others
2846         if selfA._parameters["StoreInternalVariables"] \
2847             or selfA._toStore("CostFunctionJ") \
2848             or selfA._toStore("CostFunctionJb") \
2849             or selfA._toStore("CostFunctionJo") \
2850             or selfA._toStore("CurrentOptimum") \
2851             or selfA._toStore("APosterioriCovariance"):
2852             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2853             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2854             J   = Jb + Jo
2855             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2856             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2857             selfA.StoredVariables["CostFunctionJ" ].store( J )
2858             #
2859             if selfA._toStore("IndexOfOptimum") \
2860                 or selfA._toStore("CurrentOptimum") \
2861                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2862                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2863                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2864                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2865                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2866             if selfA._toStore("IndexOfOptimum"):
2867                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2868             if selfA._toStore("CurrentOptimum"):
2869                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2870             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2871                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2872             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2873                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2874             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2875                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2876             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2877                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2878         if selfA._toStore("APosterioriCovariance"):
2879             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2880         if selfA._parameters["EstimationOf"] == "Parameters" \
2881             and J < previousJMinimum:
2882             previousJMinimum    = J
2883             XaMin               = Xa
2884             if selfA._toStore("APosterioriCovariance"):
2885                 covarianceXaMin = Pn
2886     #
2887     # Additional final storage of the optimum in parameter estimation
2888     # ----------------------------------------------------------------
2889     if selfA._parameters["EstimationOf"] == "Parameters":
2890         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2891         selfA.StoredVariables["Analysis"].store( XaMin )
2892         if selfA._toStore("APosterioriCovariance"):
2893             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2894         if selfA._toStore("BMA"):
2895             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2896     #
2897     return 0
2898
2899 # ==============================================================================
2900 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2901     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2902     """
2903     Iterative EnKF
2904     """
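    # Principle (summary): iterative EnKF in the ensemble space. As in the MLEF,
    # the weights are obtained by an inner Gauss-Newton loop, but the possibly
    # non-linear evolution model M is also re-applied to the re-centred ensemble at
    # each iteration before observation, in a bundle (_epsilon) or transform (Ta)
    # version selected by BnotT.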
2905     if selfA._parameters["EstimationOf"] == "Parameters":
2906         selfA._parameters["StoreInternalVariables"] = True
2907     #
2908     # Operators
2909     # ---------
2910     H = HO["Direct"].appliedControledFormTo
2911     #
2912     if selfA._parameters["EstimationOf"] == "State":
2913         M = EM["Direct"].appliedControledFormTo
2914     #
2915     if CM is not None and "Tangent" in CM and U is not None:
2916         Cm = CM["Tangent"].asMatrix(Xb)
2917     else:
2918         Cm = None
2919     #
2920     # Number of steps equal to the number of observation steps
2921     # ---------------------------------------------------------
2922     if hasattr(Y,"stepnumber"):
2923         duration = Y.stepnumber()
2924         __p = numpy.cumprod(Y.shape())[-1]
2925     else:
2926         duration = 2
2927         __p = numpy.array(Y).size
2928     #
2929     # Precomputation of the inverses of B and R
2930     # ------------------------------------------
2931     if selfA._parameters["StoreInternalVariables"] \
2932         or selfA._toStore("CostFunctionJ") \
2933         or selfA._toStore("CostFunctionJb") \
2934         or selfA._toStore("CostFunctionJo") \
2935         or selfA._toStore("CurrentOptimum") \
2936         or selfA._toStore("APosterioriCovariance"):
2937         BI = B.getI()
2938     RI = R.getI()
2939     #
2940     # Initialisation
2941     # --------------
2942     __n = Xb.size
2943     __m = selfA._parameters["NumberOfMembers"]
2944     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2945     else:                         Pn = B
2946     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2947     else:                         Rn = R
2948     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2949     else:                         Qn = Q
2950     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2951     #
2952     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2953         selfA.StoredVariables["Analysis"].store( Xb )
2954         if selfA._toStore("APosterioriCovariance"):
2955             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2956             covarianceXa = Pn
2957     #
2958     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Needed by the IndexMin search below
2959     #
2960     for step in range(duration-1):
2961         if hasattr(Y,"store"):
2962             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2963         else:
2964             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2965         #
2966         if U is not None:
2967             if hasattr(U,"store") and len(U)>1:
2968                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2969             elif hasattr(U,"store") and len(U)==1:
2970                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2971             else:
2972                 Un = numpy.asmatrix(numpy.ravel( U )).T
2973         else:
2974             Un = None
2975         #
2976         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2977             Xn = CovarianceInflation( Xn,
2978                 selfA._parameters["InflationType"],
2979                 selfA._parameters["InflationFactor"],
2980                 )
2981         #
2982         #--------------------------
2983         if VariantM == "IEnKF12":
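            # Bundle version (BnotT=True): the re-centred ensemble is built with the
            # small finite-difference step _epsilon; transform version (default): the
            # anomalies are rescaled by sqrt(m-1) * Ta at each iteration.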
2984             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2985             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
2986             __j = 0
2987             Deltaw = 1
2988             if not BnotT:
2989                 Ta  = numpy.identity(__m)
2990             vw  = numpy.zeros(__m)
2991             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2992                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2993                 #
2994                 if BnotT:
2995                     E1 = vx1 + _epsilon * EaX
2996                 else:
2997                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2998                 #
2999                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
3000                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
3001                         argsAsSerie = True,
3002                         returnSerieAsArrayMatrix = True )
3003                 elif selfA._parameters["EstimationOf"] == "Parameters":
3004                     # --- > By principle, M = Id
3005                     E2 = Xn
3006                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
3007                 vy1 = H((vx2, Un)).reshape((__p,1))
3008                 #
3009                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
3010                     argsAsSerie = True,
3011                     returnSerieAsArrayMatrix = True )
3012                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3013                 #
3014                 if BnotT:
3015                     EaY = (HE2 - vy2) / _epsilon
3016                 else:
3017                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
3018                 #
3019                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
3020                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
3021                 Deltaw = - numpy.linalg.solve(mH,GradJ)
3022                 #
3023                 vw = vw + Deltaw
3024                 #
3025                 if not BnotT:
3026                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
3027                 #
3028                 __j = __j + 1
3029             #
3030             A2 = EnsembleOfAnomalies( E2 )
3031             #
3032             if BnotT:
3033                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
3034                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
3035             #
3036             Xn = vx2 + A2
3037         #--------------------------
3038         else:
3039             raise ValueError("VariantM has to be chosen in the authorized methods list.")
3040         #
3041         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3042             Xn = CovarianceInflation( Xn,
3043                 selfA._parameters["InflationType"],
3044                 selfA._parameters["InflationFactor"],
3045                 )
3046         #
3047         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
3048         #--------------------------
3049         #
3050         if selfA._parameters["StoreInternalVariables"] \
3051             or selfA._toStore("CostFunctionJ") \
3052             or selfA._toStore("CostFunctionJb") \
3053             or selfA._toStore("CostFunctionJo") \
3054             or selfA._toStore("APosterioriCovariance") \
3055             or selfA._toStore("InnovationAtCurrentAnalysis") \
3056             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3057             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3058             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
3059             _Innovation = Ynpu - _HXa
3060         #
3061         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3062         # ---> with analysis
3063         selfA.StoredVariables["Analysis"].store( Xa )
3064         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3065             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3066         if selfA._toStore("InnovationAtCurrentAnalysis"):
3067             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3068         # ---> with current state
3069         if selfA._parameters["StoreInternalVariables"] \
3070             or selfA._toStore("CurrentState"):
3071             selfA.StoredVariables["CurrentState"].store( Xn )
3072         if selfA._toStore("ForecastState"):
3073             selfA.StoredVariables["ForecastState"].store( E2 )
3074         if selfA._toStore("BMA"):
3075             selfA.StoredVariables["BMA"].store( E2 - Xa )
3076         if selfA._toStore("InnovationAtCurrentState"):
3077             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
3078         if selfA._toStore("SimulatedObservationAtCurrentState") \
3079             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3080             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
3081         # ---> others
3082         if selfA._parameters["StoreInternalVariables"] \
3083             or selfA._toStore("CostFunctionJ") \
3084             or selfA._toStore("CostFunctionJb") \
3085             or selfA._toStore("CostFunctionJo") \
3086             or selfA._toStore("CurrentOptimum") \
3087             or selfA._toStore("APosterioriCovariance"):
3088             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3089             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3090             J   = Jb + Jo
3091             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3092             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3093             selfA.StoredVariables["CostFunctionJ" ].store( J )
3094             #
3095             if selfA._toStore("IndexOfOptimum") \
3096                 or selfA._toStore("CurrentOptimum") \
3097                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3098                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3099                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3100                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3101                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3102             if selfA._toStore("IndexOfOptimum"):
3103                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3104             if selfA._toStore("CurrentOptimum"):
3105                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3106             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3107                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3108             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3109                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3110             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3111                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3112             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3113                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3114         if selfA._toStore("APosterioriCovariance"):
3115             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
3116         if selfA._parameters["EstimationOf"] == "Parameters" \
3117             and J < previousJMinimum:
3118             previousJMinimum    = J
3119             XaMin               = Xa
3120             if selfA._toStore("APosterioriCovariance"):
3121                 covarianceXaMin = Pn
3122     #
3123     # Additional final storage of the optimum in parameter estimation
3124     # ----------------------------------------------------------------
3125     if selfA._parameters["EstimationOf"] == "Parameters":
3126         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3127         selfA.StoredVariables["Analysis"].store( XaMin )
3128         if selfA._toStore("APosterioriCovariance"):
3129             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3130         if selfA._toStore("BMA"):
3131             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3132     #
3133     return 0
3134
3135 # ==============================================================================
3136 if __name__ == "__main__":
3137     print('\n AUTODIAGNOSTIC\n')