1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Définit les objets numériques génériques.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = list(sys.path) ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
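# Illustrative sketch (not executed): "ExecuteFunction" is meant to be mapped
# over a list of "triplet" jobs by a multiprocessing pool. The path, module and
# function names below are hypothetical placeholders.
#
#   funcrepr = {
#       "__userFunction__path" : "/some/user/path",   # directory inserted into sys.path
#       "__userFunction__modl" : "user_model",        # module file name, without extension
#       "__userFunction__name" : "DirectOperator",    # callable defined in that module
#       }
#   triplet = ( numpy.arange(3), None, funcrepr )     # (X, xArgs, funcrepr)
#   HX = ExecuteFunction( triplet )                   # numpy.ravel of the user function applied to X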
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     Cette classe sert d'interface pour définir les opérateurs approximés. A la
55     création d'un objet, en fournissant une fonction "Function", on obtient un
56     objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57     "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58     multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59         "dX" qui sera multiplié par "increment" (donc en %), et on effectue des DF
60     centrées si le booléen "centeredDF" est vrai.
61     """
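    # Illustrative sketch (not executed), assuming a user function "H" that takes
    # a vector and returns a vector; the names below are placeholders:
    #
    #   FDA = FDApproximation( Function = H, increment = 0.01, centeredDF = False )
    #   HX  = FDA.DirectOperator( X )            # H(X)
    #   J   = FDA.TangentMatrix( X )             # finite-difference Jacobian of H at X
    #   JdX = FDA.TangentOperator( (X, dX) )     # J applied to dX
    #   JtY = FDA.AdjointOperator( (X, Y) )      # J transposed, applied to Y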
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except Exception:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except Exception:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déjà calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Calcul du direct à l'aide de la fonction fournie.
171
172         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
173         ne doivent pas être données ici à la fonction utilisateur.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _X = numpy.asmatrix(numpy.ravel( X )).T
180             _HX = numpy.ravel(self.__userFunction( _X ))
181         #
182         return _HX
183
184     # ---------------------------------------------------------
185     def TangentMatrix(self, X ):
186         """
187         Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
188         c'est-à-dire le gradient de H en X. On utilise des différences finies
189         directionnelles autour du point X. X est un numpy.matrix.
190
191         Différences finies centrées (approximation d'ordre 2):
192         1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
193            dX[i] à la  composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
194            on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
195            H( X_moins_dXi )
196         2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
197            le pas 2*dXi
198         3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
199
200         Différences finies non centrées (approximation d'ordre 1):
201         1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
202            composante X[i] pour composer X_plus_dXi, et on calcule la réponse
203            HX_plus_dXi = H( X_plus_dXi )
204         2/ On calcule la valeur centrale HX = H(X)
205         3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
206            le pas dXi
207         4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
208
209         """
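        # In compact form, with e_i the i-th canonical basis vector, the i-th column
        # of the Jacobian built below is:
        #   centered     : J[:,i] ~ ( H(X + dX_i e_i) - H(X - dX_i e_i) ) / ( 2 dX_i )
        #   non centered : J[:,i] ~ ( H(X + dX_i e_i) - H(X) ) / dX_i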
210         logging.debug("FDA Début du calcul de la Jacobienne")
211         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
212         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
213         #
214         if X is None or len(X)==0:
215             raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
216         #
217         _X = numpy.asmatrix(numpy.ravel( X )).T
218         #
219         if self.__dX is None:
220             _dX  = self.__increment * _X
221         else:
222             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
223         #
224         if (_dX == 0.).any():
225             moyenne = _dX.mean()
226             if moyenne == 0.:
227                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
228             else:
229                 _dX = numpy.where( _dX == 0., moyenne, _dX )
230         #
231         __alreadyCalculated  = False
232         if self.__avoidRC:
233             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
234             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236                 __alreadyCalculated, __i = True, __alreadyCalculatedP
237                 logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
238         #
239         if __alreadyCalculated:
240             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
241             _Jacobienne = self.__listJPCR[__i]
242         else:
243             logging.debug("FDA   Calcul Jacobienne (explicite)")
244             if self.__centeredDF:
245                 #
246                 if self.__mpEnabled and not self.__mfEnabled:
247                     funcrepr = {
248                         "__userFunction__path" : self.__userFunction__path,
249                         "__userFunction__modl" : self.__userFunction__modl,
250                         "__userFunction__name" : self.__userFunction__name,
251                     }
252                     _jobs = []
253                     for i in range( len(_dX) ):
254                         _dXi            = _dX[i]
255                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
256                         _X_plus_dXi[i]  = _X[i] + _dXi
257                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
258                         _X_moins_dXi[i] = _X[i] - _dXi
259                         #
260                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
261                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
262                     #
263                     import multiprocessing
264                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
265                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
266                     self.__pool.close()
267                     self.__pool.join()
268                     #
269                     _Jacobienne  = []
270                     for i in range( len(_dX) ):
271                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
272                     #
273                 elif self.__mfEnabled:
274                     _xserie = []
275                     for i in range( len(_dX) ):
276                         _dXi            = _dX[i]
277                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
278                         _X_plus_dXi[i]  = _X[i] + _dXi
279                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
280                         _X_moins_dXi[i] = _X[i] - _dXi
281                         #
282                         _xserie.append( _X_plus_dXi )
283                         _xserie.append( _X_moins_dXi )
284                     #
285                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
286                     #
287                     _Jacobienne  = []
288                     for i in range( len(_dX) ):
289                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
290                     #
291                 else:
292                     _Jacobienne  = []
293                     for i in range( _dX.size ):
294                         _dXi            = _dX[i]
295                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
296                         _X_plus_dXi[i]  = _X[i] + _dXi
297                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
298                         _X_moins_dXi[i] = _X[i] - _dXi
299                         #
300                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
301                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
302                         #
303                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
304                 #
305             else:
306                 #
307                 if self.__mpEnabled and not self.__mfEnabled:
308                     funcrepr = {
309                         "__userFunction__path" : self.__userFunction__path,
310                         "__userFunction__modl" : self.__userFunction__modl,
311                         "__userFunction__name" : self.__userFunction__name,
312                     }
313                     _jobs = []
314                     _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315                     for i in range( len(_dX) ):
316                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
317                         _X_plus_dXi[i] = _X[i] + _dX[i]
318                         #
319                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
320                     #
321                     import multiprocessing
322                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
323                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
324                     self.__pool.close()
325                     self.__pool.join()
326                     #
327                     _HX = _HX_plus_dX.pop(0)
328                     #
329                     _Jacobienne = []
330                     for i in range( len(_dX) ):
331                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
332                     #
333                 elif self.__mfEnabled:
334                     _xserie = []
335                     _xserie.append( _X.A1 )
336                     for i in range( len(_dX) ):
337                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
338                         _X_plus_dXi[i] = _X[i] + _dX[i]
339                         #
340                         _xserie.append( _X_plus_dXi )
341                     #
342                     _HX_plus_dX = self.DirectOperator( _xserie )
343                     #
344                     _HX = _HX_plus_dX.pop(0)
345                     #
346                     _Jacobienne = []
347                     for i in range( len(_dX) ):
348                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
349                     #
350                 else:
351                     _Jacobienne  = []
352                     _HX = self.DirectOperator( _X )
353                     for i in range( _dX.size ):
354                         _dXi            = _dX[i]
355                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
356                         _X_plus_dXi[i]  = _X[i] + _dXi
357                         #
358                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
359                         #
360                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
361                 #
362             #
363             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
364             if self.__avoidRC:
365                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366                 while len(self.__listJPCP) > self.__lenghtRJ:
367                     self.__listJPCP.pop(0)
368                     self.__listJPCI.pop(0)
369                     self.__listJPCR.pop(0)
370                     self.__listJPPN.pop(0)
371                     self.__listJPIN.pop(0)
372                 self.__listJPCP.append( copy.copy(_X) )
373                 self.__listJPCI.append( copy.copy(_dX) )
374                 self.__listJPCR.append( copy.copy(_Jacobienne) )
375                 self.__listJPPN.append( numpy.linalg.norm(_X) )
376                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
377         #
378         logging.debug("FDA Fin du calcul de la Jacobienne")
379         #
380         return _Jacobienne
381
382     # ---------------------------------------------------------
383     def TangentOperator(self, paire, **extraArgs ):
384         """
385         Calcul du tangent à l'aide de la Jacobienne.
386
387         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
388         ne doivent pas être données ici à la fonction utilisateur.
389         """
390         if self.__mfEnabled:
391             assert len(paire) == 1, "Incorrect length of arguments"
392             _paire = paire[0]
393             assert len(_paire) == 2, "Incorrect number of arguments"
394         else:
395             assert len(paire) == 2, "Incorrect number of arguments"
396             _paire = paire
397         X, dX = _paire
398         _Jacobienne = self.TangentMatrix( X )
399         if dX is None or len(dX) == 0:
400             #
401             # Calcul de la forme matricielle si le second argument est None
402             # -------------------------------------------------------------
403             if self.__mfEnabled: return [_Jacobienne,]
404             else:                return _Jacobienne
405         else:
406             #
407             # Calcul de la valeur linéarisée de H en X appliqué à dX
408             # ------------------------------------------------------
409             _dX = numpy.asmatrix(numpy.ravel( dX )).T
410             _HtX = numpy.dot(_Jacobienne, _dX)
411             if self.__mfEnabled: return [_HtX.A1,]
412             else:                return _HtX.A1
413
414     # ---------------------------------------------------------
415     def AdjointOperator(self, paire, **extraArgs ):
416         """
417         Calcul de l'adjoint à l'aide de la Jacobienne.
418
419         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
420         ne doivent pas être données ici à la fonction utilisateur.
421         """
422         if self.__mfEnabled:
423             assert len(paire) == 1, "Incorrect length of arguments"
424             _paire = paire[0]
425             assert len(_paire) == 2, "Incorrect number of arguments"
426         else:
427             assert len(paire) == 2, "Incorrect number of arguments"
428             _paire = paire
429         X, Y = _paire
430         _JacobienneT = self.TangentMatrix( X ).T
431         if Y is None or len(Y) == 0:
432             #
433             # Calcul de la forme matricielle si le second argument est None
434             # -------------------------------------------------------------
435             if self.__mfEnabled: return [_JacobienneT,]
436             else:                return _JacobienneT
437         else:
438             #
439             # Calcul de la valeur de l'adjoint en X appliqué à Y
440             # --------------------------------------------------
441             _Y = numpy.asmatrix(numpy.ravel( Y )).T
442             _HaY = numpy.dot(_JacobienneT, _Y)
443             if self.__mfEnabled: return [_HaY.A1,]
444             else:                return _HaY.A1
445
446 # ==============================================================================
447 def mmqr(
448         func     = None,
449         x0       = None,
450         fprime   = None,
451         bounds   = None,
452         quantile = 0.5,
453         maxfun   = 15000,
454         toler    = 1.e-06,
455         y        = None,
456         ):
457     """
458     Implémentation informatique de l'algorithme MMQR, basée sur la publication :
459     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
460     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
461     """
462     #
463     # Recuperation des donnees et informations initiales
464     # --------------------------------------------------
465     variables = numpy.ravel( x0 )
466     mesures   = numpy.ravel( y )
467     increment = sys.float_info[0] # i.e. sys.float_info.max, so that the search loop below is entered
468     p         = variables.size
469     n         = mesures.size
470     quantile  = float(quantile)
471     #
472     # Calcul des parametres du MM
473     # ---------------------------
474     tn      = float(toler) / n
475     e0      = -tn / math.log(tn)
476     epsilon = (e0-tn)/(1+math.log(e0))
477     #
478     # Calculs d'initialisation
479     # ------------------------
480     residus  = mesures - numpy.ravel( func( variables ) )
481     poids    = 1./(epsilon+numpy.abs(residus))
482     veps     = 1. - 2. * quantile - residus * poids
483     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
484     iteration = 0
485     #
486     # Recherche iterative
487     # -------------------
488     while (increment > toler) and (iteration < maxfun) :
489         iteration += 1
490         #
491         Derivees  = numpy.array(fprime(variables))
492         Derivees  = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
493         DeriveesT = Derivees.transpose()
494         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
495         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
496         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
497         #
498         variables = variables + step
499         if bounds is not None:
500             # Attention : boucle infinie à éviter si un intervalle est trop petit
501             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
502                 step      = step/2.
503                 variables = variables - step
504         residus   = mesures - numpy.ravel( func(variables) )
505         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
506         #
507         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
508             step      = step/2.
509             variables = variables - step
510             residus   = mesures - numpy.ravel( func(variables) )
511             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
512         #
513         increment     = lastsurrogate-surrogate
514         poids         = 1./(epsilon+numpy.abs(residus))
515         veps          = 1. - 2. * quantile - residus * poids
516         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
517     #
518     # Mesure d'écart
519     # --------------
520     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
521     #
522     return variables, Ecart, [n,p,iteration,increment,0]
523
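# Illustrative sketch (not executed): estimating the median (quantile 0.5) of a
# sample with a constant model; "func" and "fprime" are placeholder callables
# matching the signatures expected by mmqr.
#
#   y      = numpy.array([1., 2., 2., 3., 10.])
#   func   = lambda x: x[0] * numpy.ones(y.size)      # model output, size n
#   fprime = lambda x: numpy.ones((y.size, 1))        # Jacobian, shape (n, p)
#   xq, ecart, infos = mmqr(func=func, x0=[0.], fprime=fprime, y=y, quantile=0.5)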
524 # ==============================================================================
525 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
526     "Génération d'un ensemble de _nbmembers états aléatoires centrés"
527     #
528     _bgcenter = numpy.ravel(_bgcenter)[:,None]
529     if _nbmembers < 1:
530         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
531     #
532     if _bgcovariance is None:
533         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
534     else:
535         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
536         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
537     #
538     return BackgroundEnsemble
539
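# Illustrative sketch (not executed): drawing 5 states of size 3 around a given
# center, one member per column of the returned array.
#
#   Xb = numpy.array([1., 2., 3.])
#   E  = EnsembleOfCenteredPerturbations( Xb, numpy.eye(3), 5 )   # E.shape == (3, 5)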
540 # ==============================================================================
541 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
542     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
543     def __CenteredRandomAnomalies(Zr, N):
544         """
545         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
546         notes manuscrites de MB et conforme au code de PS avec eps = -1
547         """
548         eps = -1
549         Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
550         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
551         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
552         Q = numpy.dot(Q,R)
553         Zr = numpy.dot(Q,Zr)
554         return Zr.T
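    # Note: each column of Q sums to zero by construction, so the N anomalies
    # returned by __CenteredRandomAnomalies are exactly centered over the members.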
555     #
556     _bgcenter = numpy.ravel(_bgcenter)[:,None]
557     if _nbmembers < 1:
558         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
559     if _bgcovariance is None:
560         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
561     else:
562         if _withSVD:
563             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
564             _nbctl = _bgcenter.size
565             if _nbmembers > _nbctl:
566                 _Z = numpy.concatenate((numpy.dot(
567                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
568                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
569             else:
570                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
571             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
572             BackgroundEnsemble = _bgcenter + _Zca
573         else:
574             if max(abs(_bgcovariance.flatten())) > 0:
575                 _nbctl = _bgcenter.size
576                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
577                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
578                 BackgroundEnsemble = _bgcenter + _Zca
579             else:
580                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
581     #
582     return BackgroundEnsemble
583
584 # ==============================================================================
585 def EnsembleOfAnomalies( _ensemble, _optmean = None):
586     "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
587     if _optmean is None:
588         Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
589     else:
590         Em = numpy.ravel(_optmean)[:,numpy.newaxis]
591     #
592     return numpy.asarray(_ensemble) - Em
593
594 # ==============================================================================
595 def CovarianceInflation(
596         InputCovOrEns,
597         InflationType   = None,
598         InflationFactor = None,
599         BackgroundCov   = None,
600         ):
601     """
602     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
603
604     Synthèse : Hunt 2007, section 2.3.5
605     """
606     if InflationFactor is None:
607         return InputCovOrEns
608     else:
609         InflationFactor = float(InflationFactor)
610     #
611     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
612         if InflationFactor < 1.:
613             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
614         if InflationFactor < 1.+mpr:
615             return InputCovOrEns
616         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
617     #
618     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
619         if InflationFactor < 1.:
620             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
621         if InflationFactor < 1.+mpr:
622             return InputCovOrEns
623         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
624         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
625             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
626     #
627     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
628         if InflationFactor < 0.:
629             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
630         if InflationFactor < mpr:
631             return InputCovOrEns
632         __n, __m = numpy.asarray(InputCovOrEns).shape
633         if __n != __m:
634             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
635         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
636     #
637     elif InflationType == "HybridOnBackgroundCovariance":
638         if InflationFactor < 0.:
639             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
640         if InflationFactor < mpr:
641             return InputCovOrEns
642         __n, __m = numpy.asarray(InputCovOrEns).shape
643         if __n != __m:
644             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
645         if BackgroundCov is None:
646             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
647         if InputCovOrEns.shape != BackgroundCov.shape:
648             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
649         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
650     #
651     elif InflationType == "Relaxation":
652         raise NotImplementedError("InflationType Relaxation")
653     #
654     else:
655         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
656     #
657     return OutputCovOrEns
658
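# Illustrative sketch (not executed): multiplicative inflation of ensemble
# anomalies by a factor 1.1, each member being moved away from the ensemble
# mean as  X_i <- mean + 1.1 * (X_i - mean).
#
#   E  = numpy.random.randn(3, 10)     # 10 members of size 3, one per column
#   Ei = CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 1.1 )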
659 # ==============================================================================
660 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
661     """
662     3DVAR multi-pas et multi-méthodes
663     """
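    # Outline of the loop below: for state estimation, the first analysis is
    # initialized with Xb; then, for each observation step, the state is propagated
    # by the evolution model M (or kept unchanged for parameter estimation) and the
    # resulting forecast is handed to "oneCycle", the elementary 3DVAR variant
    # chosen by the caller.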
664     #
665     # Initialisation
666     # --------------
667     Xn = numpy.ravel(Xb).reshape((-1,1))
668     #
669     if selfA._parameters["EstimationOf"] == "State":
670         M = EM["Direct"].appliedTo
671         #
672         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
673             selfA.StoredVariables["Analysis"].store( Xn )
674             if selfA._toStore("APosterioriCovariance"):
675                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
676                 else:                         Pn = B
677                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
678             if selfA._toStore("ForecastState"):
679                 selfA.StoredVariables["ForecastState"].store( Xn )
680     #
681     if hasattr(Y,"stepnumber"):
682         duration = Y.stepnumber()
683     else:
684         duration = 2
685     #
686     # Multi-pas
687     # ---------
688     for step in range(duration-1):
689         if hasattr(Y,"store"):
690             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
691         else:
692             Ynpu = numpy.ravel( Y ).reshape((-1,1))
693         #
694         if selfA._parameters["EstimationOf"] == "State": # Forecast
695             Xn = selfA.StoredVariables["Analysis"][-1]
696             Xn_predicted = M( Xn )
697             if selfA._toStore("ForecastState"):
698                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
699         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
700             # --- > Par principe, M = Id, Q = 0
701             Xn_predicted = Xn
702         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
703         #
704         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
705     #
706     return 0
707
708 # ==============================================================================
709 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
710     """
711     3DVAR
712     """
713     #
714     # Initialisations
715     # ---------------
716     #
717     # Opérateurs
718     Hm = HO["Direct"].appliedTo
719     Ha = HO["Adjoint"].appliedInXTo
720     #
721     # Utilisation éventuelle d'un vecteur H(Xb) précalculé
722     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
723         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
724     else:
725         HXb = Hm( Xb )
726     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
727     if Y.size != HXb.size:
728         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
729     if max(Y.shape) != max(HXb.shape):
730         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
731     #
732     if selfA._toStore("JacobianMatrixAtBackground"):
733         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
734         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
735         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
736     #
737     # Précalcul des inversions de B et R
738     BI = B.getI()
739     RI = R.getI()
740     #
741     # Point de démarrage de l'optimisation
742     Xini = selfA._parameters["InitializationPoint"]
743     #
744     # Définition de la fonction-coût
745     # ------------------------------
746     def CostFunction(x):
747         _X  = numpy.asmatrix(numpy.ravel( x )).T
748         if selfA._parameters["StoreInternalVariables"] or \
749             selfA._toStore("CurrentState") or \
750             selfA._toStore("CurrentOptimum"):
751             selfA.StoredVariables["CurrentState"].store( _X )
752         _HX = Hm( _X )
753         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
754         _Innovation = Y - _HX
755         if selfA._toStore("SimulatedObservationAtCurrentState") or \
756             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
757             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
758         if selfA._toStore("InnovationAtCurrentState"):
759             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
760         #
761         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
762         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
763         J   = Jb + Jo
764         #
765         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
766         selfA.StoredVariables["CostFunctionJb"].store( Jb )
767         selfA.StoredVariables["CostFunctionJo"].store( Jo )
768         selfA.StoredVariables["CostFunctionJ" ].store( J )
769         if selfA._toStore("IndexOfOptimum") or \
770             selfA._toStore("CurrentOptimum") or \
771             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
772             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
773             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
774             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
775             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
776         if selfA._toStore("IndexOfOptimum"):
777             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
778         if selfA._toStore("CurrentOptimum"):
779             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
780         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
781             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
782         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
783             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
784         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
785             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
786         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
787             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
788         return J
789     #
790     def GradientOfCostFunction(x):
791         _X      = numpy.asmatrix(numpy.ravel( x )).T
792         _HX     = Hm( _X )
793         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
794         GradJb  = BI * (_X - Xb)
795         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
796         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
797         return GradJ
798     #
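    # The two functions above implement the usual 3DVAR functional and its gradient:
    #   J(x)      = 1/2 (x-Xb)^T B^{-1} (x-Xb) + 1/2 (Y-H(x))^T R^{-1} (Y-H(x))
    #   grad J(x) = B^{-1} (x-Xb) - H'^T R^{-1} (Y-H(x))
    # where H' is the tangent linear of H, applied here through the adjoint Ha.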
799     # Minimisation de la fonctionnelle
800     # --------------------------------
801     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
802     #
803     if selfA._parameters["Minimizer"] == "LBFGSB":
804         if "0.19" <= scipy.version.version <= "1.1.0":
805             import lbfgsbhlt as optimiseur
806         else:
807             import scipy.optimize as optimiseur
808         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
809             func        = CostFunction,
810             x0          = Xini,
811             fprime      = GradientOfCostFunction,
812             args        = (),
813             bounds      = selfA._parameters["Bounds"],
814             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
815             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
816             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
817             iprint      = selfA._parameters["optiprint"],
818             )
819         nfeval = Informations['funcalls']
820         rc     = Informations['warnflag']
821     elif selfA._parameters["Minimizer"] == "TNC":
822         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
823             func        = CostFunction,
824             x0          = Xini,
825             fprime      = GradientOfCostFunction,
826             args        = (),
827             bounds      = selfA._parameters["Bounds"],
828             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
829             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
830             ftol        = selfA._parameters["CostDecrementTolerance"],
831             messages    = selfA._parameters["optmessages"],
832             )
833     elif selfA._parameters["Minimizer"] == "CG":
834         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
835             f           = CostFunction,
836             x0          = Xini,
837             fprime      = GradientOfCostFunction,
838             args        = (),
839             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
840             gtol        = selfA._parameters["GradientNormTolerance"],
841             disp        = selfA._parameters["optdisp"],
842             full_output = True,
843             )
844     elif selfA._parameters["Minimizer"] == "NCG":
845         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
846             f           = CostFunction,
847             x0          = Xini,
848             fprime      = GradientOfCostFunction,
849             args        = (),
850             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
851             avextol     = selfA._parameters["CostDecrementTolerance"],
852             disp        = selfA._parameters["optdisp"],
853             full_output = True,
854             )
855     elif selfA._parameters["Minimizer"] == "BFGS":
856         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
857             f           = CostFunction,
858             x0          = Xini,
859             fprime      = GradientOfCostFunction,
860             args        = (),
861             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
862             gtol        = selfA._parameters["GradientNormTolerance"],
863             disp        = selfA._parameters["optdisp"],
864             full_output = True,
865             )
866     else:
867         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
868     #
869     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
870     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
871     #
872     # Correction pour pallier un bug de TNC sur le retour du Minimum
873     # ----------------------------------------------------------------
874     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
875         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
876     #
877     # Obtention de l'analyse
878     # ----------------------
879     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
880     #
881     selfA.StoredVariables["Analysis"].store( Xa )
882     #
883     if selfA._toStore("OMA") or \
884         selfA._toStore("SigmaObs2") or \
885         selfA._toStore("SimulationQuantiles") or \
886         selfA._toStore("SimulatedObservationAtOptimum"):
887         if selfA._toStore("SimulatedObservationAtCurrentState"):
888             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
889         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
890             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
891         else:
892             HXa = Hm( Xa )
893     #
894     # Calcul de la covariance d'analyse
895     # ---------------------------------
896     if selfA._toStore("APosterioriCovariance") or \
897         selfA._toStore("SimulationQuantiles") or \
898         selfA._toStore("JacobianMatrixAtOptimum") or \
899         selfA._toStore("KalmanGainAtOptimum"):
900         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
901         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
902     if selfA._toStore("APosterioriCovariance") or \
903         selfA._toStore("SimulationQuantiles") or \
904         selfA._toStore("KalmanGainAtOptimum"):
905         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
906         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
907     if selfA._toStore("APosterioriCovariance") or \
908         selfA._toStore("SimulationQuantiles"):
909         HessienneI = []
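        # Each column assembled below is (B^{-1} + H'^T R^{-1} H') e_i, so that
        # "HessienneI" is the Hessian of J at the optimum and A, its inverse, is
        # the classical 3DVAR a posteriori covariance.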
910         nb = Xa.size
911         for i in range(nb):
912             _ee    = numpy.matrix(numpy.zeros(nb)).T
913             _ee[i] = 1.
914             _HtEE  = numpy.dot(HtM,_ee)
915             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
916             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
917         HessienneI = numpy.matrix( HessienneI )
918         A = HessienneI.I
919         if min(A.shape) != max(A.shape):
920             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
921         if (numpy.diag(A) < 0).any():
922             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
923         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
924             try:
925                 L = numpy.linalg.cholesky( A )
926             except Exception:
927                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
928     if selfA._toStore("APosterioriCovariance"):
929         selfA.StoredVariables["APosterioriCovariance"].store( A )
930     if selfA._toStore("JacobianMatrixAtOptimum"):
931         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
932     if selfA._toStore("KalmanGainAtOptimum"):
933         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
934         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
935         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
936     #
937     # Calculs et/ou stockages supplémentaires
938     # ---------------------------------------
939     if selfA._toStore("Innovation") or \
940         selfA._toStore("SigmaObs2") or \
941         selfA._toStore("MahalanobisConsistency") or \
942         selfA._toStore("OMB"):
943         d  = Y - HXb
944     if selfA._toStore("Innovation"):
945         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
946     if selfA._toStore("BMA"):
947         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
948     if selfA._toStore("OMA"):
949         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
950     if selfA._toStore("OMB"):
951         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
952     if selfA._toStore("SigmaObs2"):
953         TraceR = R.trace(Y.size)
954         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
955     if selfA._toStore("MahalanobisConsistency"):
956         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
957     if selfA._toStore("SimulationQuantiles"):
958         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
959         HXa  = numpy.matrix(numpy.ravel( HXa )).T
960         YfQ  = None
961         for i in range(nech):
962             if selfA._parameters["SimulationForQuantiles"] == "Linear":
963                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
964                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
965                 Yr = HXa + dYr
966             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
967                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
968                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
969             if YfQ is None:
970                 YfQ = Yr
971             else:
972                 YfQ = numpy.hstack((YfQ,Yr))
973         YfQ.sort(axis=-1)
974         YQ = None
975         for quantile in selfA._parameters["Quantiles"]:
976             if not (0. <= float(quantile) <= 1.): continue
977             indice = int(nech * float(quantile) - 1./nech)
978             if YQ is None: YQ = YfQ[:,indice]
979             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
980         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
981     if selfA._toStore("SimulatedObservationAtBackground"):
982         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
983     if selfA._toStore("SimulatedObservationAtOptimum"):
984         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
985     #
986     return 0
987
988 # ==============================================================================
989 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
990     """
991     3DVAR variational analysis with no inversion of B
992     """
993     #
994     # Initialisations
995     # ---------------
996     #
997     # Opérateurs
998     Hm = HO["Direct"].appliedTo
999     Ha = HO["Adjoint"].appliedInXTo
1000     #
1001     # Précalcul des inversions de B et R
1002     BT = B.getT()
1003     RI = R.getI()
1004     #
1005     # Point de démarrage de l'optimisation
1006     Xini = numpy.zeros(Xb.shape)
1007     #
1008     # Définition de la fonction-coût
1009     # ------------------------------
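    # Change of variable used below: the control variable is v, with x = Xb + B v,
    # so that the background term reads 1/2 v^T B v and no inverse of B is required.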
1010     def CostFunction(v):
1011         _V = numpy.asmatrix(numpy.ravel( v )).T
1012         _X = Xb + B * _V
1013         if selfA._parameters["StoreInternalVariables"] or \
1014             selfA._toStore("CurrentState") or \
1015             selfA._toStore("CurrentOptimum"):
1016             selfA.StoredVariables["CurrentState"].store( _X )
1017         _HX = Hm( _X )
1018         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1019         _Innovation = Y - _HX
1020         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1021             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1022             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
1023         if selfA._toStore("InnovationAtCurrentState"):
1024             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1025         #
1026         Jb  = float( 0.5 * _V.T * BT * _V )
1027         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1028         J   = Jb + Jo
1029         #
1030         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1031         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1032         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1033         selfA.StoredVariables["CostFunctionJ" ].store( J )
1034         if selfA._toStore("IndexOfOptimum") or \
1035             selfA._toStore("CurrentOptimum") or \
1036             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1037             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1038             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1039             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1040             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1041         if selfA._toStore("IndexOfOptimum"):
1042             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1043         if selfA._toStore("CurrentOptimum"):
1044             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1045         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1046             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1047         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1048             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1049         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1050             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1051         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1052             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1053         return J
1054     #
1055     def GradientOfCostFunction(v):
1056         _V = numpy.asmatrix(numpy.ravel( v )).T
1057         _X = Xb + B * _V
1058         _HX     = Hm( _X )
1059         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
1060         GradJb  = BT * _V
1061         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
1062         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1063         return GradJ
1064     #
1065     # Minimisation de la fonctionnelle
1066     # --------------------------------
1067     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1068     #
1069     if selfA._parameters["Minimizer"] == "LBFGSB":
1070         if "0.19" <= scipy.version.version <= "1.1.0":
1071             import lbfgsbhlt as optimiseur
1072         else:
1073             import scipy.optimize as optimiseur
1074         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1075             func        = CostFunction,
1076             x0          = Xini,
1077             fprime      = GradientOfCostFunction,
1078             args        = (),
1079             bounds      = selfA._parameters["Bounds"],
1080             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1081             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1082             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1083             iprint      = selfA._parameters["optiprint"],
1084             )
1085         nfeval = Informations['funcalls']
1086         rc     = Informations['warnflag']
1087     elif selfA._parameters["Minimizer"] == "TNC":
1088         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1089             func        = CostFunction,
1090             x0          = Xini,
1091             fprime      = GradientOfCostFunction,
1092             args        = (),
1093             bounds      = selfA._parameters["Bounds"],
1094             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1095             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1096             ftol        = selfA._parameters["CostDecrementTolerance"],
1097             messages    = selfA._parameters["optmessages"],
1098             )
1099     elif selfA._parameters["Minimizer"] == "CG":
1100         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1101             f           = CostFunction,
1102             x0          = Xini,
1103             fprime      = GradientOfCostFunction,
1104             args        = (),
1105             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1106             gtol        = selfA._parameters["GradientNormTolerance"],
1107             disp        = selfA._parameters["optdisp"],
1108             full_output = True,
1109             )
1110     elif selfA._parameters["Minimizer"] == "NCG":
1111         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1112             f           = CostFunction,
1113             x0          = Xini,
1114             fprime      = GradientOfCostFunction,
1115             args        = (),
1116             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1117             avextol     = selfA._parameters["CostDecrementTolerance"],
1118             disp        = selfA._parameters["optdisp"],
1119             full_output = True,
1120             )
1121     elif selfA._parameters["Minimizer"] == "BFGS":
1122         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1123             f           = CostFunction,
1124             x0          = Xini,
1125             fprime      = GradientOfCostFunction,
1126             args        = (),
1127             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1128             gtol        = selfA._parameters["GradientNormTolerance"],
1129             disp        = selfA._parameters["optdisp"],
1130             full_output = True,
1131             )
1132     else:
1133         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1134     #
1135     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1136     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1137     #
1138     # Correction to work around a TNC bug in the returned Minimum
1139     # ------------------------------------------------------------
1140     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1141         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1142         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1143     else:
1144         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
1145     #
1146     # Retrieving the analysis
1147     # ------------------------
1148     Xa = Minimum
1149     #
1150     selfA.StoredVariables["Analysis"].store( Xa )
1151     #
1152     if selfA._toStore("OMA") or \
1153         selfA._toStore("SigmaObs2") or \
1154         selfA._toStore("SimulationQuantiles") or \
1155         selfA._toStore("SimulatedObservationAtOptimum"):
1156         if selfA._toStore("SimulatedObservationAtCurrentState"):
1157             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1158         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1159             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1160         else:
1161             HXa = Hm( Xa )
1162     #
1163     # Computation of the analysis error covariance
1164     # ---------------------------------------------
1165     if selfA._toStore("APosterioriCovariance") or \
1166         selfA._toStore("SimulationQuantiles") or \
1167         selfA._toStore("JacobianMatrixAtOptimum") or \
1168         selfA._toStore("KalmanGainAtOptimum"):
1169         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1170         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1171     if selfA._toStore("APosterioriCovariance") or \
1172         selfA._toStore("SimulationQuantiles") or \
1173         selfA._toStore("KalmanGainAtOptimum"):
1174         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1175         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1176     if selfA._toStore("APosterioriCovariance") or \
1177         selfA._toStore("SimulationQuantiles"):
1178         BI = B.getI()
1179         HessienneI = []
1180         nb = Xa.size
1181         for i in range(nb):
1182             _ee    = numpy.matrix(numpy.zeros(nb)).T
1183             _ee[i] = 1.
1184             _HtEE  = numpy.dot(HtM,_ee)
1185             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1186             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1187         HessienneI = numpy.matrix( HessienneI )
1188         A = HessienneI.I
1189         if min(A.shape) != max(A.shape):
1190             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1191         if (numpy.diag(A) < 0).any():
1192             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1193         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1194             try:
1195                 L = numpy.linalg.cholesky( A )
1196             except:
1197                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1198     if selfA._toStore("APosterioriCovariance"):
1199         selfA.StoredVariables["APosterioriCovariance"].store( A )
1200     if selfA._toStore("JacobianMatrixAtOptimum"):
1201         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1202     if selfA._toStore("KalmanGainAtOptimum"):
1203         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1204         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1205         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1206     #
1207     # Additional calculations and/or storage
1208     # ---------------------------------------
1209     if selfA._toStore("Innovation") or \
1210         selfA._toStore("SigmaObs2") or \
1211         selfA._toStore("MahalanobisConsistency") or \
1212         selfA._toStore("OMB"):
1213         d  = Y - HXb
1214     if selfA._toStore("Innovation"):
1215         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1216     if selfA._toStore("BMA"):
1217         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1218     if selfA._toStore("OMA"):
1219         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1220     if selfA._toStore("OMB"):
1221         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1222     if selfA._toStore("SigmaObs2"):
1223         TraceR = R.trace(Y.size)
1224         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1225     if selfA._toStore("MahalanobisConsistency"):
1226         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1227     if selfA._toStore("SimulationQuantiles"):
1228         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1229         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1230         YfQ  = None
1231         for i in range(nech):
1232             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1233                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1234                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1235                 Yr = HXa + dYr
1236             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1237                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1238                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1239             if YfQ is None:
1240                 YfQ = Yr
1241             else:
1242                 YfQ = numpy.hstack((YfQ,Yr))
1243         YfQ.sort(axis=-1)
1244         YQ = None
1245         for quantile in selfA._parameters["Quantiles"]:
1246             if not (0. <= float(quantile) <= 1.): continue
1247             indice = int(nech * float(quantile) - 1./nech)
1248             if YQ is None: YQ = YfQ[:,indice]
1249             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1250         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1251     if selfA._toStore("SimulatedObservationAtBackground"):
1252         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1253     if selfA._toStore("SimulatedObservationAtOptimum"):
1254         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1255     #
1256     return 0
1257
1258 # ==============================================================================
1259 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1260     """
1261     Incremental 3DVAR
1262     """
1263     #
1264     # Initializations
1265     # ---------------
1266     #
1267     # Non-linear operator for the outer loop
1268     Hm = HO["Direct"].appliedTo
1269     #
1270     # Precompute the inverses of B and R
1271     BI = B.getI()
1272     RI = R.getI()
1273     #
1274     # Starting point of the optimization
1275     Xini = selfA._parameters["InitializationPoint"]
1276     #
1277     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1278     Innovation = Y - HXb
1279     #
1280     # Outer Loop
1281     # ----------
1282     iOuter = 0
1283     J      = 1./mpr
1284     DeltaJ = 1./mpr
1285     Xr     = Xini.reshape((-1,1))
1286     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1287         #
1288         # Inner Loop
1289         # ----------
1290         Ht = HO["Tangent"].asMatrix(Xr)
1291         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1292         #
1293         # Definition of the cost function
1294         # --------------------------------
1295         def CostFunction(dx):
1296             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1297             if selfA._parameters["StoreInternalVariables"] or \
1298                 selfA._toStore("CurrentState") or \
1299                 selfA._toStore("CurrentOptimum"):
1300                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1301             _HdX = Ht * _dX
1302             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1303             _dInnovation = Innovation - _HdX
1304             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1305                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1306                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1307             if selfA._toStore("InnovationAtCurrentState"):
1308                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1309             #
1310             Jb  = float( 0.5 * _dX.T * BI * _dX )
1311             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1312             J   = Jb + Jo
1313             #
1314             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1315             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1316             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1317             selfA.StoredVariables["CostFunctionJ" ].store( J )
1318             if selfA._toStore("IndexOfOptimum") or \
1319                 selfA._toStore("CurrentOptimum") or \
1320                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1321                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1322                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1323                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1324                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1325             if selfA._toStore("IndexOfOptimum"):
1326                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1327             if selfA._toStore("CurrentOptimum"):
1328                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1329             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1330                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1331             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1332                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1333             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1334                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1335             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1336                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1337             return J
1338         #
1339         def GradientOfCostFunction(dx):
1340             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1341             _HdX         = Ht * _dX
1342             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1343             _dInnovation = Innovation - _HdX
1344             GradJb       = BI * _dX
1345             GradJo       = - Ht.T @ (RI * _dInnovation)
1346             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1347             return GradJ
1348         #
1349         # Minimization of the cost function
1350         # ----------------------------------
1351         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1352         #
1353         if selfA._parameters["Minimizer"] == "LBFGSB":
1354             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1355             if "0.19" <= scipy.version.version <= "1.1.0":
1356                 import lbfgsbhlt as optimiseur
1357             else:
1358                 import scipy.optimize as optimiseur
1359             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1360                 func        = CostFunction,
1361                 x0          = numpy.zeros(Xini.size),
1362                 fprime      = GradientOfCostFunction,
1363                 args        = (),
1364                 bounds      = selfA._parameters["Bounds"],
1365                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1366                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1367                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1368                 iprint      = selfA._parameters["optiprint"],
1369                 )
1370             nfeval = Informations['funcalls']
1371             rc     = Informations['warnflag']
1372         elif selfA._parameters["Minimizer"] == "TNC":
1373             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1374                 func        = CostFunction,
1375                 x0          = numpy.zeros(Xini.size),
1376                 fprime      = GradientOfCostFunction,
1377                 args        = (),
1378                 bounds      = selfA._parameters["Bounds"],
1379                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1380                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1381                 ftol        = selfA._parameters["CostDecrementTolerance"],
1382                 messages    = selfA._parameters["optmessages"],
1383                 )
1384         elif selfA._parameters["Minimizer"] == "CG":
1385             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1386                 f           = CostFunction,
1387                 x0          = numpy.zeros(Xini.size),
1388                 fprime      = GradientOfCostFunction,
1389                 args        = (),
1390                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1391                 gtol        = selfA._parameters["GradientNormTolerance"],
1392                 disp        = selfA._parameters["optdisp"],
1393                 full_output = True,
1394                 )
1395         elif selfA._parameters["Minimizer"] == "NCG":
1396             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1397                 f           = CostFunction,
1398                 x0          = numpy.zeros(Xini.size),
1399                 fprime      = GradientOfCostFunction,
1400                 args        = (),
1401                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1402                 avextol     = selfA._parameters["CostDecrementTolerance"],
1403                 disp        = selfA._parameters["optdisp"],
1404                 full_output = True,
1405                 )
1406         elif selfA._parameters["Minimizer"] == "BFGS":
1407             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1408                 f           = CostFunction,
1409                 x0          = numpy.zeros(Xini.size),
1410                 fprime      = GradientOfCostFunction,
1411                 args        = (),
1412                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1413                 gtol        = selfA._parameters["GradientNormTolerance"],
1414                 disp        = selfA._parameters["optdisp"],
1415                 full_output = True,
1416                 )
1417         else:
1418             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1419         #
1420         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1421         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1422         #
1423         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1424             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1425             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1426         else:
1427             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1428         #
1429         Xr     = Minimum
1430         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
             J      = selfA.StoredVariables["CostFunctionJ" ][-1] # Keep the latest cost so the outer-loop decrement test compares successive outer iterations
1431         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1432     #
1433     # Retrieving the analysis
1434     # ------------------------
1435     Xa = Xr
1436     #
1437     selfA.StoredVariables["Analysis"].store( Xa )
1438     #
1439     if selfA._toStore("OMA") or \
1440         selfA._toStore("SigmaObs2") or \
1441         selfA._toStore("SimulationQuantiles") or \
1442         selfA._toStore("SimulatedObservationAtOptimum"):
1443         if selfA._toStore("SimulatedObservationAtCurrentState"):
1444             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1445         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1446             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1447         else:
1448             HXa = Hm( Xa )
1449     #
1450     # Computation of the analysis error covariance
1451     # ---------------------------------------------
1452     if selfA._toStore("APosterioriCovariance") or \
1453         selfA._toStore("SimulationQuantiles") or \
1454         selfA._toStore("JacobianMatrixAtOptimum") or \
1455         selfA._toStore("KalmanGainAtOptimum"):
1456         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1457         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1458     if selfA._toStore("APosterioriCovariance") or \
1459         selfA._toStore("SimulationQuantiles") or \
1460         selfA._toStore("KalmanGainAtOptimum"):
1461         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1462         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1463     if selfA._toStore("APosterioriCovariance") or \
1464         selfA._toStore("SimulationQuantiles"):
1465         HessienneI = []
1466         nb = Xa.size
1467         for i in range(nb):
1468             _ee    = numpy.matrix(numpy.zeros(nb)).T
1469             _ee[i] = 1.
1470             _HtEE  = numpy.dot(HtM,_ee)
1471             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1472             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1473         HessienneI = numpy.matrix( HessienneI )
1474         A = HessienneI.I
1475         if min(A.shape) != max(A.shape):
1476             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1477         if (numpy.diag(A) < 0).any():
1478             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1479         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1480             try:
1481                 L = numpy.linalg.cholesky( A )
1482             except:
1483                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1484     if selfA._toStore("APosterioriCovariance"):
1485         selfA.StoredVariables["APosterioriCovariance"].store( A )
1486     if selfA._toStore("JacobianMatrixAtOptimum"):
1487         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1488     if selfA._toStore("KalmanGainAtOptimum"):
1489         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1490         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1491         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1492     #
1493     # Additional calculations and/or storage
1494     # ---------------------------------------
1495     if selfA._toStore("Innovation") or \
1496         selfA._toStore("SigmaObs2") or \
1497         selfA._toStore("MahalanobisConsistency") or \
1498         selfA._toStore("OMB"):
1499         d  = Y - HXb
1500     if selfA._toStore("Innovation"):
1501         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1502     if selfA._toStore("BMA"):
1503         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1504     if selfA._toStore("OMA"):
1505         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1506     if selfA._toStore("OMB"):
1507         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1508     if selfA._toStore("SigmaObs2"):
1509         TraceR = R.trace(Y.size)
1510         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1511     if selfA._toStore("MahalanobisConsistency"):
1512         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1513     if selfA._toStore("SimulationQuantiles"):
1514         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1515         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1516         YfQ  = None
1517         for i in range(nech):
1518             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1519                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1520                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1521                 Yr = HXa + dYr
1522             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1523                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1524                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1525             if YfQ is None:
1526                 YfQ = Yr
1527             else:
1528                 YfQ = numpy.hstack((YfQ,Yr))
1529         YfQ.sort(axis=-1)
1530         YQ = None
1531         for quantile in selfA._parameters["Quantiles"]:
1532             if not (0. <= float(quantile) <= 1.): continue
1533             indice = int(nech * float(quantile) - 1./nech)
1534             if YQ is None: YQ = YfQ[:,indice]
1535             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1536         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1537     if selfA._toStore("SimulatedObservationAtBackground"):
1538         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1539     if selfA._toStore("SimulatedObservationAtOptimum"):
1540         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1541     #
1542     return 0
1543
1544 # ==============================================================================
1545 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1546     """
1547     3DVAR PSAS
1548     """
1549     #
1550     # Initializations
1551     # ---------------
1552     #
1553     # Operators
1554     Hm = HO["Direct"].appliedTo
1555     #
1556     # Possible use of a precomputed H(Xb) vector
1557     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1558         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1559     else:
1560         HXb = Hm( Xb )
1561     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1562     if Y.size != HXb.size:
1563         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1564     if max(Y.shape) != max(HXb.shape):
1565         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
1566     #
1567     if selfA._toStore("JacobianMatrixAtBackground"):
1568         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1569         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1570         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1571     #
1572     Ht = HO["Tangent"].asMatrix(Xb)
1573     BHT = B * Ht.T
1574     HBHTpR = R + Ht * BHT
1575     Innovation = Y - HXb
1576     #
1577     # Starting point of the optimization
1578     Xini = numpy.zeros(Xb.shape)
1579     #
1580     # Definition of the cost function
1581     # --------------------------------
1582     def CostFunction(w):
1583         _W = numpy.asmatrix(numpy.ravel( w )).T
1584         if selfA._parameters["StoreInternalVariables"] or \
1585             selfA._toStore("CurrentState") or \
1586             selfA._toStore("CurrentOptimum"):
1587             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1588         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1589             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1590             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1591         if selfA._toStore("InnovationAtCurrentState"):
1592             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1593         #
1594         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
1595         Jo  = float( - _W.T * Innovation )
1596         J   = Jb + Jo
1597         #
1598         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1599         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1600         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1601         selfA.StoredVariables["CostFunctionJ" ].store( J )
1602         if selfA._toStore("IndexOfOptimum") or \
1603             selfA._toStore("CurrentOptimum") or \
1604             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1605             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1606             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1607             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1608             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1609         if selfA._toStore("IndexOfOptimum"):
1610             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1611         if selfA._toStore("CurrentOptimum"):
1612             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1613         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1614             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1615         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1616             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1617         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1618             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1619         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1620             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1621         return J
1622     #
1623     def GradientOfCostFunction(w):
1624         _W = numpy.asmatrix(numpy.ravel( w )).T
1625         GradJb  = HBHTpR * _W
1626         GradJo  = - Innovation
1627         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1628         return GradJ
1629     #
1630     # Minimization of the cost function
1631     # ----------------------------------
1632     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1633     #
1634     if selfA._parameters["Minimizer"] == "LBFGSB":
1635         if "0.19" <= scipy.version.version <= "1.1.0":
1636             import lbfgsbhlt as optimiseur
1637         else:
1638             import scipy.optimize as optimiseur
1639         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1640             func        = CostFunction,
1641             x0          = Xini,
1642             fprime      = GradientOfCostFunction,
1643             args        = (),
1644             bounds      = selfA._parameters["Bounds"],
1645             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1646             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1647             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1648             iprint      = selfA._parameters["optiprint"],
1649             )
1650         nfeval = Informations['funcalls']
1651         rc     = Informations['warnflag']
1652     elif selfA._parameters["Minimizer"] == "TNC":
1653         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1654             func        = CostFunction,
1655             x0          = Xini,
1656             fprime      = GradientOfCostFunction,
1657             args        = (),
1658             bounds      = selfA._parameters["Bounds"],
1659             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1660             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1661             ftol        = selfA._parameters["CostDecrementTolerance"],
1662             messages    = selfA._parameters["optmessages"],
1663             )
1664     elif selfA._parameters["Minimizer"] == "CG":
1665         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1666             f           = CostFunction,
1667             x0          = Xini,
1668             fprime      = GradientOfCostFunction,
1669             args        = (),
1670             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1671             gtol        = selfA._parameters["GradientNormTolerance"],
1672             disp        = selfA._parameters["optdisp"],
1673             full_output = True,
1674             )
1675     elif selfA._parameters["Minimizer"] == "NCG":
1676         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1677             f           = CostFunction,
1678             x0          = Xini,
1679             fprime      = GradientOfCostFunction,
1680             args        = (),
1681             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1682             avextol     = selfA._parameters["CostDecrementTolerance"],
1683             disp        = selfA._parameters["optdisp"],
1684             full_output = True,
1685             )
1686     elif selfA._parameters["Minimizer"] == "BFGS":
1687         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1688             f           = CostFunction,
1689             x0          = Xini,
1690             fprime      = GradientOfCostFunction,
1691             args        = (),
1692             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1693             gtol        = selfA._parameters["GradientNormTolerance"],
1694             disp        = selfA._parameters["optdisp"],
1695             full_output = True,
1696             )
1697     else:
1698         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1699     #
1700     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1701     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1702     #
1703     # Correction to work around a TNC bug in the returned Minimum
1704     # ------------------------------------------------------------
1705     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1706         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1707         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1708     else:
1709         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1710     #
1711     # Retrieving the analysis
1712     # ------------------------
1713     Xa = Minimum
1714     #
1715     selfA.StoredVariables["Analysis"].store( Xa )
1716     #
1717     if selfA._toStore("OMA") or \
1718         selfA._toStore("SigmaObs2") or \
1719         selfA._toStore("SimulationQuantiles") or \
1720         selfA._toStore("SimulatedObservationAtOptimum"):
1721         if selfA._toStore("SimulatedObservationAtCurrentState"):
1722             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1723         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1724             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1725         else:
1726             HXa = Hm( Xa )
1727     #
1728     # Computation of the analysis error covariance
1729     # ---------------------------------------------
1730     if selfA._toStore("APosterioriCovariance") or \
1731         selfA._toStore("SimulationQuantiles") or \
1732         selfA._toStore("JacobianMatrixAtOptimum") or \
1733         selfA._toStore("KalmanGainAtOptimum"):
1734         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1735         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1736     if selfA._toStore("APosterioriCovariance") or \
1737         selfA._toStore("SimulationQuantiles") or \
1738         selfA._toStore("KalmanGainAtOptimum"):
1739         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1740         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1741     if selfA._toStore("APosterioriCovariance") or \
1742         selfA._toStore("SimulationQuantiles"):
1743         BI = B.getI()
1744         RI = R.getI()
1745         HessienneI = []
1746         nb = Xa.size
1747         for i in range(nb):
1748             _ee    = numpy.matrix(numpy.zeros(nb)).T
1749             _ee[i] = 1.
1750             _HtEE  = numpy.dot(HtM,_ee)
1751             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1752             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1753         HessienneI = numpy.matrix( HessienneI )
1754         A = HessienneI.I
1755         if min(A.shape) != max(A.shape):
1756             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1757         if (numpy.diag(A) < 0).any():
1758             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1759         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1760             try:
1761                 L = numpy.linalg.cholesky( A )
1762             except:
1763                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1764     if selfA._toStore("APosterioriCovariance"):
1765         selfA.StoredVariables["APosterioriCovariance"].store( A )
1766     if selfA._toStore("JacobianMatrixAtOptimum"):
1767         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1768     if selfA._toStore("KalmanGainAtOptimum"):
1769         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1770         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1771         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1772     #
1773     # Additional calculations and/or storage
1774     # ---------------------------------------
1775     if selfA._toStore("Innovation") or \
1776         selfA._toStore("SigmaObs2") or \
1777         selfA._toStore("MahalanobisConsistency") or \
1778         selfA._toStore("OMB"):
1779         d  = Y - HXb
1780     if selfA._toStore("Innovation"):
1781         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1782     if selfA._toStore("BMA"):
1783         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1784     if selfA._toStore("OMA"):
1785         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1786     if selfA._toStore("OMB"):
1787         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1788     if selfA._toStore("SigmaObs2"):
1789         TraceR = R.trace(Y.size)
1790         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1791     if selfA._toStore("MahalanobisConsistency"):
1792         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1793     if selfA._toStore("SimulationQuantiles"):
1794         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1795         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1796         YfQ  = None
1797         for i in range(nech):
1798             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1799                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1800                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1801                 Yr = HXa + dYr
1802             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1803                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1804                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1805             if YfQ is None:
1806                 YfQ = Yr
1807             else:
1808                 YfQ = numpy.hstack((YfQ,Yr))
1809         YfQ.sort(axis=-1)
1810         YQ = None
1811         for quantile in selfA._parameters["Quantiles"]:
1812             if not (0. <= float(quantile) <= 1.): continue
1813             indice = int(nech * float(quantile) - 1./nech)
1814             if YQ is None: YQ = YfQ[:,indice]
1815             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1816         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1817     if selfA._toStore("SimulatedObservationAtBackground"):
1818         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1819     if selfA._toStore("SimulatedObservationAtOptimum"):
1820         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1821     #
1822     return 0
1823
1824 # ==============================================================================
1825 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1826     """
1827     4DVAR
1828     """
1829     #
1830     # Initializations
1831     # ---------------
1832     #
1833     # Operators
1834     Hm = HO["Direct"].appliedControledFormTo
1835     Mm = EM["Direct"].appliedControledFormTo
1836     #
1837     if CM is not None and "Tangent" in CM and U is not None:
1838         Cm = CM["Tangent"].asMatrix(Xb)
1839     else:
1840         Cm = None
1841     #
1842     def Un(_step):
1843         if U is not None:
1844             if hasattr(U,"store") and 1<=_step<len(U) :
1845                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
1846             elif hasattr(U,"store") and len(U)==1:
1847                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1848             else:
1849                 _Un = numpy.asmatrix(numpy.ravel( U )).T
1850         else:
1851             _Un = None
1852         return _Un
1853     def CmUn(_xn,_un):
1854         if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
1855             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
1856             _CmUn = _Cm * _un
1857         else:
1858             _CmUn = 0.
1859         return _CmUn
1860     #
1861     # Remark: the observations are used starting from time step number 1,
1862     # and are stored in Yo indexed accordingly. Step 0 is therefore not used,
1863     # since the first assimilation stage begins with the observation of
1864     # step 1.
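         # For instance, with duration = Y.stepnumber(), the cost function below
         # loops over step = 0 .. duration-2 and compares the propagated states
         # with the observations Y[1] .. Y[duration-1].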
1865     #
1866     # Number of steps equal to the number of observation steps
1867     if hasattr(Y,"stepnumber"):
1868         duration = Y.stepnumber()
1869     else:
1870         duration = 2
1871     #
1872     # Precompute the inverses of B and R
1873     BI = B.getI()
1874     RI = R.getI()
1875     #
1876     # Starting point of the optimization
1877     Xini = selfA._parameters["InitializationPoint"]
1878     #
1879     # Definition of the cost function
1880     # --------------------------------
1881     selfA.DirectCalculation = [None,] # Step 0 is not observed
1882     selfA.DirectInnovation  = [None,] # Step 0 is not observed
1883     def CostFunction(x):
1884         _X  = numpy.asmatrix(numpy.ravel( x )).T
1885         if selfA._parameters["StoreInternalVariables"] or \
1886             selfA._toStore("CurrentState") or \
1887             selfA._toStore("CurrentOptimum"):
1888             selfA.StoredVariables["CurrentState"].store( _X )
1889         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
1890         selfA.DirectCalculation = [None,]
1891         selfA.DirectInnovation  = [None,]
1892         Jo  = 0.
1893         _Xn = _X
1894         for step in range(0,duration-1):
1895             if hasattr(Y,"store"):
1896                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
1897             else:
1898                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
1899             _Un = Un(step)
1900             #
1901             # Evolution step
1902             if selfA._parameters["EstimationOf"] == "State":
1903                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
1904             elif selfA._parameters["EstimationOf"] == "Parameters":
1905                 pass
1906             #
1907             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1908                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
1909                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
1910             #
1911             # Difference with the observations
1912             if selfA._parameters["EstimationOf"] == "State":
1913                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
1914             elif selfA._parameters["EstimationOf"] == "Parameters":
1915                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
1916             #
1917             # Store the state
1918             selfA.DirectCalculation.append( _Xn )
1919             selfA.DirectInnovation.append( _YmHMX )
1920             #
1921             # Add the contribution to the observation cost term
1922             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
1923         J = Jb + Jo
1924         #
1925         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1926         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1927         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1928         selfA.StoredVariables["CostFunctionJ" ].store( J )
1929         if selfA._toStore("IndexOfOptimum") or \
1930             selfA._toStore("CurrentOptimum") or \
1931             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1932             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1933             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1934             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1935         if selfA._toStore("IndexOfOptimum"):
1936             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1937         if selfA._toStore("CurrentOptimum"):
1938             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1939         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1940             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1941         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1942             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1943         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1944             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1945         return J
1946     #
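         # Informal sketch of the adjoint recursion coded below: with d_k the stored
         # innovations and g initialized to 0, each backward step performs
         #
         #    g  <-  M_k^T ( g + H_k^T R^{-1} d_k ),    k = duration-1, ..., 1
         #
         # and the full gradient is then  gradJ = B^{-1} (x0 - Xb) - g.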
1947     def GradientOfCostFunction(x):
1948         _X      = numpy.asmatrix(numpy.ravel( x )).T
1949         GradJb  = BI * (_X - Xb)
1950         GradJo  = 0.
1951         for step in range(duration-1,0,-1):
1952             # Retrieve the last stored evolution state
1953             _Xn = selfA.DirectCalculation.pop()
1954             # Retrieve the last stored innovation
1955             _YmHMX = selfA.DirectInnovation.pop()
1956             # Compute the adjoints
1957             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1958             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
1959             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1960             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
1961             # Compute the gradient by the adjoint state
1962             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
1963             GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
1964         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
1965         return GradJ
1966     #
1967     # Minimization of the cost function
1968     # ----------------------------------
1969     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1970     #
1971     if selfA._parameters["Minimizer"] == "LBFGSB":
1972         if "0.19" <= scipy.version.version <= "1.1.0":
1973             import lbfgsbhlt as optimiseur
1974         else:
1975             import scipy.optimize as optimiseur
1976         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1977             func        = CostFunction,
1978             x0          = Xini,
1979             fprime      = GradientOfCostFunction,
1980             args        = (),
1981             bounds      = selfA._parameters["Bounds"],
1982             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1983             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1984             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1985             iprint      = selfA._parameters["optiprint"],
1986             )
1987         nfeval = Informations['funcalls']
1988         rc     = Informations['warnflag']
1989     elif selfA._parameters["Minimizer"] == "TNC":
1990         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1991             func        = CostFunction,
1992             x0          = Xini,
1993             fprime      = GradientOfCostFunction,
1994             args        = (),
1995             bounds      = selfA._parameters["Bounds"],
1996             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1997             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1998             ftol        = selfA._parameters["CostDecrementTolerance"],
1999             messages    = selfA._parameters["optmessages"],
2000             )
2001     elif selfA._parameters["Minimizer"] == "CG":
2002         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2003             f           = CostFunction,
2004             x0          = Xini,
2005             fprime      = GradientOfCostFunction,
2006             args        = (),
2007             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2008             gtol        = selfA._parameters["GradientNormTolerance"],
2009             disp        = selfA._parameters["optdisp"],
2010             full_output = True,
2011             )
2012     elif selfA._parameters["Minimizer"] == "NCG":
2013         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2014             f           = CostFunction,
2015             x0          = Xini,
2016             fprime      = GradientOfCostFunction,
2017             args        = (),
2018             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2019             avextol     = selfA._parameters["CostDecrementTolerance"],
2020             disp        = selfA._parameters["optdisp"],
2021             full_output = True,
2022             )
2023     elif selfA._parameters["Minimizer"] == "BFGS":
2024         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2025             f           = CostFunction,
2026             x0          = Xini,
2027             fprime      = GradientOfCostFunction,
2028             args        = (),
2029             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2030             gtol        = selfA._parameters["GradientNormTolerance"],
2031             disp        = selfA._parameters["optdisp"],
2032             full_output = True,
2033             )
2034     else:
2035         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2036     #
2037     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2038     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2039     #
2040     # Correction to work around a TNC bug in the returned Minimum
2041     # ------------------------------------------------------------
2042     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2043         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2044     #
2045     # Retrieving the analysis
2046     # ------------------------
2047     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2048     #
2049     selfA.StoredVariables["Analysis"].store( Xa )
2050     #
2051     # Additional calculations and/or storage
2052     # ---------------------------------------
2053     if selfA._toStore("BMA"):
2054         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2055     #
2056     return 0
2057
2058 # ==============================================================================
2059 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2060     """
2061     Stochastic EnKF
2062     """
2063     if selfA._parameters["EstimationOf"] == "Parameters":
2064         selfA._parameters["StoreInternalVariables"] = True
2065     #
2066     # Operators
2067     # ---------
2068     H = HO["Direct"].appliedControledFormTo
2069     #
2070     if selfA._parameters["EstimationOf"] == "State":
2071         M = EM["Direct"].appliedControledFormTo
2072     #
2073     if CM is not None and "Tangent" in CM and U is not None:
2074         Cm = CM["Tangent"].asMatrix(Xb)
2075     else:
2076         Cm = None
2077     #
2078     # Number of steps equal to the number of observation steps
2079     # ---------------------------------------------------------
2080     if hasattr(Y,"stepnumber"):
2081         duration = Y.stepnumber()
2082         __p = numpy.cumprod(Y.shape())[-1]
2083     else:
2084         duration = 2
2085         __p = numpy.array(Y).size
2086     #
2087     # Precompute the inverses of B and R
2088     # ----------------------------------
2089     if selfA._parameters["StoreInternalVariables"] \
2090         or selfA._toStore("CostFunctionJ") \
2091         or selfA._toStore("CostFunctionJb") \
2092         or selfA._toStore("CostFunctionJo") \
2093         or selfA._toStore("CurrentOptimum") \
2094         or selfA._toStore("APosterioriCovariance"):
2095         BI = B.getI()
2096         RI = R.getI()
2097     #
2098     # Initialization
2099     # --------------
2100     __n = Xb.size
2101     __m = selfA._parameters["NumberOfMembers"]
2102     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2103     else:                         Pn = B
2104     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2105     else:                         Rn = R
2106     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2107     else:                         Qn = Q
2108     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
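         # Xn: initial background ensemble, one member per column of an __n x __m array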
2109     #
2110     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2111         selfA.StoredVariables["Analysis"].store( Xb )
2112         if selfA._toStore("APosterioriCovariance"):
2113             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2114             covarianceXa = Pn
2115     #
2116     previousJMinimum = numpy.finfo(float).max
2117     #
2118     for step in range(duration-1):
2119         if hasattr(Y,"store"):
2120             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2121         else:
2122             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2123         #
2124         if U is not None:
2125             if hasattr(U,"store") and len(U)>1:
2126                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2127             elif hasattr(U,"store") and len(U)==1:
2128                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2129             else:
2130                 Un = numpy.asmatrix(numpy.ravel( U )).T
2131         else:
2132             Un = None
2133         #
2134         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2135             Xn = CovarianceInflation( Xn,
2136                 selfA._parameters["InflationType"],
2137                 selfA._parameters["InflationFactor"],
2138                 )
2139         #
2140         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2141             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2142                 argsAsSerie = True,
2143                 returnSerieAsArrayMatrix = True )
2144             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2145             Xn_predicted = EMX + qi
2146             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2147                 argsAsSerie = True,
2148                 returnSerieAsArrayMatrix = True )
2149             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2150                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2151                 Xn_predicted = Xn_predicted + Cm * Un
2152         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2153             # ---> By principle, M = Id, Q = 0
2154             Xn_predicted = Xn
2155             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2156                 argsAsSerie = True,
2157                 returnSerieAsArrayMatrix = True )
2158         #
2159         # Mean of forecast and observation of forecast
2160         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2161         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2162         #
2163         #--------------------------
2164         if VariantM == "KalmanFilterFormula05":
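            # Stochastic EnKF analysis with perturbed observations: the gain is built
            # from the ensemble estimates PfHT ~ Cov(Xf, H(Xf)) and HPfHT ~ Cov(H(Xf)),
            #     K = PfHT ( R + HPfHT )^-1  ~  Pf H^T ( H Pf H^T + R )^-1
            # and each member is updated with an independently perturbed observation,
            #     Xa_i = Xf_i + K ( y + r_i - H(Xf_i) ),   r_i ~ N(0,R)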
2165             PfHT, HPfHT = 0., 0.
2166             for i in range(__m):
2167                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2168                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2169                 PfHT  += Exfi * Eyfi.T
2170                 HPfHT += Eyfi * Eyfi.T
2171             PfHT  = (1./(__m-1)) * PfHT
2172             HPfHT = (1./(__m-1)) * HPfHT
2173             Kn     = PfHT * ( R + HPfHT ).I
2174             del PfHT, HPfHT
2175             #
2176             for i in range(__m):
2177                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2178                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2179         #--------------------------
2180         elif VariantM == "KalmanFilterFormula16":
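            # Same stochastic analysis written with scaled anomalies: EpY is an
            # ensemble of observations perturbed according to R (with mean EpYm), EaX
            # holds the forecast anomalies and EaY the observation-space anomalies
            # minus the perturbation anomalies, both divided by sqrt(m-1). The gain
            #     K = EaX EaY^T ( EaY EaY^T )^-1
            # is then applied member by member to the innovations EpY_i - H(Xf_i).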
2181             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2182             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2183             #
2184             EaX   = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2185             EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
2186             #
2187             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2188             #
2189             for i in range(__m):
2190                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2191         #--------------------------
2192         else:
2193             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2194         #
2195         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2196             Xn = CovarianceInflation( Xn,
2197                 selfA._parameters["InflationType"],
2198                 selfA._parameters["InflationFactor"],
2199                 )
2200         #
2201         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2202         #--------------------------
2203         #
2204         if selfA._parameters["StoreInternalVariables"] \
2205             or selfA._toStore("CostFunctionJ") \
2206             or selfA._toStore("CostFunctionJb") \
2207             or selfA._toStore("CostFunctionJo") \
2208             or selfA._toStore("APosterioriCovariance") \
2209             or selfA._toStore("InnovationAtCurrentAnalysis") \
2210             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2211             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2212             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2213             _Innovation = Ynpu - _HXa
2214         #
2215         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2216         # ---> with analysis
2217         selfA.StoredVariables["Analysis"].store( Xa )
2218         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2219             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2220         if selfA._toStore("InnovationAtCurrentAnalysis"):
2221             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2222         # ---> with current state
2223         if selfA._parameters["StoreInternalVariables"] \
2224             or selfA._toStore("CurrentState"):
2225             selfA.StoredVariables["CurrentState"].store( Xn )
2226         if selfA._toStore("ForecastState"):
2227             selfA.StoredVariables["ForecastState"].store( EMX )
2228         if selfA._toStore("BMA"):
2229             selfA.StoredVariables["BMA"].store( EMX - Xa )
2230         if selfA._toStore("InnovationAtCurrentState"):
2231             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2232         if selfA._toStore("SimulatedObservationAtCurrentState") \
2233             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2234             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2235         # ---> others
2236         if selfA._parameters["StoreInternalVariables"] \
2237             or selfA._toStore("CostFunctionJ") \
2238             or selfA._toStore("CostFunctionJb") \
2239             or selfA._toStore("CostFunctionJo") \
2240             or selfA._toStore("CurrentOptimum") \
2241             or selfA._toStore("APosterioriCovariance"):
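            # Cost function evaluated at the analysis mean, with the usual quadratic
            # decomposition:
            #     Jb = 1/2 (Xa - Xb)^T B^-1 (Xa - Xb)
            #     Jo = 1/2 (Y - H(Xa))^T R^-1 (Y - H(Xa))
            #     J  = Jb + Jo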
2242             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2243             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2244             J   = Jb + Jo
2245             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2246             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2247             selfA.StoredVariables["CostFunctionJ" ].store( J )
2248             #
2249             if selfA._toStore("IndexOfOptimum") \
2250                 or selfA._toStore("CurrentOptimum") \
2251                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2252                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2253                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2254                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2255                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2256             if selfA._toStore("IndexOfOptimum"):
2257                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2258             if selfA._toStore("CurrentOptimum"):
2259                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2260             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2261                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2262             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2263                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2264             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2265                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2266             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2267                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2268         if selfA._toStore("APosterioriCovariance"):
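            # A posteriori covariance estimated from the analysis anomalies,
            #     Pa ~ 1/(m-1) * sum_i (Xa_i - Xa_mean)(Xa_i - Xa_mean)^T
            # then symmetrized to limit the accumulation of round-off errors.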
2269             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2270             Pn = Eai @ Eai.T
2271             Pn = 0.5 * (Pn + Pn.T)
2272             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2273         if selfA._parameters["EstimationOf"] == "Parameters" \
2274             and J < previousJMinimum:
2275             previousJMinimum    = J
2276             XaMin               = Xa
2277             if selfA._toStore("APosterioriCovariance"):
2278                 covarianceXaMin = Pn
2279     #
2280     # Additional final storage of the optimum in parameter estimation
2281     # ----------------------------------------------------------------
2282     if selfA._parameters["EstimationOf"] == "Parameters":
2283         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2284         selfA.StoredVariables["Analysis"].store( XaMin )
2285         if selfA._toStore("APosterioriCovariance"):
2286             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2287         if selfA._toStore("BMA"):
2288             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2289     #
2290     return 0
2291
2292 # ==============================================================================
2293 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2294     """
2295     Ensemble-Transform EnKF
2296     """
2297     if selfA._parameters["EstimationOf"] == "Parameters":
2298         selfA._parameters["StoreInternalVariables"] = True
2299     #
2300     # Operators
2301     # ---------
2302     H = HO["Direct"].appliedControledFormTo
2303     #
2304     if selfA._parameters["EstimationOf"] == "State":
2305         M = EM["Direct"].appliedControledFormTo
2306     #
2307     if CM is not None and "Tangent" in CM and U is not None:
2308         Cm = CM["Tangent"].asMatrix(Xb)
2309     else:
2310         Cm = None
2311     #
2312     # Number of time steps equal to the number of observation steps
2313     # --------------------------------------------------------------
2314     if hasattr(Y,"stepnumber"):
2315         duration = Y.stepnumber()
2316         __p = numpy.cumprod(Y.shape())[-1]
2317     else:
2318         duration = 2
2319         __p = numpy.array(Y).size
2320     #
2321     # Precompute the inverses of B and R
2322     # -----------------------------------
2323     if selfA._parameters["StoreInternalVariables"] \
2324         or selfA._toStore("CostFunctionJ") \
2325         or selfA._toStore("CostFunctionJb") \
2326         or selfA._toStore("CostFunctionJo") \
2327         or selfA._toStore("CurrentOptimum") \
2328         or selfA._toStore("APosterioriCovariance"):
2329         BI = B.getI()
2330         RI = R.getI()
2331     elif VariantM != "KalmanFilterFormula":
2332         RI = R.getI()
2333     if VariantM == "KalmanFilterFormula":
2334         RIdemi = R.choleskyI()
2335     #
2336     # Initialization
2337     # --------------
2338     __n = Xb.size
2339     __m = selfA._parameters["NumberOfMembers"]
2340     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2341     else:                         Pn = B
2342     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2343     else:                         Rn = R
2344     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2345     else:                         Qn = Q
2346     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2347     #
2348     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2349         selfA.StoredVariables["Analysis"].store( Xb )
2350         if selfA._toStore("APosterioriCovariance"):
2351             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2352             covarianceXa = Pn
2353     #
2354     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Offset used by the optimum lookup below
2355     #
2356     for step in range(duration-1):
2357         if hasattr(Y,"store"):
2358             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2359         else:
2360             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2361         #
2362         if U is not None:
2363             if hasattr(U,"store") and len(U)>1:
2364                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2365             elif hasattr(U,"store") and len(U)==1:
2366                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2367             else:
2368                 Un = numpy.asmatrix(numpy.ravel( U )).T
2369         else:
2370             Un = None
2371         #
2372         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2373             Xn = CovarianceInflation( Xn,
2374                 selfA._parameters["InflationType"],
2375                 selfA._parameters["InflationFactor"],
2376                 )
2377         #
2378         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2379             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2380                 argsAsSerie = True,
2381                 returnSerieAsArrayMatrix = True )
2382             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2383             Xn_predicted = EMX + qi
2384             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2385                 argsAsSerie = True,
2386                 returnSerieAsArrayMatrix = True )
2387             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2388                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2389                 Xn_predicted = Xn_predicted + Cm * Un
2390         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2391             # ---> By principle, M = Id, Q = 0
2392             Xn_predicted = Xn
2393             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2394                 argsAsSerie = True,
2395                 returnSerieAsArrayMatrix = True )
2396         #
2397         # Mean of forecast and observation of forecast
2398         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2399         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2400         #
2401         # Anomalies
2402         EaX   = EnsembleOfAnomalies( Xn_predicted )
2403         EaHX  = numpy.array(HX_predicted - Hfm)
2404         #
2405         #--------------------------
2406         if VariantM == "KalmanFilterFormula":
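            # Deterministic ETKF analysis in ensemble space, in the usual square-root
            # notation:
            #     S  = R^-1/2 EaHX / sqrt(m-1),    d  = R^-1/2 ( y - Hfm )
            #     T  = ( I + S^T S )^-1,           wa = T S^T d
            #     Xa = Xfm + EaX/sqrt(m-1) ( wa + sqrt(m-1) T^1/2 )
            # so the analysed anomalies are the forecast anomalies right-multiplied by
            # the symmetric square root of T.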
2407             mS    = RIdemi * EaHX / numpy.sqrt(__m-1)
2408             delta = RIdemi * ( Ynpu - Hfm )
2409             mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2410             vw    = mT @ mS.transpose() @ delta
2411             #
2412             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2413             mU    = numpy.eye(__m)
2414             #
2415             EaX   = EaX / numpy.sqrt(__m-1)
2416             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
2417         #--------------------------
2418         elif VariantM == "Variational":
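            # Variational form of the same analysis: the weights w of the combination
            # of forecast anomalies minimize
            #     J(w) = 1/2 (y - H(xf) - EaHX w)^T R^-1 (y - H(xf) - EaHX w) + (m-1)/2 w^T w
            # (solved below by conjugate gradient), and the analysed anomalies are
            # rebuilt from the square root of the inverse Hessian of J at the minimum.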
2419             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2420             def CostFunction(w):
2421                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2422                 _Jo = 0.5 * _A.T @ (RI * _A)
2423                 _Jb = 0.5 * (__m-1) * w.T @ w
2424                 _J  = _Jo + _Jb
2425                 return float(_J)
2426             def GradientOfCostFunction(w):
2427                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2428                 _GradJo = - EaHX.T @ (RI * _A)
2429                 _GradJb = (__m-1) * w.reshape((__m,1))
2430                 _GradJ  = _GradJo + _GradJb
2431                 return numpy.ravel(_GradJ)
2432             vw = scipy.optimize.fmin_cg(
2433                 f           = CostFunction,
2434                 x0          = numpy.zeros(__m),
2435                 fprime      = GradientOfCostFunction,
2436                 args        = (),
2437                 disp        = False,
2438                 )
2439             #
2440             Hto = EaHX.T @ (RI * EaHX)
2441             Htb = (__m-1) * numpy.eye(__m)
2442             Hta = Hto + Htb
2443             #
2444             Pta = numpy.linalg.inv( Hta )
2445             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2446             #
2447             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2448         #--------------------------
2449         elif VariantM == "FiniteSize11": # Boc2011 gauge
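            # "Finite-size" variant: the quadratic background term of the ensemble
            # cost function is replaced by the non-quadratic prior
            #     Jb(w) = m/2 * log( 1 + 1/m + w^T w )
            # which accounts for the sampling error of a finite ensemble (Boc2011
            # gauge), the observation term being unchanged.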
2450             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2451             def CostFunction(w):
2452                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2453                 _Jo = 0.5 * _A.T @ (RI * _A)
2454                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2455                 _J  = _Jo + _Jb
2456                 return float(_J)
2457             def GradientOfCostFunction(w):
2458                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2459                 _GradJo = - EaHX.T @ (RI * _A)
2460                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2461                 _GradJ  = _GradJo + _GradJb
2462                 return numpy.ravel(_GradJ)
2463             vw = scipy.optimize.fmin_cg(
2464                 f           = CostFunction,
2465                 x0          = numpy.zeros(__m),
2466                 fprime      = GradientOfCostFunction,
2467                 args        = (),
2468                 disp        = False,
2469                 )
2470             #
2471             Hto = EaHX.T @ (RI * EaHX)
2472             Htb = __m * \
2473                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2474                 / (1 + 1/__m + vw.T @ vw)**2
2475             Hta = Hto + Htb
2476             #
2477             Pta = numpy.linalg.inv( Hta )
2478             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2479             #
2480             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
2481         #--------------------------
2482         elif VariantM == "FiniteSize15": # Boc2015 gauge
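            # Finite-size variant with the Boc2015 gauge: same principle as above,
            # with the prior term weighted by (m+1)/2 instead of m/2,
            #     Jb(w) = (m+1)/2 * log( 1 + 1/m + w^T w )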
2483             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2484             def CostFunction(w):
2485                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2486                 _Jo = 0.5 * _A.T @ (RI * _A)
2487                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2488                 _J  = _Jo + _Jb
2489                 return float(_J)
2490             def GradientOfCostFunction(w):
2491                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2492                 _GradJo = - EaHX.T @ (RI * _A)
2493                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2494                 _GradJ  = _GradJo + _GradJb
2495                 return numpy.ravel(_GradJ)
2496             vw = scipy.optimize.fmin_cg(
2497                 f           = CostFunction,
2498                 x0          = numpy.zeros(__m),
2499                 fprime      = GradientOfCostFunction,
2500                 args        = (),
2501                 disp        = False,
2502                 )
2503             #
2504             Hto = EaHX.T @ (RI * EaHX)
2505             Htb = (__m+1) * \
2506                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2507                 / (1 + 1/__m + vw.T @ vw)**2
2508             Hta = Hto + Htb
2509             #
2510             Pta = numpy.linalg.inv( Hta )
2511             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2512             #
2513             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
2514         #--------------------------
2515         elif VariantM == "FiniteSize16": # Boc2016 gauge
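            # Finite-size variant with the Boc2016 gauge: the weights are in addition
            # normalized by (m-1) inside the logarithm,
            #     Jb(w) = (m+1)/2 * log( 1 + 1/m + w^T w / (m-1) )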
2516             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2517             def CostFunction(w):
2518                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2519                 _Jo = 0.5 * _A.T @ (RI * _A)
2520                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2521                 _J  = _Jo + _Jb
2522                 return float(_J)
2523             def GradientOfCostFunction(w):
2524                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2525                 _GradJo = - EaHX.T @ (RI * _A)
2526                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2527                 _GradJ  = _GradJo + _GradJb
2528                 return numpy.ravel(_GradJ)
2529             vw = scipy.optimize.fmin_cg(
2530                 f           = CostFunction,
2531                 x0          = numpy.zeros(__m),
2532                 fprime      = GradientOfCostFunction,
2533                 args        = (),
2534                 disp        = False,
2535                 )
2536             #
2537             Hto = EaHX.T @ (RI * EaHX)
2538             Htb = ((__m+1) / (__m-1)) * \
2539                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
2540                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2541             Hta = Hto + Htb
2542             #
2543             Pta = numpy.linalg.inv( Hta )
2544             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2545             #
2546             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2547         #--------------------------
2548         else:
2549             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2550         #
2551         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2552             Xn = CovarianceInflation( Xn,
2553                 selfA._parameters["InflationType"],
2554                 selfA._parameters["InflationFactor"],
2555                 )
2556         #
2557         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2558         #--------------------------
2559         #
2560         if selfA._parameters["StoreInternalVariables"] \
2561             or selfA._toStore("CostFunctionJ") \
2562             or selfA._toStore("CostFunctionJb") \
2563             or selfA._toStore("CostFunctionJo") \
2564             or selfA._toStore("APosterioriCovariance") \
2565             or selfA._toStore("InnovationAtCurrentAnalysis") \
2566             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2567             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2568             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2569             _Innovation = Ynpu - _HXa
2570         #
2571         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2572         # ---> with analysis
2573         selfA.StoredVariables["Analysis"].store( Xa )
2574         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2575             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2576         if selfA._toStore("InnovationAtCurrentAnalysis"):
2577             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2578         # ---> with current state
2579         if selfA._parameters["StoreInternalVariables"] \
2580             or selfA._toStore("CurrentState"):
2581             selfA.StoredVariables["CurrentState"].store( Xn )
2582         if selfA._toStore("ForecastState"):
2583             selfA.StoredVariables["ForecastState"].store( EMX )
2584         if selfA._toStore("BMA"):
2585             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2586         if selfA._toStore("InnovationAtCurrentState"):
2587             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2588         if selfA._toStore("SimulatedObservationAtCurrentState") \
2589             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2590             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2591         # ---> others
2592         if selfA._parameters["StoreInternalVariables"] \
2593             or selfA._toStore("CostFunctionJ") \
2594             or selfA._toStore("CostFunctionJb") \
2595             or selfA._toStore("CostFunctionJo") \
2596             or selfA._toStore("CurrentOptimum") \
2597             or selfA._toStore("APosterioriCovariance"):
2598             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2599             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2600             J   = Jb + Jo
2601             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2602             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2603             selfA.StoredVariables["CostFunctionJ" ].store( J )
2604             #
2605             if selfA._toStore("IndexOfOptimum") \
2606                 or selfA._toStore("CurrentOptimum") \
2607                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2608                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2609                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2610                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2611                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2612             if selfA._toStore("IndexOfOptimum"):
2613                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2614             if selfA._toStore("CurrentOptimum"):
2615                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2616             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2617                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2618             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2619                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2620             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2621                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2622             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2623                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2624         if selfA._toStore("APosterioriCovariance"):
2625             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2626             Pn = Eai @ Eai.T
2627             Pn = 0.5 * (Pn + Pn.T)
2628             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2629         if selfA._parameters["EstimationOf"] == "Parameters" \
2630             and J < previousJMinimum:
2631             previousJMinimum    = J
2632             XaMin               = Xa
2633             if selfA._toStore("APosterioriCovariance"):
2634                 covarianceXaMin = Pn
2635     #
2636     # Additional final storage of the optimum in parameter estimation
2637     # ----------------------------------------------------------------
2638     if selfA._parameters["EstimationOf"] == "Parameters":
2639         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2640         selfA.StoredVariables["Analysis"].store( XaMin )
2641         if selfA._toStore("APosterioriCovariance"):
2642             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2643         if selfA._toStore("BMA"):
2644             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2645     #
2646     return 0
2647
2648 # ==============================================================================
2649 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2650     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2651     """
2652     Maximum Likelihood Ensemble Filter
2653     """
2654     if selfA._parameters["EstimationOf"] == "Parameters":
2655         selfA._parameters["StoreInternalVariables"] = True
2656     #
2657     # Operators
2658     # ---------
2659     H = HO["Direct"].appliedControledFormTo
2660     #
2661     if selfA._parameters["EstimationOf"] == "State":
2662         M = EM["Direct"].appliedControledFormTo
2663     #
2664     if CM is not None and "Tangent" in CM and U is not None:
2665         Cm = CM["Tangent"].asMatrix(Xb)
2666     else:
2667         Cm = None
2668     #
2669     # Number of time steps equal to the number of observation steps
2670     # --------------------------------------------------------------
2671     if hasattr(Y,"stepnumber"):
2672         duration = Y.stepnumber()
2673         __p = numpy.cumprod(Y.shape())[-1]
2674     else:
2675         duration = 2
2676         __p = numpy.array(Y).size
2677     #
2678     # Precompute the inverses of B and R
2679     # -----------------------------------
2680     if selfA._parameters["StoreInternalVariables"] \
2681         or selfA._toStore("CostFunctionJ") \
2682         or selfA._toStore("CostFunctionJb") \
2683         or selfA._toStore("CostFunctionJo") \
2684         or selfA._toStore("CurrentOptimum") \
2685         or selfA._toStore("APosterioriCovariance"):
2686         BI = B.getI()
2687     RI = R.getI()
2688     #
2689     # Initialization
2690     # --------------
2691     __n = Xb.size
2692     __m = selfA._parameters["NumberOfMembers"]
2693     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2694     else:                         Pn = B
2695     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2696     else:                         Rn = R
2697     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2698     else:                         Qn = Q
2699     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2700     #
2701     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2702         selfA.StoredVariables["Analysis"].store( Xb )
2703         if selfA._toStore("APosterioriCovariance"):
2704             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2705             covarianceXa = Pn
2706     #
2707     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Offset used by the optimum lookup below
2708     #
2709     for step in range(duration-1):
2710         if hasattr(Y,"store"):
2711             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2712         else:
2713             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2714         #
2715         if U is not None:
2716             if hasattr(U,"store") and len(U)>1:
2717                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2718             elif hasattr(U,"store") and len(U)==1:
2719                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2720             else:
2721                 Un = numpy.asmatrix(numpy.ravel( U )).T
2722         else:
2723             Un = None
2724         #
2725         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2726             Xn = CovarianceInflation( Xn,
2727                 selfA._parameters["InflationType"],
2728                 selfA._parameters["InflationFactor"],
2729                 )
2730         #
2731         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2732             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2733                 argsAsSerie = True,
2734                 returnSerieAsArrayMatrix = True )
2735             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2736             Xn_predicted = EMX + qi
2737             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2738                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2739                 Xn_predicted = Xn_predicted + Cm * Un
2740         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2741             # ---> By principle, M = Id, Q = 0
2742             Xn_predicted = Xn
2743         #
2744         #--------------------------
2745         if VariantM == "MLEF13":
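            # MLEF analysis: the weights w of the combination of forecast anomalies
            # are obtained by a Gauss-Newton iteration in ensemble space, the
            # observation operator H being re-applied to the current ensemble at each
            # iteration, so no tangent linear operator is required. With the transform
            # version (BnotT False) the ensemble is rebuilt around the current iterate
            # with the square-root transform Ta; with the bundle version (BnotT True)
            # a finite-difference scaling by _epsilon is used instead.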
2746             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2747             EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2748             Ua  = numpy.eye(__m)
2749             __j = 0
2750             Deltaw = 1
2751             if not BnotT:
2752                 Ta  = numpy.eye(__m)
2753             vw  = numpy.zeros(__m)
2754             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2755                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2756                 #
2757                 if BnotT:
2758                     E1 = vx1 + _epsilon * EaX
2759                 else:
2760                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2761                 #
2762                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2763                     argsAsSerie = True,
2764                     returnSerieAsArrayMatrix = True )
2765                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2766                 #
2767                 if BnotT:
2768                     EaY = (HE2 - vy2) / _epsilon
2769                 else:
2770                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2771                 #
2772                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2773                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2774                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2775                 #
2776                 vw = vw + Deltaw
2777                 #
2778                 if not BnotT:
2779                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2780                 #
2781                 __j = __j + 1
2782             #
2783             if BnotT:
2784                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2785             #
2786             Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
2787         #--------------------------
2788         else:
2789             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2790         #
2791         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2792             Xn = CovarianceInflation( Xn,
2793                 selfA._parameters["InflationType"],
2794                 selfA._parameters["InflationFactor"],
2795                 )
2796         #
2797         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2798         #--------------------------
2799         #
2800         if selfA._parameters["StoreInternalVariables"] \
2801             or selfA._toStore("CostFunctionJ") \
2802             or selfA._toStore("CostFunctionJb") \
2803             or selfA._toStore("CostFunctionJo") \
2804             or selfA._toStore("APosterioriCovariance") \
2805             or selfA._toStore("InnovationAtCurrentAnalysis") \
2806             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2807             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2808             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2809             _Innovation = Ynpu - _HXa
2810         #
2811         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2812         # ---> with analysis
2813         selfA.StoredVariables["Analysis"].store( Xa )
2814         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2815             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2816         if selfA._toStore("InnovationAtCurrentAnalysis"):
2817             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2818         # ---> with current state
2819         if selfA._parameters["StoreInternalVariables"] \
2820             or selfA._toStore("CurrentState"):
2821             selfA.StoredVariables["CurrentState"].store( Xn )
2822         if selfA._toStore("ForecastState"):
2823             selfA.StoredVariables["ForecastState"].store( EMX )
2824         if selfA._toStore("BMA"):
2825             selfA.StoredVariables["BMA"].store( EMX - Xa )
2826         if selfA._toStore("InnovationAtCurrentState"):
2827             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2828         if selfA._toStore("SimulatedObservationAtCurrentState") \
2829             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2830             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2831         # ---> others
2832         if selfA._parameters["StoreInternalVariables"] \
2833             or selfA._toStore("CostFunctionJ") \
2834             or selfA._toStore("CostFunctionJb") \
2835             or selfA._toStore("CostFunctionJo") \
2836             or selfA._toStore("CurrentOptimum") \
2837             or selfA._toStore("APosterioriCovariance"):
2838             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2839             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2840             J   = Jb + Jo
2841             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2842             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2843             selfA.StoredVariables["CostFunctionJ" ].store( J )
2844             #
2845             if selfA._toStore("IndexOfOptimum") \
2846                 or selfA._toStore("CurrentOptimum") \
2847                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2848                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2849                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2850                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2851                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2852             if selfA._toStore("IndexOfOptimum"):
2853                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2854             if selfA._toStore("CurrentOptimum"):
2855                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2856             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2857                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2858             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2859                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2860             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2861                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2862             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2863                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2864         if selfA._toStore("APosterioriCovariance"):
2865             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2866             Pn = Eai @ Eai.T
2867             Pn = 0.5 * (Pn + Pn.T)
2868             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2869         if selfA._parameters["EstimationOf"] == "Parameters" \
2870             and J < previousJMinimum:
2871             previousJMinimum    = J
2872             XaMin               = Xa
2873             if selfA._toStore("APosterioriCovariance"):
2874                 covarianceXaMin = Pn
2875     #
2876     # Additional final storage of the optimum in parameter estimation
2877     # ----------------------------------------------------------------
2878     if selfA._parameters["EstimationOf"] == "Parameters":
2879         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2880         selfA.StoredVariables["Analysis"].store( XaMin )
2881         if selfA._toStore("APosterioriCovariance"):
2882             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2883         if selfA._toStore("BMA"):
2884             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2885     #
2886     return 0
2887
2888 # ==============================================================================
2889 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2890     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2891     """
2892     Iterative EnKF
2893     """
2894     if selfA._parameters["EstimationOf"] == "Parameters":
2895         selfA._parameters["StoreInternalVariables"] = True
2896     #
2897     # Operators
2898     # ---------
2899     H = HO["Direct"].appliedControledFormTo
2900     #
2901     if selfA._parameters["EstimationOf"] == "State":
2902         M = EM["Direct"].appliedControledFormTo
2903     #
2904     if CM is not None and "Tangent" in CM and U is not None:
2905         Cm = CM["Tangent"].asMatrix(Xb)
2906     else:
2907         Cm = None
2908     #
2909     # Number of time steps equal to the number of observation steps
2910     # --------------------------------------------------------------
2911     if hasattr(Y,"stepnumber"):
2912         duration = Y.stepnumber()
2913         __p = numpy.cumprod(Y.shape())[-1]
2914     else:
2915         duration = 2
2916         __p = numpy.array(Y).size
2917     #
2918     # Precompute the inverses of B and R
2919     # -----------------------------------
2920     if selfA._parameters["StoreInternalVariables"] \
2921         or selfA._toStore("CostFunctionJ") \
2922         or selfA._toStore("CostFunctionJb") \
2923         or selfA._toStore("CostFunctionJo") \
2924         or selfA._toStore("CurrentOptimum") \
2925         or selfA._toStore("APosterioriCovariance"):
2926         BI = B.getI()
2927     RI = R.getI()
2928     #
2929     # Initialization
2930     # --------------
2931     __n = Xb.size
2932     __m = selfA._parameters["NumberOfMembers"]
2933     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2934     else:                         Pn = B
2935     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2936     else:                         Rn = R
2937     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2938     else:                         Qn = Q
2939     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2940     #
2941     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2942         selfA.StoredVariables["Analysis"].store( Xb )
2943         if selfA._toStore("APosterioriCovariance"):
2944             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2945             covarianceXa = Pn
2946     #
2947     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Offset used by the optimum lookup below
2948     #
2949     for step in range(duration-1):
2950         if hasattr(Y,"store"):
2951             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2952         else:
2953             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2954         #
2955         if U is not None:
2956             if hasattr(U,"store") and len(U)>1:
2957                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2958             elif hasattr(U,"store") and len(U)==1:
2959                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2960             else:
2961                 Un = numpy.asmatrix(numpy.ravel( U )).T
2962         else:
2963             Un = None
2964         #
2965         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2966             Xn = CovarianceInflation( Xn,
2967                 selfA._parameters["InflationType"],
2968                 selfA._parameters["InflationFactor"],
2969                 )
2970         #
2971         #--------------------------
2972         if VariantM == "IEnKF12":
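            # Iterative EnKF analysis: as in the MLEF, a Gauss-Newton iteration is
            # performed on the weights w in ensemble space, but each iteration also
            # re-propagates the ensemble through the evolution model M before applying
            # H, so the nonlinearity of both operators is taken into account (bundle
            # or transform version depending on BnotT).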
2973             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2974             EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
2975             __j = 0
2976             Deltaw = 1
2977             if not BnotT:
2978                 Ta  = numpy.eye(__m)
2979             vw  = numpy.zeros(__m)
2980             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2981                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2982                 #
2983                 if BnotT:
2984                     E1 = vx1 + _epsilon * EaX
2985                 else:
2986                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2987                 #
2988                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2989                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2990                         argsAsSerie = True,
2991                         returnSerieAsArrayMatrix = True )
2992                 elif selfA._parameters["EstimationOf"] == "Parameters":
2993                     # ---> By principle, M = Id
2994                     E2 = Xn
2995                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2996                 vy1 = H((vx2, Un)).reshape((__p,1))
2997                 #
2998                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2999                     argsAsSerie = True,
3000                     returnSerieAsArrayMatrix = True )
3001                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3002                 #
3003                 if BnotT:
3004                     EaY = (HE2 - vy2) / _epsilon
3005                 else:
3006                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
3007                 #
3008                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
3009                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
3010                 Deltaw = - numpy.linalg.solve(mH,GradJ)
3011                 #
3012                 vw = vw + Deltaw
3013                 #
3014                 if not BnotT:
3015                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
3016                 #
3017                 __j = __j + 1
3018             #
3019             A2 = EnsembleOfAnomalies( E2 )
3020             #
3021             if BnotT:
3022                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
3023                 A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
3024             #
3025             Xn = vx2 + A2
3026         #--------------------------
3027         else:
3028             raise ValueError("VariantM has to be chosen in the authorized methods list.")
3029         #
3030         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3031             Xn = CovarianceInflation( Xn,
3032                 selfA._parameters["InflationType"],
3033                 selfA._parameters["InflationFactor"],
3034                 )
3035         #
3036         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
3037         #--------------------------
3038         #
3039         if selfA._parameters["StoreInternalVariables"] \
3040             or selfA._toStore("CostFunctionJ") \
3041             or selfA._toStore("CostFunctionJb") \
3042             or selfA._toStore("CostFunctionJo") \
3043             or selfA._toStore("APosterioriCovariance") \
3044             or selfA._toStore("InnovationAtCurrentAnalysis") \
3045             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3046             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3047             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
3048             _Innovation = Ynpu - _HXa
3049         #
3050         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3051         # ---> with analysis
3052         selfA.StoredVariables["Analysis"].store( Xa )
3053         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3054             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3055         if selfA._toStore("InnovationAtCurrentAnalysis"):
3056             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3057         # ---> with current state
3058         if selfA._parameters["StoreInternalVariables"] \
3059             or selfA._toStore("CurrentState"):
3060             selfA.StoredVariables["CurrentState"].store( Xn )
3061         if selfA._toStore("ForecastState"):
3062             selfA.StoredVariables["ForecastState"].store( E2 )
3063         if selfA._toStore("BMA"):
3064             selfA.StoredVariables["BMA"].store( E2 - Xa )
3065         if selfA._toStore("InnovationAtCurrentState"):
3066             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
3067         if selfA._toStore("SimulatedObservationAtCurrentState") \
3068             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3069             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
3070         # ---> others
3071         if selfA._parameters["StoreInternalVariables"] \
3072             or selfA._toStore("CostFunctionJ") \
3073             or selfA._toStore("CostFunctionJb") \
3074             or selfA._toStore("CostFunctionJo") \
3075             or selfA._toStore("CurrentOptimum") \
3076             or selfA._toStore("APosterioriCovariance"):
3077             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3078             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3079             J   = Jb + Jo
3080             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3081             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3082             selfA.StoredVariables["CostFunctionJ" ].store( J )
3083             #
3084             if selfA._toStore("IndexOfOptimum") \
3085                 or selfA._toStore("CurrentOptimum") \
3086                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3087                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3088                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3089                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3090                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3091             if selfA._toStore("IndexOfOptimum"):
3092                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3093             if selfA._toStore("CurrentOptimum"):
3094                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3095             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3096                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3097             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3098                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3099             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3100                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3101             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3102                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3103         if selfA._toStore("APosterioriCovariance"):
3104             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
3105             Pn = Eai @ Eai.T
3106             Pn = 0.5 * (Pn + Pn.T)
3107             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3108         if selfA._parameters["EstimationOf"] == "Parameters" \
3109             and J < previousJMinimum:
3110             previousJMinimum    = J
3111             XaMin               = Xa
3112             if selfA._toStore("APosterioriCovariance"):
3113                 covarianceXaMin = Pn
3114     #
3115     # Additional final storage of the optimum in parameter estimation
3116     # ----------------------------------------------------------------
3117     if selfA._parameters["EstimationOf"] == "Parameters":
3118         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3119         selfA.StoredVariables["Analysis"].store( XaMin )
3120         if selfA._toStore("APosterioriCovariance"):
3121             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3122         if selfA._toStore("BMA"):
3123             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3124     #
3125     return 0
3126
3127 # ==============================================================================
3128 if __name__ == "__main__":
3129     print('\n AUTODIAGNOSTIC\n')