[modules/adao.git] / src / daComposant / daCore / NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( paire ):
38     assert len(paire) == 2, "Incorrect number of arguments"
39     X, funcrepr = paire
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = list(sys.path) ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     __HX  = __fonction( __X )
46     return numpy.ravel( __HX )
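# Hedged illustration (not part of the original code): the "funcrepr" mapping
# consumed above is expected to carry three keys; the values below are purely
# hypothetical placeholders for a user-supplied observation function "H".
#
#     funcrepr = {
#         "__userFunction__path" : "/some/user/dir",   # directory containing the user module
#         "__userFunction__modl" : "user_module",      # module file name without its extension
#         "__userFunction__name" : "H",                # name of the function inside that module
#     }
#     HX = ExecuteFunction( (numpy.arange(3.), funcrepr) )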
47
48 # ==============================================================================
49 class FDApproximation(object):
50     """
51     This class provides an interface for defining approximated operators. When
52     creating an object with a function "Function", one obtains an object that
53     exposes the 3 methods "DirectOperator", "TangentOperator" and
54     "AdjointOperator". The FD approximation is controlled by the multiplicative
55     increment "increment", 1% by default, or by the fixed increment "dX" which
56     is then multiplied by "increment" (hence in %), and centered finite
57     differences are used if the boolean "centeredDF" is true.
58     """
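    # A minimal usage sketch (illustration only, kept as a comment so that the
    # module behaviour is unchanged): assuming a user-supplied observation
    # function "H" acting on a state vector, the approximated operators could
    # be obtained as follows.
    #
    #     import numpy
    #     def H(x):
    #         x = numpy.ravel(x)
    #         return numpy.array([x[0]**2, x[0]*x[1]])
    #     fda = FDApproximation( Function = H, increment = 0.01, centeredDF = True )
    #     HX  = fda.DirectOperator( numpy.array([1., 2.]) )                   # direct value H(X)
    #     J   = fda.TangentMatrix( numpy.array([1., 2.]) )                    # 2x2 FD Jacobian
    #     JtY = fda.AdjointOperator( (numpy.array([1., 2.]), numpy.ones(2)) ) # J^T applied to Y
    #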
59     def __init__(self,
60             name                  = "FDApproximation",
61             Function              = None,
62             centeredDF            = False,
63             increment             = 0.01,
64             dX                    = None,
65             avoidingRedundancy    = True,
66             toleranceInRedundancy = 1.e-18,
67             lenghtOfRedundancy    = -1,
68             mpEnabled             = False,
69             mpWorkers             = None,
70             mfEnabled             = False,
71             ):
72         self.__name = str(name)
73         if mpEnabled:
74             try:
75                 import multiprocessing
76                 self.__mpEnabled = True
77             except ImportError:
78                 self.__mpEnabled = False
79         else:
80             self.__mpEnabled = False
81         self.__mpWorkers = mpWorkers
82         if self.__mpWorkers is not None and self.__mpWorkers < 1:
83             self.__mpWorkers = None
84         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
85         #
86         if mfEnabled:
87             self.__mfEnabled = True
88         else:
89             self.__mfEnabled = False
90         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
91         #
92         if avoidingRedundancy:
93             self.__avoidRC = True
94             self.__tolerBP = float(toleranceInRedundancy)
95             self.__lenghtRJ = int(lenghtOfRedundancy)
96             self.__listJPCP = [] # Jacobian Previous Calculated Points
97             self.__listJPCI = [] # Jacobian Previous Calculated Increment
98             self.__listJPCR = [] # Jacobian Previous Calculated Results
99             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
100             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
101         else:
102             self.__avoidRC = False
103         #
104         if self.__mpEnabled:
105             if isinstance(Function,types.FunctionType):
106                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
107                 self.__userFunction__name = Function.__name__
108                 try:
109                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
110                 except Exception:
111                     mod = os.path.abspath(Function.__globals__['__file__'])
112                 if not os.path.isfile(mod):
113                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
114                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
115                 self.__userFunction__path = os.path.dirname(mod)
116                 del mod
117                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
118                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
119             elif isinstance(Function,types.MethodType):
120                 logging.debug("FDA Calculs en multiprocessing : MethodType")
121                 self.__userFunction__name = Function.__name__
122                 try:
123                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
124                 except Exception:
125                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
126                 if not os.path.isfile(mod):
127                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
128                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
129                 self.__userFunction__path = os.path.dirname(mod)
130                 del mod
131                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
132                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
133             else:
134                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
135         else:
136             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
137             self.__userFunction = self.__userOperator.appliedTo
138         #
139         self.__centeredDF = bool(centeredDF)
140         if abs(float(increment)) > 1.e-15:
141             self.__increment  = float(increment)
142         else:
143             self.__increment  = 0.01
144         if dX is None:
145             self.__dX     = None
146         else:
147             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
148         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
149         if self.__avoidRC:
150             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
151
152     # ---------------------------------------------------------
153     def __doublon__(self, e, l, n, v=None):
154         __ac, __iac = False, -1
155         for i in range(len(l)-1,-1,-1):
156             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
157                 __ac, __iac = True, i
158                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
159                 break
160         return __ac, __iac
161
162     # ---------------------------------------------------------
163     def DirectOperator(self, X ):
164         """
165         Direct computation using the provided function.
166         """
167         logging.debug("FDA Calcul DirectOperator (explicite)")
168         if self.__mfEnabled:
169             _HX = self.__userFunction( X, argsAsSerie = True )
170         else:
171             _X = numpy.asmatrix(numpy.ravel( X )).T
172             _HX = numpy.ravel(self.__userFunction( _X ))
173         #
174         return _HX
175
176     # ---------------------------------------------------------
177     def TangentMatrix(self, X ):
178         """
179         Computation of the tangent operator as the Jacobian by finite
180         differences, i.e. the gradient of H at X. Directional finite
181         differences are used around the point X. X is a numpy.matrix.
182
183         Centered finite differences (second-order approximation):
184         1/ For each component i of X, the perturbation dX[i] is added to and
185            subtracted from the component X[i] to build X_plus_dXi and
186            X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
187            HX_moins_dXi = H( X_moins_dXi ) are computed
188         2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
189            by the step 2*dXi
190         3/ Each result, component by component, becomes a column of the Jacobian
191
192         Non-centered finite differences (first-order approximation):
193         1/ For each component i of X, the perturbation dX[i] is added to the
194            component X[i] to build X_plus_dXi, and the response
195            HX_plus_dXi = H( X_plus_dXi ) is computed
196         2/ The central value HX = H(X) is computed
197         3/ The differences (HX_plus_dXi-HX) are formed and divided by
198            the step dXi
199         4/ Each result, component by component, becomes a column of the Jacobian
200
201         """
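        # Hedged summary of the two schemes described above, with e_i the i-th
        # canonical basis vector and dXi the i-th perturbation step:
        #   centered     : J[:,i] ~= ( H(X + dXi*e_i) - H(X - dXi*e_i) ) / (2*dXi)
        #   non-centered : J[:,i] ~= ( H(X + dXi*e_i) - H(X) ) / dXi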
202         logging.debug("FDA Début du calcul de la Jacobienne")
203         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
204         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
205         #
206         if X is None or len(X)==0:
207             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
208         #
209         _X = numpy.asmatrix(numpy.ravel( X )).T
210         #
211         if self.__dX is None:
212             _dX  = self.__increment * _X
213         else:
214             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
215         #
216         if (_dX == 0.).any():
217             moyenne = _dX.mean()
218             if moyenne == 0.:
219                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
220             else:
221                 _dX = numpy.where( _dX == 0., moyenne, _dX )
222         #
223         __alreadyCalculated  = False
224         if self.__avoidRC:
225             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
226             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
227             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
228                 __alreadyCalculated, __i = True, __alreadyCalculatedP
229                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
230         #
231         if __alreadyCalculated:
232             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
233             _Jacobienne = self.__listJPCR[__i]
234         else:
235             logging.debug("FDA   Calcul Jacobienne (explicite)")
236             if self.__centeredDF:
237                 #
238                 if self.__mpEnabled and not self.__mfEnabled:
239                     funcrepr = {
240                         "__userFunction__path" : self.__userFunction__path,
241                         "__userFunction__modl" : self.__userFunction__modl,
242                         "__userFunction__name" : self.__userFunction__name,
243                     }
244                     _jobs = []
245                     for i in range( len(_dX) ):
246                         _dXi            = _dX[i]
247                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
248                         _X_plus_dXi[i]  = _X[i] + _dXi
249                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
250                         _X_moins_dXi[i] = _X[i] - _dXi
251                         #
252                         _jobs.append( (_X_plus_dXi,  funcrepr) )
253                         _jobs.append( (_X_moins_dXi, funcrepr) )
254                     #
255                     import multiprocessing
256                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
257                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
258                     self.__pool.close()
259                     self.__pool.join()
260                     #
261                     _Jacobienne  = []
262                     for i in range( len(_dX) ):
263                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
264                     #
265                 elif self.__mfEnabled:
266                     _xserie = []
267                     for i in range( len(_dX) ):
268                         _dXi            = _dX[i]
269                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
270                         _X_plus_dXi[i]  = _X[i] + _dXi
271                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
272                         _X_moins_dXi[i] = _X[i] - _dXi
273                         #
274                         _xserie.append( _X_plus_dXi )
275                         _xserie.append( _X_moins_dXi )
276                     #
277                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
278                     #
279                     _Jacobienne  = []
280                     for i in range( len(_dX) ):
281                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
282                     #
283                 else:
284                     _Jacobienne  = []
285                     for i in range( _dX.size ):
286                         _dXi            = _dX[i]
287                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
288                         _X_plus_dXi[i]  = _X[i] + _dXi
289                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
290                         _X_moins_dXi[i] = _X[i] - _dXi
291                         #
292                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
293                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
294                         #
295                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
296                 #
297             else:
298                 #
299                 if self.__mpEnabled and not self.__mfEnabled:
300                     funcrepr = {
301                         "__userFunction__path" : self.__userFunction__path,
302                         "__userFunction__modl" : self.__userFunction__modl,
303                         "__userFunction__name" : self.__userFunction__name,
304                     }
305                     _jobs = []
306                     _jobs.append( (_X.A1, funcrepr) )
307                     for i in range( len(_dX) ):
308                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
309                         _X_plus_dXi[i] = _X[i] + _dX[i]
310                         #
311                         _jobs.append( (_X_plus_dXi, funcrepr) )
312                     #
313                     import multiprocessing
314                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
315                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
316                     self.__pool.close()
317                     self.__pool.join()
318                     #
319                     _HX = _HX_plus_dX.pop(0)
320                     #
321                     _Jacobienne = []
322                     for i in range( len(_dX) ):
323                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
324                     #
325                 elif self.__mfEnabled:
326                     _xserie = []
327                     _xserie.append( _X.A1 )
328                     for i in range( len(_dX) ):
329                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
330                         _X_plus_dXi[i] = _X[i] + _dX[i]
331                         #
332                         _xserie.append( _X_plus_dXi )
333                     #
334                     _HX_plus_dX = self.DirectOperator( _xserie )
335                     #
336                     _HX = _HX_plus_dX.pop(0)
337                     #
338                     _Jacobienne = []
339                     for i in range( len(_dX) ):
340                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
341                     #
342                 else:
343                     _Jacobienne  = []
344                     _HX = self.DirectOperator( _X )
345                     for i in range( _dX.size ):
346                         _dXi            = _dX[i]
347                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
348                         _X_plus_dXi[i]  = _X[i] + _dXi
349                         #
350                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
351                         #
352                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
353                 #
354             #
355             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
356             if self.__avoidRC:
357                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
358                 while len(self.__listJPCP) > self.__lenghtRJ:
359                     self.__listJPCP.pop(0)
360                     self.__listJPCI.pop(0)
361                     self.__listJPCR.pop(0)
362                     self.__listJPPN.pop(0)
363                     self.__listJPIN.pop(0)
364                 self.__listJPCP.append( copy.copy(_X) )
365                 self.__listJPCI.append( copy.copy(_dX) )
366                 self.__listJPCR.append( copy.copy(_Jacobienne) )
367                 self.__listJPPN.append( numpy.linalg.norm(_X) )
368                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
369         #
370         logging.debug("FDA Fin du calcul de la Jacobienne")
371         #
372         return _Jacobienne
373
374     # ---------------------------------------------------------
375     def TangentOperator(self, paire ):
376         """
377         Tangent computation using the Jacobian.
378         """
379         if self.__mfEnabled:
380             assert len(paire) == 1, "Incorrect length of arguments"
381             _paire = paire[0]
382             assert len(_paire) == 2, "Incorrect number of arguments"
383         else:
384             assert len(paire) == 2, "Incorrect number of arguments"
385             _paire = paire
386         X, dX = _paire
387         _Jacobienne = self.TangentMatrix( X )
388         if dX is None or len(dX) == 0:
389             #
390             # Computation of the matrix form when the second argument is None
391             # ----------------------------------------------------------------
392             if self.__mfEnabled: return [_Jacobienne,]
393             else:                return _Jacobienne
394         else:
395             #
396             # Computation of the linearised value of H at X applied to dX
397             # ------------------------------------------------------------
398             _dX = numpy.asmatrix(numpy.ravel( dX )).T
399             _HtX = numpy.dot(_Jacobienne, _dX)
400             if self.__mfEnabled: return [_HtX.A1,]
401             else:                return _HtX.A1
402
403     # ---------------------------------------------------------
404     def AdjointOperator(self, paire ):
405         """
406         Adjoint computation using the Jacobian.
407         """
408         if self.__mfEnabled:
409             assert len(paire) == 1, "Incorrect length of arguments"
410             _paire = paire[0]
411             assert len(_paire) == 2, "Incorrect number of arguments"
412         else:
413             assert len(paire) == 2, "Incorrect number of arguments"
414             _paire = paire
415         X, Y = _paire
416         _JacobienneT = self.TangentMatrix( X ).T
417         if Y is None or len(Y) == 0:
418             #
419             # Computation of the matrix form when the second argument is None
420             # ----------------------------------------------------------------
421             if self.__mfEnabled: return [_JacobienneT,]
422             else:                return _JacobienneT
423         else:
424             #
425             # Computation of the adjoint at X applied to Y
426             # ---------------------------------------------
427             _Y = numpy.asmatrix(numpy.ravel( Y )).T
428             _HaY = numpy.dot(_JacobienneT, _Y)
429             if self.__mfEnabled: return [_HaY.A1,]
430             else:                return _HaY.A1
431
432 # ==============================================================================
433 def mmqr(
434         func     = None,
435         x0       = None,
436         fprime   = None,
437         bounds   = None,
438         quantile = 0.5,
439         maxfun   = 15000,
440         toler    = 1.e-06,
441         y        = None,
442         ):
443     """
444     Computer implementation of the MMQR algorithm, based on the publication:
445     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
446     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
447     """
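    # Hedged background note on the reference above: quantile regression at
    # level "quantile" minimises sum_k rho(r_k) with the check function
    # rho(r) = r * (quantile - 1_{r<0}) on the residuals r_k = y_k - f_k(x).
    # The MM scheme majorises this non-smooth objective at each iteration by a
    # smooth surrogate that is quadratic in the residuals, with weights
    # 1/(epsilon+|r_k|); these weights and the surrogate value are the "poids"
    # and "surrogate" quantities computed below.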
448     #
449     # Retrieval of the initial data and information
450     # ----------------------------------------------
451     variables = numpy.ravel( x0 )
452     mesures   = numpy.ravel( y )
453     increment = sys.float_info[0]
454     p         = variables.size
455     n         = mesures.size
456     quantile  = float(quantile)
457     #
458     # Computation of the MM parameters
459     # --------------------------------
460     tn      = float(toler) / n
461     e0      = -tn / math.log(tn)
462     epsilon = (e0-tn)/(1+math.log(e0))
463     #
464     # Initialisation computations
465     # ---------------------------
466     residus  = mesures - numpy.ravel( func( variables ) )
467     poids    = 1./(epsilon+numpy.abs(residus))
468     veps     = 1. - 2. * quantile - residus * poids
469     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
470     iteration = 0
471     #
472     # Iterative search
473     # ----------------
474     while (increment > toler) and (iteration < maxfun) :
475         iteration += 1
476         #
477         Derivees  = numpy.array(fprime(variables))
478         Derivees  = Derivees.reshape(n,p) # Needed to restore the matrix shape if it goes through YACS pipes
479         DeriveesT = Derivees.transpose()
480         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
481         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
482         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
483         #
484         variables = variables + step
485         if bounds is not None:
486             # Caution: an infinite loop must be avoided if an interval is too small
487             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
488                 step      = step/2.
489                 variables = variables - step
490         residus   = mesures - numpy.ravel( func(variables) )
491         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
492         #
493         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
494             step      = step/2.
495             variables = variables - step
496             residus   = mesures - numpy.ravel( func(variables) )
497             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
498         #
499         increment     = lastsurrogate-surrogate
500         poids         = 1./(epsilon+numpy.abs(residus))
501         veps          = 1. - 2. * quantile - residus * poids
502         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
503     #
504     # Discrepancy measure
505     # -------------------
506     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
507     #
508     return variables, Ecart, [n,p,iteration,increment,0]
509
510 # ==============================================================================
511 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
512     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
513     #
514     _bgcenter = numpy.ravel(_bgcenter)[:,None]
515     if _nbmembers < 1:
516         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
517     #
518     if _bgcovariance is None:
519         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
520     else:
521         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
522         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
523     #
524     return BackgroundEnsemble
525
526 # ==============================================================================
527 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
528     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
529     def __CenteredRandomAnomalies(Zr, N):
530         """
531         Generates a matrix of N centered random anomalies from Zr, following
532         MB's handwritten notes and consistent with PS's code with eps = -1
533         """
534         eps = -1
535         Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
536         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
537         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
538         Q = numpy.dot(Q,R)
539         Zr = numpy.dot(Q,Zr)
540         return Zr.T
541     #
542     _bgcenter = numpy.ravel(_bgcenter)[:,None]
543     if _nbmembers < 1:
544         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
545     if _bgcovariance is None:
546         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
547     else:
548         if _withSVD:
549             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
550             _nbctl = _bgcenter.size
551             if _nbmembers > _nbctl:
552                 _Z = numpy.concatenate((numpy.dot(
553                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
554                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
555             else:
556                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
557             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
558             BackgroundEnsemble = _bgcenter + _Zca
559         else:
560             if max(abs(_bgcovariance.flatten())) > 0:
561                 _nbctl = _bgcenter.size
562                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
563                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
564                 BackgroundEnsemble = _bgcenter + _Zca
565             else:
566                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
567     #
568     return BackgroundEnsemble
569
570 # ==============================================================================
571 def EnsembleOfAnomalies( _ensemble, _optmean = None):
572     "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
573     if _optmean is None:
574         Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
575     else:
576         Em = numpy.ravel(_optmean)[:,numpy.newaxis]
577     #
578     return numpy.asarray(_ensemble) - Em
579
580 # ==============================================================================
581 def CovarianceInflation(
582         InputCovOrEns,
583         InflationType   = None,
584         InflationFactor = None,
585         BackgroundCov   = None,
586         ):
587     """
588     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
589
590     Summary: Hunt 2007, section 2.3.5
591     """
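    # Hedged summary of the variants implemented below, with m the
    # multiplicative factor (m >= 1) and a the additive/hybrid factor (a >= 0):
    #   multiplicative on a covariance  : P   <- m**2 * P
    #   multiplicative on anomalies     : X_k <- mean(X) + m * (X_k - mean(X))
    #   additive on a covariance        : P   <- (1-a) * P + a * I
    #   hybrid on background covariance : P   <- (1-a) * P + a * B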
592     if InflationFactor is None:
593         return InputCovOrEns
594     else:
595         InflationFactor = float(InflationFactor)
596     #
597     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
598         if InflationFactor < 1.:
599             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
600         if InflationFactor < 1.+mpr:
601             return InputCovOrEns
602         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
603     #
604     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
605         if InflationFactor < 1.:
606             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
607         if InflationFactor < 1.+mpr:
608             return InputCovOrEns
609         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
610         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
611             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
612     #
613     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
614         if InflationFactor < 0.:
615             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
616         if InflationFactor < mpr:
617             return InputCovOrEns
618         __n, __m = numpy.asarray(InputCovOrEns).shape
619         if __n != __m:
620             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
621         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
622     #
623     elif InflationType == "HybridOnBackgroundCovariance":
624         if InflationFactor < 0.:
625             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
626         if InflationFactor < mpr:
627             return InputCovOrEns
628         __n, __m = numpy.asarray(InputCovOrEns).shape
629         if __n != __m:
630             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
631         if BackgroundCov is None:
632             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
633         if InputCovOrEns.shape != BackgroundCov.shape:
634             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
635         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
636     #
637     elif InflationType == "Relaxation":
638         raise NotImplementedError("InflationType Relaxation")
639     #
640     else:
641         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
642     #
643     return OutputCovOrEns
644
645 # ==============================================================================
646 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
647     """
648     3DVAR (Bouttier 1999, Courtier 1993)
649
650     selfA is identical to the "self" of the calling algorithm and holds its
651     values.
652     """
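    # Hedged reminder of the functional minimised below (standard 3DVAR form):
    #   J(x)     = 1/2 (x-xb)^T B^{-1} (x-xb) + 1/2 (y-H(x))^T R^{-1} (y-H(x))
    #   gradJ(x) = B^{-1} (x-xb) - H'(x)^T R^{-1} (y-H(x))
    # with xb the background, y the observations, H the observation operator
    # and H' its tangent linear.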
653     #
654     # Workaround for a TNC bug on the returned Minimum
655     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
656         selfA.setParameterValue("StoreInternalVariables",True)
657     #
658     # Operators
659     # ---------
660     Hm = HO["Direct"].appliedTo
661     Ha = HO["Adjoint"].appliedInXTo
662     #
663     # Optional use of a precomputed H(Xb) vector
664     # -------------------------------------------
665     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
666         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
667     else:
668         HXb = Hm( Xb )
669     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
670     if Y.size != HXb.size:
671         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
672     if max(Y.shape) != max(HXb.shape):
673         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
674     #
675     if selfA._toStore("JacobianMatrixAtBackground"):
676         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
677         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
678         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
679     #
680     # Precomputation of the inverses of B and R
681     # ------------------------------------------
682     BI = B.getI()
683     RI = R.getI()
684     #
685     # Starting point of the optimisation
686     # ----------------------------------
687     Xini = selfA._parameters["InitializationPoint"]
688     #
689     # Definition of the cost function
690     # -------------------------------
691     def CostFunction(x):
692         _X  = numpy.asmatrix(numpy.ravel( x )).T
693         if selfA._parameters["StoreInternalVariables"] or \
694             selfA._toStore("CurrentState") or \
695             selfA._toStore("CurrentOptimum"):
696             selfA.StoredVariables["CurrentState"].store( _X )
697         _HX = Hm( _X )
698         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
699         _Innovation = Y - _HX
700         if selfA._toStore("SimulatedObservationAtCurrentState") or \
701             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
702             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
703         if selfA._toStore("InnovationAtCurrentState"):
704             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
705         #
706         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
707         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
708         J   = Jb + Jo
709         #
710         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
711         selfA.StoredVariables["CostFunctionJb"].store( Jb )
712         selfA.StoredVariables["CostFunctionJo"].store( Jo )
713         selfA.StoredVariables["CostFunctionJ" ].store( J )
714         if selfA._toStore("IndexOfOptimum") or \
715             selfA._toStore("CurrentOptimum") or \
716             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
717             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
718             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
719             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
720             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
721         if selfA._toStore("IndexOfOptimum"):
722             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
723         if selfA._toStore("CurrentOptimum"):
724             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
725         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
726             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
727         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
728             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
729         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
730             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
731         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
732             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
733         return J
734     #
735     def GradientOfCostFunction(x):
736         _X      = numpy.asmatrix(numpy.ravel( x )).T
737         _HX     = Hm( _X )
738         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
739         GradJb  = BI * (_X - Xb)
740         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
741         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
742         return GradJ
743     #
744     # Minimisation of the functional
745     # ------------------------------
746     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
747     #
748     if selfA._parameters["Minimizer"] == "LBFGSB":
749         if "0.19" <= scipy.version.version <= "1.1.0":
750             import lbfgsbhlt as optimiseur
751         else:
752             import scipy.optimize as optimiseur
753         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
754             func        = CostFunction,
755             x0          = Xini,
756             fprime      = GradientOfCostFunction,
757             args        = (),
758             bounds      = selfA._parameters["Bounds"],
759             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
760             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
761             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
762             iprint      = selfA._parameters["optiprint"],
763             )
764         nfeval = Informations['funcalls']
765         rc     = Informations['warnflag']
766     elif selfA._parameters["Minimizer"] == "TNC":
767         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
768             func        = CostFunction,
769             x0          = Xini,
770             fprime      = GradientOfCostFunction,
771             args        = (),
772             bounds      = selfA._parameters["Bounds"],
773             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
774             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
775             ftol        = selfA._parameters["CostDecrementTolerance"],
776             messages    = selfA._parameters["optmessages"],
777             )
778     elif selfA._parameters["Minimizer"] == "CG":
779         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
780             f           = CostFunction,
781             x0          = Xini,
782             fprime      = GradientOfCostFunction,
783             args        = (),
784             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
785             gtol        = selfA._parameters["GradientNormTolerance"],
786             disp        = selfA._parameters["optdisp"],
787             full_output = True,
788             )
789     elif selfA._parameters["Minimizer"] == "NCG":
790         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
791             f           = CostFunction,
792             x0          = Xini,
793             fprime      = GradientOfCostFunction,
794             args        = (),
795             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
796             avextol     = selfA._parameters["CostDecrementTolerance"],
797             disp        = selfA._parameters["optdisp"],
798             full_output = True,
799             )
800     elif selfA._parameters["Minimizer"] == "BFGS":
801         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
802             f           = CostFunction,
803             x0          = Xini,
804             fprime      = GradientOfCostFunction,
805             args        = (),
806             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
807             gtol        = selfA._parameters["GradientNormTolerance"],
808             disp        = selfA._parameters["optdisp"],
809             full_output = True,
810             )
811     else:
812         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
813     #
814     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
815     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
816     #
817     # Workaround for a TNC bug on the returned Minimum
818     # ------------------------------------------------
819     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
820         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
821     #
822     # Retrieval of the analysis
823     # -------------------------
824     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
825     #
826     selfA.StoredVariables["Analysis"].store( Xa.A1 )
827     #
828     if selfA._toStore("OMA") or \
829         selfA._toStore("SigmaObs2") or \
830         selfA._toStore("SimulationQuantiles") or \
831         selfA._toStore("SimulatedObservationAtOptimum"):
832         if selfA._toStore("SimulatedObservationAtCurrentState"):
833             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
834         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
835             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
836         else:
837             HXa = Hm( Xa )
838     #
839     # Computation of the analysis covariance
840     # --------------------------------------
841     if selfA._toStore("APosterioriCovariance") or \
842         selfA._toStore("SimulationQuantiles") or \
843         selfA._toStore("JacobianMatrixAtOptimum") or \
844         selfA._toStore("KalmanGainAtOptimum"):
845         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
846         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
847     if selfA._toStore("APosterioriCovariance") or \
848         selfA._toStore("SimulationQuantiles") or \
849         selfA._toStore("KalmanGainAtOptimum"):
850         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
851         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
852     if selfA._toStore("APosterioriCovariance") or \
853         selfA._toStore("SimulationQuantiles"):
854         HessienneI = []
855         nb = Xa.size
856         for i in range(nb):
857             _ee    = numpy.matrix(numpy.zeros(nb)).T
858             _ee[i] = 1.
859             _HtEE  = numpy.dot(HtM,_ee)
860             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
861             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
862         HessienneI = numpy.matrix( HessienneI )
863         A = HessienneI.I
864         if min(A.shape) != max(A.shape):
865             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
866         if (numpy.diag(A) < 0).any():
867             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
868         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
869             try:
870                 L = numpy.linalg.cholesky( A )
871             except Exception:
872                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
873     if selfA._toStore("APosterioriCovariance"):
874         selfA.StoredVariables["APosterioriCovariance"].store( A )
875     if selfA._toStore("JacobianMatrixAtOptimum"):
876         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
877     if selfA._toStore("KalmanGainAtOptimum"):
878         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
879         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
880         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
881     #
882     # Additional computations and/or storage
883     # ---------------------------------------
884     if selfA._toStore("Innovation") or \
885         selfA._toStore("SigmaObs2") or \
886         selfA._toStore("MahalanobisConsistency") or \
887         selfA._toStore("OMB"):
888         d  = Y - HXb
889     if selfA._toStore("Innovation"):
890         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
891     if selfA._toStore("BMA"):
892         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
893     if selfA._toStore("OMA"):
894         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
895     if selfA._toStore("OMB"):
896         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
897     if selfA._toStore("SigmaObs2"):
898         TraceR = R.trace(Y.size)
899         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
900     if selfA._toStore("MahalanobisConsistency"):
901         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
902     if selfA._toStore("SimulationQuantiles"):
903         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
904         HXa  = numpy.matrix(numpy.ravel( HXa )).T
905         YfQ  = None
906         for i in range(nech):
907             if selfA._parameters["SimulationForQuantiles"] == "Linear":
908                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
909                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
910                 Yr = HXa + dYr
911             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
912                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
913                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
914             if YfQ is None:
915                 YfQ = Yr
916             else:
917                 YfQ = numpy.hstack((YfQ,Yr))
918         YfQ.sort(axis=-1)
919         YQ = None
920         for quantile in selfA._parameters["Quantiles"]:
921             if not (0. <= float(quantile) <= 1.): continue
922             indice = int(nech * float(quantile) - 1./nech)
923             if YQ is None: YQ = YfQ[:,indice]
924             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
925         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
926     if selfA._toStore("SimulatedObservationAtBackground"):
927         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
928     if selfA._toStore("SimulatedObservationAtOptimum"):
929         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
930     #
931     return 0
932
933 # ==============================================================================
934 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
935     """
936     3DVAR variational analysis with no inversion of B (Huang 2000)
937
938     selfA is identical to the "self" of the calling algorithm and holds its
939     values.
940     """
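    # Hedged reminder of the change of variable used below: the control
    # variable is v, with x = xb + B v, so that the background term becomes
    #   Jb(v) = 1/2 v^T B v
    # and only R has to be inverted; the observation term and its gradient are
    # assembled in CostFunction and GradientOfCostFunction below.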
941     #
942     # Workaround for a TNC bug on the returned Minimum
943     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
944         selfA.setParameterValue("StoreInternalVariables",True)
945     #
946     # Initialisations
947     # ---------------
948     Hm = HO["Direct"].appliedTo
949     Ha = HO["Adjoint"].appliedInXTo
950     #
951     # Precomputation of the transpose of B and the inverse of R
952     BT = B.getT()
953     RI = R.getI()
954     #
955     # Starting point of the optimisation
956     Xini = numpy.zeros(Xb.shape)
957     #
958     # Definition of the cost function
959     # -------------------------------
960     def CostFunction(v):
961         _V = numpy.asmatrix(numpy.ravel( v )).T
962         _X = Xb + B * _V
963         if selfA._parameters["StoreInternalVariables"] or \
964             selfA._toStore("CurrentState") or \
965             selfA._toStore("CurrentOptimum"):
966             selfA.StoredVariables["CurrentState"].store( _X )
967         _HX = Hm( _X )
968         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
969         _Innovation = Y - _HX
970         if selfA._toStore("SimulatedObservationAtCurrentState") or \
971             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
972             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
973         if selfA._toStore("InnovationAtCurrentState"):
974             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
975         #
976         Jb  = float( 0.5 * _V.T * BT * _V )
977         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
978         J   = Jb + Jo
979         #
980         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
981         selfA.StoredVariables["CostFunctionJb"].store( Jb )
982         selfA.StoredVariables["CostFunctionJo"].store( Jo )
983         selfA.StoredVariables["CostFunctionJ" ].store( J )
984         if selfA._toStore("IndexOfOptimum") or \
985             selfA._toStore("CurrentOptimum") or \
986             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
987             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
988             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
989             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
990             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
991         if selfA._toStore("IndexOfOptimum"):
992             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
993         if selfA._toStore("CurrentOptimum"):
994             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
995         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
996             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
997         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
998             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
999         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1000             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1001         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1002             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1003         return J
1004     #
1005     def GradientOfCostFunction(v):
1006         _V = numpy.asmatrix(numpy.ravel( v )).T
1007         _X = Xb + B * _V
1008         _HX     = Hm( _X )
1009         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
1010         GradJb  = BT * _V
1011         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
1012         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1013         return GradJ
1014     #
1015     # Minimisation of the functional
1016     # ------------------------------
1017     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1018     #
1019     if selfA._parameters["Minimizer"] == "LBFGSB":
1020         if "0.19" <= scipy.version.version <= "1.1.0":
1021             import lbfgsbhlt as optimiseur
1022         else:
1023             import scipy.optimize as optimiseur
1024         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1025             func        = CostFunction,
1026             x0          = Xini,
1027             fprime      = GradientOfCostFunction,
1028             args        = (),
1029             bounds      = selfA._parameters["Bounds"],
1030             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1031             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1032             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1033             iprint      = selfA._parameters["optiprint"],
1034             )
1035         nfeval = Informations['funcalls']
1036         rc     = Informations['warnflag']
1037     elif selfA._parameters["Minimizer"] == "TNC":
1038         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1039             func        = CostFunction,
1040             x0          = Xini,
1041             fprime      = GradientOfCostFunction,
1042             args        = (),
1043             bounds      = selfA._parameters["Bounds"],
1044             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1045             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1046             ftol        = selfA._parameters["CostDecrementTolerance"],
1047             messages    = selfA._parameters["optmessages"],
1048             )
1049     elif selfA._parameters["Minimizer"] == "CG":
1050         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1051             f           = CostFunction,
1052             x0          = Xini,
1053             fprime      = GradientOfCostFunction,
1054             args        = (),
1055             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1056             gtol        = selfA._parameters["GradientNormTolerance"],
1057             disp        = selfA._parameters["optdisp"],
1058             full_output = True,
1059             )
1060     elif selfA._parameters["Minimizer"] == "NCG":
1061         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1062             f           = CostFunction,
1063             x0          = Xini,
1064             fprime      = GradientOfCostFunction,
1065             args        = (),
1066             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1067             avextol     = selfA._parameters["CostDecrementTolerance"],
1068             disp        = selfA._parameters["optdisp"],
1069             full_output = True,
1070             )
1071     elif selfA._parameters["Minimizer"] == "BFGS":
1072         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1073             f           = CostFunction,
1074             x0          = Xini,
1075             fprime      = GradientOfCostFunction,
1076             args        = (),
1077             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1078             gtol        = selfA._parameters["GradientNormTolerance"],
1079             disp        = selfA._parameters["optdisp"],
1080             full_output = True,
1081             )
1082     else:
1083         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1084     #
1085     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1086     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1087     #
1088     # Workaround for a TNC bug on the returned Minimum
1089     # ------------------------------------------------
1090     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1091         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1092         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1093     else:
1094         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
1095     #
1096     # Retrieval of the analysis
1097     # --------------------------
1098     Xa = Minimum
1099     #
1100     selfA.StoredVariables["Analysis"].store( Xa )
1101     #
1102     if selfA._toStore("OMA") or \
1103         selfA._toStore("SigmaObs2") or \
1104         selfA._toStore("SimulationQuantiles") or \
1105         selfA._toStore("SimulatedObservationAtOptimum"):
1106         if selfA._toStore("SimulatedObservationAtCurrentState"):
1107             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1108         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1109             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1110         else:
1111             HXa = Hm( Xa )
1112     #
1113     # Computation of the analysis covariance
1114     # ---------------------------------------
1115     if selfA._toStore("APosterioriCovariance") or \
1116         selfA._toStore("SimulationQuantiles") or \
1117         selfA._toStore("JacobianMatrixAtOptimum") or \
1118         selfA._toStore("KalmanGainAtOptimum"):
1119         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1120         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1121     if selfA._toStore("APosterioriCovariance") or \
1122         selfA._toStore("SimulationQuantiles") or \
1123         selfA._toStore("KalmanGainAtOptimum"):
1124         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1125         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1126     if selfA._toStore("APosterioriCovariance") or \
1127         selfA._toStore("SimulationQuantiles"):
1128         BI = B.getI()
1129         HessienneI = []
1130         nb = Xa.size
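        # Illustrative note: the loop below applies the Hessian of the cost
        # function at the optimum, (B^-1 + H^T R^-1 H), to each unit vector e_i,
        # building it column by column; its inverse is then taken as the
        # a posteriori covariance A.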
1131         for i in range(nb):
1132             _ee    = numpy.matrix(numpy.zeros(nb)).T
1133             _ee[i] = 1.
1134             _HtEE  = numpy.dot(HtM,_ee)
1135             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1136             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1137         HessienneI = numpy.matrix( HessienneI )
1138         A = HessienneI.I
1139         if min(A.shape) != max(A.shape):
1140             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, although it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1141         if (numpy.diag(A) < 0).any():
1142             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1143         if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
1144             try:
1145                 L = numpy.linalg.cholesky( A )
1146             except numpy.linalg.LinAlgError:
1147                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1148     if selfA._toStore("APosterioriCovariance"):
1149         selfA.StoredVariables["APosterioriCovariance"].store( A )
1150     if selfA._toStore("JacobianMatrixAtOptimum"):
1151         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1152     if selfA._toStore("KalmanGainAtOptimum"):
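        # Illustrative note: the two expressions below are the algebraically
        # equivalent "primal" and "dual" forms of the Kalman gain,
        #   K = B H^T (H B H^T + R)^-1 = (B^-1 + H^T R^-1 H)^-1 H^T R^-1,
        # and the branch is chosen so that the smaller of the two matrices is inverted.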
1153         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1154         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1155         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1156     #
1157     # Additional computations and/or storage
1158     # ---------------------------------------
1159     if selfA._toStore("Innovation") or \
1160         selfA._toStore("SigmaObs2") or \
1161         selfA._toStore("MahalanobisConsistency") or \
1162         selfA._toStore("OMB"):
1163         d  = Y - HXb
1164     if selfA._toStore("Innovation"):
1165         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1166     if selfA._toStore("BMA"):
1167         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1168     if selfA._toStore("OMA"):
1169         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1170     if selfA._toStore("OMB"):
1171         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
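    # Illustrative note: SigmaObs2 below is the innovation-based diagnostic
    # (Y-HXb)^T (Y-HXa) / Tr(R) of the observation error variance, and
    # MahalanobisConsistency is 2*Jmin/p, expected to be close to 1 when the
    # error statistics B and R are consistent with the data.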
1172     if selfA._toStore("SigmaObs2"):
1173         TraceR = R.trace(Y.size)
1174         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1175     if selfA._toStore("MahalanobisConsistency"):
1176         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1177     if selfA._toStore("SimulationQuantiles"):
1178         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1179         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1180         YfQ  = None
1181         for i in range(nech):
1182             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1183                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1184                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1185                 Yr = HXa + dYr
1186             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1187                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1188                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1189             if YfQ is None:
1190                 YfQ = Yr
1191             else:
1192                 YfQ = numpy.hstack((YfQ,Yr))
1193         YfQ.sort(axis=-1)
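        # Illustrative note: each row of YfQ now contains, sorted in increasing
        # order, the nech simulated values of one observation component, so the
        # requested quantiles are simply read off below by column index.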
1194         YQ = None
1195         for quantile in selfA._parameters["Quantiles"]:
1196             if not (0. <= float(quantile) <= 1.): continue
1197             indice = int(nech * float(quantile) - 1./nech)
1198             if YQ is None: YQ = YfQ[:,indice]
1199             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1200         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1201     if selfA._toStore("SimulatedObservationAtBackground"):
1202         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1203     if selfA._toStore("SimulatedObservationAtOptimum"):
1204         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1205     #
1206     return 0
1207
1208 # ==============================================================================
1209 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1210     """
1211     Incremental 3DVAR (Courtier 1994, 1997)
1212
1213     selfA is identical to the "self" of the calling algorithm and holds its
1214     values.
1215     """
1216     #
1217     # Workaround for a TNC bug on the returned Minimum
1218     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
1219         selfA.setParameterValue("StoreInternalVariables",True)
1220     #
1221     # Initializations
1222     # ---------------
1223     #
1224     # Non-linear operator for the outer loop
1225     Hm = HO["Direct"].appliedTo
1226     #
1227     # Precomputation of the inverses of B and R
1228     BI = B.getI()
1229     RI = R.getI()
1230     #
1231     # Starting point of the optimization
1232     Xini = selfA._parameters["InitializationPoint"]
1233     #
1234     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1235     Innovation = Y - HXb
1236     #
1237     # Outer Loop
1238     # ----------
1239     iOuter = 0
1240     J      = 1./mpr
1241     DeltaJ = 1./mpr
1242     Xr     = Xini.reshape((-1,1))
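    # Illustrative sketch of the incremental scheme: each outer iteration
    # relinearizes the observation operator around the current state Xr, and the
    # inner loop minimizes the quadratic cost in the increment dx,
    #   J(dx) = 1/2 dx^T B^-1 dx + 1/2 (d - H dx)^T R^-1 (d - H dx),
    # with d = Y - H(Xb), before updating Xr with the resulting minimizer.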
1243     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1244         #
1245         # Inner Loop
1246         # ----------
1247         Ht = HO["Tangent"].asMatrix(Xr)
1248         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1249         #
1250         # Definition of the cost function
1251         # -------------------------------
1252         def CostFunction(dx):
1253             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1254             if selfA._parameters["StoreInternalVariables"] or \
1255                 selfA._toStore("CurrentState") or \
1256                 selfA._toStore("CurrentOptimum"):
1257                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1258             _HdX = Ht * _dX
1259             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1260             _dInnovation = Innovation - _HdX
1261             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1262                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1263                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1264             if selfA._toStore("InnovationAtCurrentState"):
1265                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1266             #
1267             Jb  = float( 0.5 * _dX.T * BI * _dX )
1268             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1269             J   = Jb + Jo
1270             #
1271             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1272             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1273             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1274             selfA.StoredVariables["CostFunctionJ" ].store( J )
1275             if selfA._toStore("IndexOfOptimum") or \
1276                 selfA._toStore("CurrentOptimum") or \
1277                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1278                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1279                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1280                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1281                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1282             if selfA._toStore("IndexOfOptimum"):
1283                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1284             if selfA._toStore("CurrentOptimum"):
1285                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1286             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1287                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1288             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1289                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1290             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1291                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1292             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1293                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1294             return J
1295         #
1296         def GradientOfCostFunction(dx):
1297             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1298             _HdX         = Ht * _dX
1299             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1300             _dInnovation = Innovation - _HdX
1301             GradJb       = BI * _dX
1302             GradJo       = - Ht.T @ (RI * _dInnovation)
1303             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1304             return GradJ
1305         #
1306         # Minimization of the functional
1307         # ------------------------------
1308         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1309         #
1310         if selfA._parameters["Minimizer"] == "LBFGSB":
1311             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1312             if "0.19" <= scipy.version.version <= "1.1.0":
1313                 import lbfgsbhlt as optimiseur
1314             else:
1315                 import scipy.optimize as optimiseur
1316             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1317                 func        = CostFunction,
1318                 x0          = numpy.zeros(Xini.size),
1319                 fprime      = GradientOfCostFunction,
1320                 args        = (),
1321                 bounds      = selfA._parameters["Bounds"],
1322                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1323                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1324                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1325                 iprint      = selfA._parameters["optiprint"],
1326                 )
1327             nfeval = Informations['funcalls']
1328             rc     = Informations['warnflag']
1329         elif selfA._parameters["Minimizer"] == "TNC":
1330             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1331                 func        = CostFunction,
1332                 x0          = numpy.zeros(Xini.size),
1333                 fprime      = GradientOfCostFunction,
1334                 args        = (),
1335                 bounds      = selfA._parameters["Bounds"],
1336                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1337                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1338                 ftol        = selfA._parameters["CostDecrementTolerance"],
1339                 messages    = selfA._parameters["optmessages"],
1340                 )
1341         elif selfA._parameters["Minimizer"] == "CG":
1342             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1343                 f           = CostFunction,
1344                 x0          = numpy.zeros(Xini.size),
1345                 fprime      = GradientOfCostFunction,
1346                 args        = (),
1347                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1348                 gtol        = selfA._parameters["GradientNormTolerance"],
1349                 disp        = selfA._parameters["optdisp"],
1350                 full_output = True,
1351                 )
1352         elif selfA._parameters["Minimizer"] == "NCG":
1353             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1354                 f           = CostFunction,
1355                 x0          = numpy.zeros(Xini.size),
1356                 fprime      = GradientOfCostFunction,
1357                 args        = (),
1358                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1359                 avextol     = selfA._parameters["CostDecrementTolerance"],
1360                 disp        = selfA._parameters["optdisp"],
1361                 full_output = True,
1362                 )
1363         elif selfA._parameters["Minimizer"] == "BFGS":
1364             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1365                 f           = CostFunction,
1366                 x0          = numpy.zeros(Xini.size),
1367                 fprime      = GradientOfCostFunction,
1368                 args        = (),
1369                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1370                 gtol        = selfA._parameters["GradientNormTolerance"],
1371                 disp        = selfA._parameters["optdisp"],
1372                 full_output = True,
1373                 )
1374         else:
1375             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1376         #
1377         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1378         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1379         #
1380         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1381             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1382             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1383         else:
1384             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1385         #
1386         Xr     = Minimum
1387         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1388         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1389     #
1390     # Retrieval of the analysis
1391     # --------------------------
1392     Xa = Xr
1393     #
1394     selfA.StoredVariables["Analysis"].store( Xa )
1395     #
1396     if selfA._toStore("OMA") or \
1397         selfA._toStore("SigmaObs2") or \
1398         selfA._toStore("SimulationQuantiles") or \
1399         selfA._toStore("SimulatedObservationAtOptimum"):
1400         if selfA._toStore("SimulatedObservationAtCurrentState"):
1401             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1402         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1403             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1404         else:
1405             HXa = Hm( Xa )
1406     #
1407     # Computation of the analysis covariance
1408     # ---------------------------------------
1409     if selfA._toStore("APosterioriCovariance") or \
1410         selfA._toStore("SimulationQuantiles") or \
1411         selfA._toStore("JacobianMatrixAtOptimum") or \
1412         selfA._toStore("KalmanGainAtOptimum"):
1413         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1414         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1415     if selfA._toStore("APosterioriCovariance") or \
1416         selfA._toStore("SimulationQuantiles") or \
1417         selfA._toStore("KalmanGainAtOptimum"):
1418         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1419         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1420     if selfA._toStore("APosterioriCovariance") or \
1421         selfA._toStore("SimulationQuantiles"):
1422         HessienneI = []
1423         nb = Xa.size
1424         for i in range(nb):
1425             _ee    = numpy.matrix(numpy.zeros(nb)).T
1426             _ee[i] = 1.
1427             _HtEE  = numpy.dot(HtM,_ee)
1428             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1429             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1430         HessienneI = numpy.matrix( HessienneI )
1431         A = HessienneI.I
1432         if min(A.shape) != max(A.shape):
1433             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, although it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1434         if (numpy.diag(A) < 0).any():
1435             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1436         if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
1437             try:
1438                 L = numpy.linalg.cholesky( A )
1439             except numpy.linalg.LinAlgError:
1440                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1441     if selfA._toStore("APosterioriCovariance"):
1442         selfA.StoredVariables["APosterioriCovariance"].store( A )
1443     if selfA._toStore("JacobianMatrixAtOptimum"):
1444         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1445     if selfA._toStore("KalmanGainAtOptimum"):
1446         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1447         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1448         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1449     #
1450     # Additional computations and/or storage
1451     # ---------------------------------------
1452     if selfA._toStore("Innovation") or \
1453         selfA._toStore("SigmaObs2") or \
1454         selfA._toStore("MahalanobisConsistency") or \
1455         selfA._toStore("OMB"):
1456         d  = Y - HXb
1457     if selfA._toStore("Innovation"):
1458         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1459     if selfA._toStore("BMA"):
1460         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1461     if selfA._toStore("OMA"):
1462         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1463     if selfA._toStore("OMB"):
1464         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1465     if selfA._toStore("SigmaObs2"):
1466         TraceR = R.trace(Y.size)
1467         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1468     if selfA._toStore("MahalanobisConsistency"):
1469         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1470     if selfA._toStore("SimulationQuantiles"):
1471         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1472         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1473         YfQ  = None
1474         for i in range(nech):
1475             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1476                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1477                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1478                 Yr = HXa + dYr
1479             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1480                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1481                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1482             if YfQ is None:
1483                 YfQ = Yr
1484             else:
1485                 YfQ = numpy.hstack((YfQ,Yr))
1486         YfQ.sort(axis=-1)
1487         YQ = None
1488         for quantile in selfA._parameters["Quantiles"]:
1489             if not (0. <= float(quantile) <= 1.): continue
1490             indice = int(nech * float(quantile) - 1./nech)
1491             if YQ is None: YQ = YfQ[:,indice]
1492             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1493         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1494     if selfA._toStore("SimulatedObservationAtBackground"):
1495         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1496     if selfA._toStore("SimulatedObservationAtOptimum"):
1497         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1498     #
1499     return 0
1500
1501 # ==============================================================================
1502 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1503     """
1504     3DVAR PSAS (Huang 2000)
1505
1506     selfA is identical to the "self" of the calling algorithm and holds its
1507     values.
1508     """
1509     #
1510     # Workaround for a TNC bug on the returned Minimum
1511     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
1512         selfA.setParameterValue("StoreInternalVariables",True)
1513     #
1514     # Initializations
1515     # ---------------
1516     #
1517     # Operators
1518     Hm = HO["Direct"].appliedTo
1519     #
1520     # Possible use of a precomputed H(Xb) vector
1521     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1522         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1523     else:
1524         HXb = Hm( Xb )
1525     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1526     if Y.size != HXb.size:
1527         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
1528     if max(Y.shape) != max(HXb.shape):
1529         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
1530     #
1531     if selfA._toStore("JacobianMatrixAtBackground"):
1532         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1533         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1534         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1535     #
1536     Ht = HO["Tangent"].asMatrix(Xb)
1537     BHT = B * Ht.T
1538     HBHTpR = R + Ht * BHT
1539     Innovation = Y - HXb
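    # Illustrative sketch of the PSAS formulation: the minimization is performed
    # in observation (dual) space on a vector w of the size of Y, with the cost
    #   J(w) = 1/2 w^T (H B H^T + R) w - w^T d,   d = Y - H(Xb),
    # and the analysis is recovered in state space as Xa = Xb + B H^T w.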
1540     #
1541     # Starting point of the optimization
1542     Xini = numpy.zeros(Xb.shape)
1543     #
1544     # Definition of the cost function
1545     # -------------------------------
1546     def CostFunction(w):
1547         _W = numpy.asmatrix(numpy.ravel( w )).T
1548         if selfA._parameters["StoreInternalVariables"] or \
1549             selfA._toStore("CurrentState") or \
1550             selfA._toStore("CurrentOptimum"):
1551             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1552         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1553             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1554             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1555         if selfA._toStore("InnovationAtCurrentState"):
1556             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1557         #
1558         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
1559         Jo  = float( - _W.T * Innovation )
1560         J   = Jb + Jo
1561         #
1562         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1563         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1564         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1565         selfA.StoredVariables["CostFunctionJ" ].store( J )
1566         if selfA._toStore("IndexOfOptimum") or \
1567             selfA._toStore("CurrentOptimum") or \
1568             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1569             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1570             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1571             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1572             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1573         if selfA._toStore("IndexOfOptimum"):
1574             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1575         if selfA._toStore("CurrentOptimum"):
1576             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1577         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1578             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1579         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1580             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1581         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1582             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1583         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1584             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1585         return J
1586     #
1587     def GradientOfCostFunction(w):
1588         _W = numpy.asmatrix(numpy.ravel( w )).T
1589         GradJb  = HBHTpR * _W
1590         GradJo  = - Innovation
1591         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1592         return GradJ
1593     #
1594     # Minimization of the functional
1595     # ------------------------------
1596     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1597     #
1598     if selfA._parameters["Minimizer"] == "LBFGSB":
1599         # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1600         if "0.19" <= scipy.version.version <= "1.1.0":
1601             import lbfgsbhlt as optimiseur
1602         else:
1603             import scipy.optimize as optimiseur
1604         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1605             func        = CostFunction,
1606             x0          = Xini,
1607             fprime      = GradientOfCostFunction,
1608             args        = (),
1609             bounds      = selfA._parameters["Bounds"],
1610             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1611             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1612             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1613             iprint      = selfA._parameters["optiprint"],
1614             )
1615         nfeval = Informations['funcalls']
1616         rc     = Informations['warnflag']
1617     elif selfA._parameters["Minimizer"] == "TNC":
1618         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1619             func        = CostFunction,
1620             x0          = Xini,
1621             fprime      = GradientOfCostFunction,
1622             args        = (),
1623             bounds      = selfA._parameters["Bounds"],
1624             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1625             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1626             ftol        = selfA._parameters["CostDecrementTolerance"],
1627             messages    = selfA._parameters["optmessages"],
1628             )
1629     elif selfA._parameters["Minimizer"] == "CG":
1630         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1631             f           = CostFunction,
1632             x0          = Xini,
1633             fprime      = GradientOfCostFunction,
1634             args        = (),
1635             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1636             gtol        = selfA._parameters["GradientNormTolerance"],
1637             disp        = selfA._parameters["optdisp"],
1638             full_output = True,
1639             )
1640     elif selfA._parameters["Minimizer"] == "NCG":
1641         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1642             f           = CostFunction,
1643             x0          = Xini,
1644             fprime      = GradientOfCostFunction,
1645             args        = (),
1646             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1647             avextol     = selfA._parameters["CostDecrementTolerance"],
1648             disp        = selfA._parameters["optdisp"],
1649             full_output = True,
1650             )
1651     elif selfA._parameters["Minimizer"] == "BFGS":
1652         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1653             f           = CostFunction,
1654             x0          = Xini,
1655             fprime      = GradientOfCostFunction,
1656             args        = (),
1657             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1658             gtol        = selfA._parameters["GradientNormTolerance"],
1659             disp        = selfA._parameters["optdisp"],
1660             full_output = True,
1661             )
1662     else:
1663         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1664     #
1665     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1666     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1667     #
1668     # Workaround for a TNC bug on the returned Minimum
1669     # ------------------------------------------------
1670     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1671         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1672         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1673     else:
1674         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1675     #
1676     # Retrieval of the analysis
1677     # --------------------------
1678     Xa = Minimum
1679     #
1680     selfA.StoredVariables["Analysis"].store( Xa )
1681     #
1682     if selfA._toStore("OMA") or \
1683         selfA._toStore("SigmaObs2") or \
1684         selfA._toStore("SimulationQuantiles") or \
1685         selfA._toStore("SimulatedObservationAtOptimum"):
1686         if selfA._toStore("SimulatedObservationAtCurrentState"):
1687             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1688         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1689             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1690         else:
1691             HXa = Hm( Xa )
1692     #
1693     # Computation of the analysis covariance
1694     # ---------------------------------------
1695     if selfA._toStore("APosterioriCovariance") or \
1696         selfA._toStore("SimulationQuantiles") or \
1697         selfA._toStore("JacobianMatrixAtOptimum") or \
1698         selfA._toStore("KalmanGainAtOptimum"):
1699         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1700         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1701     if selfA._toStore("APosterioriCovariance") or \
1702         selfA._toStore("SimulationQuantiles") or \
1703         selfA._toStore("KalmanGainAtOptimum"):
1704         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1705         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1706     if selfA._toStore("APosterioriCovariance") or \
1707         selfA._toStore("SimulationQuantiles"):
1708         BI = B.getI()
1709         RI = R.getI()
1710         HessienneI = []
1711         nb = Xa.size
1712         for i in range(nb):
1713             _ee    = numpy.matrix(numpy.zeros(nb)).T
1714             _ee[i] = 1.
1715             _HtEE  = numpy.dot(HtM,_ee)
1716             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1717             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1718         HessienneI = numpy.matrix( HessienneI )
1719         A = HessienneI.I
1720         if min(A.shape) != max(A.shape):
1721             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, although it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1722         if (numpy.diag(A) < 0).any():
1723             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1724         if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
1725             try:
1726                 L = numpy.linalg.cholesky( A )
1727             except numpy.linalg.LinAlgError:
1728                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1729     if selfA._toStore("APosterioriCovariance"):
1730         selfA.StoredVariables["APosterioriCovariance"].store( A )
1731     if selfA._toStore("JacobianMatrixAtOptimum"):
1732         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1733     if selfA._toStore("KalmanGainAtOptimum"):
1734         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1735         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1736         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1737     #
1738     # Additional computations and/or storage
1739     # ---------------------------------------
1740     if selfA._toStore("Innovation") or \
1741         selfA._toStore("SigmaObs2") or \
1742         selfA._toStore("MahalanobisConsistency") or \
1743         selfA._toStore("OMB"):
1744         d  = Y - HXb
1745     if selfA._toStore("Innovation"):
1746         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1747     if selfA._toStore("BMA"):
1748         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1749     if selfA._toStore("OMA"):
1750         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1751     if selfA._toStore("OMB"):
1752         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1753     if selfA._toStore("SigmaObs2"):
1754         TraceR = R.trace(Y.size)
1755         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1756     if selfA._toStore("MahalanobisConsistency"):
1757         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1758     if selfA._toStore("SimulationQuantiles"):
1759         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1760         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1761         YfQ  = None
1762         for i in range(nech):
1763             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1764                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1765                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1766                 Yr = HXa + dYr
1767             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1768                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1769                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1770             if YfQ is None:
1771                 YfQ = Yr
1772             else:
1773                 YfQ = numpy.hstack((YfQ,Yr))
1774         YfQ.sort(axis=-1)
1775         YQ = None
1776         for quantile in selfA._parameters["Quantiles"]:
1777             if not (0. <= float(quantile) <= 1.): continue
1778             indice = int(nech * float(quantile) - 1./nech)
1779             if YQ is None: YQ = YfQ[:,indice]
1780             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1781         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1782     if selfA._toStore("SimulatedObservationAtBackground"):
1783         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1784     if selfA._toStore("SimulatedObservationAtOptimum"):
1785         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1786     #
1787     return 0
1788
1789 # ==============================================================================
1790 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
1791     """
1792     Stochastic EnKF (Evensen 1994, Burgers 1998)
1793
1794     selfA is identical to the "self" of the calling algorithm and holds its
1795     values.
1796     """
1797     if selfA._parameters["EstimationOf"] == "Parameters":
1798         selfA._parameters["StoreInternalVariables"] = True
1799     #
1800     # Operators
1801     # ---------
1802     H = HO["Direct"].appliedControledFormTo
1803     #
1804     if selfA._parameters["EstimationOf"] == "State":
1805         M = EM["Direct"].appliedControledFormTo
1806     #
1807     if CM is not None and "Tangent" in CM and U is not None:
1808         Cm = CM["Tangent"].asMatrix(Xb)
1809     else:
1810         Cm = None
1811     #
1812     # Number of steps identical to the number of observation steps
1813     # -------------------------------------------------------------
1814     if hasattr(Y,"stepnumber"):
1815         duration = Y.stepnumber()
1816         __p = numpy.cumprod(Y.shape())[-1]
1817     else:
1818         duration = 2
1819         __p = numpy.array(Y).size
1820     #
1821     # Precomputation of the inverses of B and R
1822     # ------------------------------------------
1823     if selfA._parameters["StoreInternalVariables"] \
1824         or selfA._toStore("CostFunctionJ") \
1825         or selfA._toStore("CostFunctionJb") \
1826         or selfA._toStore("CostFunctionJo") \
1827         or selfA._toStore("CurrentOptimum") \
1828         or selfA._toStore("APosterioriCovariance"):
1829         BI = B.getI()
1830         RI = R.getI()
1831     #
1832     # Initialization
1833     # --------------
1834     __n = Xb.size
1835     __m = selfA._parameters["NumberOfMembers"]
1836     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1837     else:                         Pn = B
1838     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1839     else:                         Rn = R
1840     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1841     else:                         Qn = Q
1842     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1843     #
1844     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1845         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
1846         if selfA._toStore("APosterioriCovariance"):
1847             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1848             covarianceXa = Pn
1849     #
1850     previousJMinimum = numpy.finfo(float).max
1851     #
1852     for step in range(duration-1):
1853         if hasattr(Y,"store"):
1854             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
1855         else:
1856             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
1857         #
1858         if U is not None:
1859             if hasattr(U,"store") and len(U)>1:
1860                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1861             elif hasattr(U,"store") and len(U)==1:
1862                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1863             else:
1864                 Un = numpy.asmatrix(numpy.ravel( U )).T
1865         else:
1866             Un = None
1867         #
1868         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1869             Xn = CovarianceInflation( Xn,
1870                 selfA._parameters["InflationType"],
1871                 selfA._parameters["InflationFactor"],
1872                 )
1873         #
1874         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1875             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1876                 argsAsSerie = True,
1877                 returnSerieAsArrayMatrix = True )
1878             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1879             Xn_predicted = EMX + qi
1880             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1881                 argsAsSerie = True,
1882                 returnSerieAsArrayMatrix = True )
1883             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, this duplicates it!
1884                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1885                 Xn_predicted = Xn_predicted + Cm * Un
1886         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1887             # --- > By principle, M = Id, Q = 0
1888             Xn_predicted = Xn
1889             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1890                 argsAsSerie = True,
1891                 returnSerieAsArrayMatrix = True )
1892         #
1893         # Mean of forecast and observation of forecast
1894         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1895         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1896         #
1897         #--------------------------
1898         if VariantM == "KalmanFilterFormula05":
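            # Illustrative note: classical stochastic EnKF update, with the gain
            # Kn = Pf H^T (H Pf H^T + R)^-1 estimated from the ensemble anomalies
            # and each member updated with its own perturbed observation
            # Ynpu + ri (Burgers et al. 1998).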
1899             PfHT, HPfHT = 0., 0.
1900             for i in range(__m):
1901                 Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
1902                 Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
1903                 PfHT  += Exfi * Eyfi.T
1904                 HPfHT += Eyfi * Eyfi.T
1905             PfHT  = (1./(__m-1)) * PfHT
1906             HPfHT = (1./(__m-1)) * HPfHT
1907             Kn     = PfHT * ( R + HPfHT ).I
1908             del PfHT, HPfHT
1909             #
1910             for i in range(__m):
1911                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
1912                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
1913         #--------------------------
1914         elif VariantM == "KalmanFilterFormula16":
1915             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
1916             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1917             #
1918             EaX   = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
1919             EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
1920             #
1921             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
1922             #
1923             for i in range(__m):
1924                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
1925         #--------------------------
1926         else:
1927             raise ValueError("VariantM has to be chosen from the authorized methods list.")
1928         #
1929         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1930             Xn = CovarianceInflation( Xn,
1931                 selfA._parameters["InflationType"],
1932                 selfA._parameters["InflationFactor"],
1933                 )
1934         #
1935         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1936         #--------------------------
1937         #
1938         if selfA._parameters["StoreInternalVariables"] \
1939             or selfA._toStore("CostFunctionJ") \
1940             or selfA._toStore("CostFunctionJb") \
1941             or selfA._toStore("CostFunctionJo") \
1942             or selfA._toStore("APosterioriCovariance") \
1943             or selfA._toStore("InnovationAtCurrentAnalysis") \
1944             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1945             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1946             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1947             _Innovation = Ynpu - _HXa
1948         #
1949         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1950         # ---> with the analysis
1951         selfA.StoredVariables["Analysis"].store( Xa )
1952         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1953             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1954         if selfA._toStore("InnovationAtCurrentAnalysis"):
1955             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1956         # ---> with the current state
1957         if selfA._parameters["StoreInternalVariables"] \
1958             or selfA._toStore("CurrentState"):
1959             selfA.StoredVariables["CurrentState"].store( Xn )
1960         if selfA._toStore("ForecastState"):
1961             selfA.StoredVariables["ForecastState"].store( EMX )
1962         if selfA._toStore("BMA"):
1963             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1964         if selfA._toStore("InnovationAtCurrentState"):
1965             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1966         if selfA._toStore("SimulatedObservationAtCurrentState") \
1967             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1968             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1969         # ---> others
1970         if selfA._parameters["StoreInternalVariables"] \
1971             or selfA._toStore("CostFunctionJ") \
1972             or selfA._toStore("CostFunctionJb") \
1973             or selfA._toStore("CostFunctionJo") \
1974             or selfA._toStore("CurrentOptimum") \
1975             or selfA._toStore("APosterioriCovariance"):
1976             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1977             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1978             J   = Jb + Jo
1979             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1980             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1981             selfA.StoredVariables["CostFunctionJ" ].store( J )
1982             #
1983             if selfA._toStore("IndexOfOptimum") \
1984                 or selfA._toStore("CurrentOptimum") \
1985                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1986                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1987                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1988                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1989                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1990             if selfA._toStore("IndexOfOptimum"):
1991                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1992             if selfA._toStore("CurrentOptimum"):
1993                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1994             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1995                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1996             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1997                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1998             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1999                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2000             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2001                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2002         if selfA._toStore("APosterioriCovariance"):
2003             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2004             Pn = Eai @ Eai.T
2005             Pn = 0.5 * (Pn + Pn.T)
2006             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2007         if selfA._parameters["EstimationOf"] == "Parameters" \
2008             and J < previousJMinimum:
2009             previousJMinimum    = J
2010             XaMin               = Xa
2011             if selfA._toStore("APosterioriCovariance"):
2012                 covarianceXaMin = Pn
2013     #
2014     # Additional final storage of the optimum for parameter estimation
2015     # -----------------------------------------------------------------
2016     if selfA._parameters["EstimationOf"] == "Parameters":
2017         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2018         selfA.StoredVariables["Analysis"].store( XaMin )
2019         if selfA._toStore("APosterioriCovariance"):
2020             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2021         if selfA._toStore("BMA"):
2022             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2023     #
2024     return 0
2025
2026 # ==============================================================================
2027 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2028     """
2029     Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)
2030
2031     selfA is identical to the "self" of the calling algorithm and holds its
2032     values.
2033     """
2034     if selfA._parameters["EstimationOf"] == "Parameters":
2035         selfA._parameters["StoreInternalVariables"] = True
2036     #
2037     # Operators
2038     # ---------
2039     H = HO["Direct"].appliedControledFormTo
2040     #
2041     if selfA._parameters["EstimationOf"] == "State":
2042         M = EM["Direct"].appliedControledFormTo
2043     #
2044     if CM is not None and "Tangent" in CM and U is not None:
2045         Cm = CM["Tangent"].asMatrix(Xb)
2046     else:
2047         Cm = None
2048     #
2049     # Number of steps identical to the number of observation steps
2050     # -------------------------------------------------------------
2051     if hasattr(Y,"stepnumber"):
2052         duration = Y.stepnumber()
2053         __p = numpy.cumprod(Y.shape())[-1]
2054     else:
2055         duration = 2
2056         __p = numpy.array(Y).size
2057     #
2058     # Precomputation of the inverses of B and R
2059     # ------------------------------------------
2060     if selfA._parameters["StoreInternalVariables"] \
2061         or selfA._toStore("CostFunctionJ") \
2062         or selfA._toStore("CostFunctionJb") \
2063         or selfA._toStore("CostFunctionJo") \
2064         or selfA._toStore("CurrentOptimum") \
2065         or selfA._toStore("APosterioriCovariance"):
2066         BI = B.getI()
2067         RI = R.getI()
2068     elif VariantM != "KalmanFilterFormula":
2069         RI = R.getI()
2070     if VariantM == "KalmanFilterFormula":
2071         RIdemi = R.choleskyI()
2072     #
2073     # Initialization
2074     # --------------
2075     __n = Xb.size
2076     __m = selfA._parameters["NumberOfMembers"]
2077     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2078     else:                         Pn = B
2079     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2080     else:                         Rn = R
2081     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2082     else:                         Qn = Q
2083     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2084     #
2085     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2086         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2087         if selfA._toStore("APosterioriCovariance"):
2088             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2089             covarianceXa = Pn
2090     #
2091     previousJMinimum = numpy.finfo(float).max
2092     #
2093     for step in range(duration-1):
2094         if hasattr(Y,"store"):
2095             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2096         else:
2097             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2098         #
2099         if U is not None:
2100             if hasattr(U,"store") and len(U)>1:
2101                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2102             elif hasattr(U,"store") and len(U)==1:
2103                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2104             else:
2105                 Un = numpy.asmatrix(numpy.ravel( U )).T
2106         else:
2107             Un = None
2108         #
2109         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2110             Xn = CovarianceInflation( Xn,
2111                 selfA._parameters["InflationType"],
2112                 selfA._parameters["InflationFactor"],
2113                 )
2114         #
2115         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2116             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2117                 argsAsSerie = True,
2118                 returnSerieAsArrayMatrix = True )
2119             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2120             Xn_predicted = EMX + qi
2121             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2122                 argsAsSerie = True,
2123                 returnSerieAsArrayMatrix = True )
2124             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, this duplicates it!
2125                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2126                 Xn_predicted = Xn_predicted + Cm * Un
2127         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2128             # --- > By principle, M = Id, Q = 0
2129             Xn_predicted = Xn
2130             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2131                 argsAsSerie = True,
2132                 returnSerieAsArrayMatrix = True )
2133         #
2134         # Mean of forecast and observation of forecast
2135         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2136         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2137         #
2138         # Anomalies
2139         EaX   = EnsembleOfAnomalies( Xn_predicted )
2140         EaHX  = numpy.array(HX_predicted - Hfm)
2141         #
2142         #--------------------------
2143         if VariantM == "KalmanFilterFormula":
2144             mS    = RIdemi * EaHX / numpy.sqrt(__m-1)
2145             delta = RIdemi * ( Ynpu - Hfm )
2146             mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2147             vw    = mT @ mS.transpose() @ delta
2148             #
2149             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2150             mU    = numpy.eye(__m)
2151             #
2152             EaX   = EaX / numpy.sqrt(__m-1)
2153             Xn    = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
2154         #--------------------------
2155         elif VariantM == "Variational":
2156             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2157             def CostFunction(w):
2158                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2159                 _Jo = 0.5 * _A.T @ (RI * _A)
2160                 _Jb = 0.5 * (__m-1) * w.T @ w
2161                 _J  = _Jo + _Jb
2162                 return float(_J)
2163             def GradientOfCostFunction(w):
2164                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2165                 _GradJo = - EaHX.T @ (RI * _A)
2166                 _GradJb = (__m-1) * w.reshape((__m,1))
2167                 _GradJ  = _GradJo + _GradJb
2168                 return numpy.ravel(_GradJ)
2169             vw = scipy.optimize.fmin_cg(
2170                 f           = CostFunction,
2171                 x0          = numpy.zeros(__m),
2172                 fprime      = GradientOfCostFunction,
2173                 args        = (),
2174                 disp        = False,
2175                 )
2176             #
2177             Hto = EaHX.T @ (RI * EaHX)
2178             Htb = (__m-1) * numpy.eye(__m)
2179             Hta = Hto + Htb
2180             #
2181             Pta = numpy.linalg.inv( Hta )
2182             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2183             #
2184             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
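            # The "Variational" update above minimizes, in ensemble weight space,
            #   J(w) = 0.5*(d - EaHX w)^T R^{-1} (d - EaHX w) + 0.5*(__m-1)*w^T w
            # with d = Ynpu - H(Xfm), then regenerates the analysis anomalies with
            # EWa = ((__m-1)*Hta^{-1})^{1/2}, the scaled square root of the inverse
            # Hessian of J.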
2185         #--------------------------
2186         elif VariantM == "FiniteSize11": # Boc2011 gauge
2187             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2188             def CostFunction(w):
2189                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2190                 _Jo = 0.5 * _A.T @ (RI * _A)
2191                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2192                 _J  = _Jo + _Jb
2193                 return float(_J)
2194             def GradientOfCostFunction(w):
2195                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2196                 _GradJo = - EaHX.T @ (RI * _A)
2197                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2198                 _GradJ  = _GradJo + _GradJb
2199                 return numpy.ravel(_GradJ)
2200             vw = scipy.optimize.fmin_cg(
2201                 f           = CostFunction,
2202                 x0          = numpy.zeros(__m),
2203                 fprime      = GradientOfCostFunction,
2204                 args        = (),
2205                 disp        = False,
2206                 )
2207             #
2208             Hto = EaHX.T @ (RI * EaHX)
2209             Htb = __m * \
2210                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2211                 / (1 + 1/__m + vw.T @ vw)**2
2212             Hta = Hto + Htb
2213             #
2214             Pta = numpy.linalg.inv( Hta )
2215             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2216             #
2217             Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2218         #--------------------------
2219         elif VariantM == "FiniteSize15": # Boc2015 gauge
2220             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2221             def CostFunction(w):
2222                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2223                 _Jo = 0.5 * _A.T @ (RI * _A)
2224                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2225                 _J  = _Jo + _Jb
2226                 return float(_J)
2227             def GradientOfCostFunction(w):
2228                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2229                 _GradJo = - EaHX.T @ (RI * _A)
2230                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2231                 _GradJ  = _GradJo + _GradJb
2232                 return numpy.ravel(_GradJ)
2233             vw = scipy.optimize.fmin_cg(
2234                 f           = CostFunction,
2235                 x0          = numpy.zeros(__m),
2236                 fprime      = GradientOfCostFunction,
2237                 args        = (),
2238                 disp        = False,
2239                 )
2240             #
2241             Hto = EaHX.T @ (RI * EaHX)
2242             Htb = (__m+1) * \
2243                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2244                 / (1 + 1/__m + vw.T @ vw)**2
2245             Hta = Hto + Htb
2246             #
2247             Pta = numpy.linalg.inv( Hta )
2248             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2249             #
2250             Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2251         #--------------------------
2252         elif VariantM == "FiniteSize16": # Boc2016 gauge
2253             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2254             def CostFunction(w):
2255                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2256                 _Jo = 0.5 * _A.T @ (RI * _A)
2257                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2258                 _J  = _Jo + _Jb
2259                 return float(_J)
2260             def GradientOfCostFunction(w):
2261                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2262                 _GradJo = - EaHX.T @ (RI * _A)
2263                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2264                 _GradJ  = _GradJo + _GradJb
2265                 return numpy.ravel(_GradJ)
2266             vw = scipy.optimize.fmin_cg(
2267                 f           = CostFunction,
2268                 x0          = numpy.zeros(__m),
2269                 fprime      = GradientOfCostFunction,
2270                 args        = (),
2271                 disp        = False,
2272                 )
2273             #
2274             Hto = EaHX.T @ (RI * EaHX)
2275             Htb = ((__m+1) / (__m-1)) * \
2276                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
2277                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2278             Hta = Hto + Htb
2279             #
2280             Pta = numpy.linalg.inv( Hta )
2281             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2282             #
2283             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2284         #--------------------------
2285         else:
2286             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2287         #
2288         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2289             Xn = CovarianceInflation( Xn,
2290                 selfA._parameters["InflationType"],
2291                 selfA._parameters["InflationFactor"],
2292                 )
2293         #
2294         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2295         #--------------------------
2296         #
2297         if selfA._parameters["StoreInternalVariables"] \
2298             or selfA._toStore("CostFunctionJ") \
2299             or selfA._toStore("CostFunctionJb") \
2300             or selfA._toStore("CostFunctionJo") \
2301             or selfA._toStore("APosterioriCovariance") \
2302             or selfA._toStore("InnovationAtCurrentAnalysis") \
2303             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2304             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2305             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2306             _Innovation = Ynpu - _HXa
2307         #
2308         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2309         # ---> with the analysis
2310         selfA.StoredVariables["Analysis"].store( Xa )
2311         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2312             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2313         if selfA._toStore("InnovationAtCurrentAnalysis"):
2314             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2315         # ---> with the current state
2316         if selfA._parameters["StoreInternalVariables"] \
2317             or selfA._toStore("CurrentState"):
2318             selfA.StoredVariables["CurrentState"].store( Xn )
2319         if selfA._toStore("ForecastState"):
2320             selfA.StoredVariables["ForecastState"].store( EMX )
2321         if selfA._toStore("BMA"):
2322             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2323         if selfA._toStore("InnovationAtCurrentState"):
2324             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
2325         if selfA._toStore("SimulatedObservationAtCurrentState") \
2326             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2327             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2328         # ---> others
2329         if selfA._parameters["StoreInternalVariables"] \
2330             or selfA._toStore("CostFunctionJ") \
2331             or selfA._toStore("CostFunctionJb") \
2332             or selfA._toStore("CostFunctionJo") \
2333             or selfA._toStore("CurrentOptimum") \
2334             or selfA._toStore("APosterioriCovariance"):
2335             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2336             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2337             J   = Jb + Jo
2338             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2339             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2340             selfA.StoredVariables["CostFunctionJ" ].store( J )
2341             #
2342             if selfA._toStore("IndexOfOptimum") \
2343                 or selfA._toStore("CurrentOptimum") \
2344                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2345                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2346                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2347                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2348                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2349             if selfA._toStore("IndexOfOptimum"):
2350                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2351             if selfA._toStore("CurrentOptimum"):
2352                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2353             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2354                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2355             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2356                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2357             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2358                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2359             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2360                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2361         if selfA._toStore("APosterioriCovariance"):
2362             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2363             Pn = Eai @ Eai.T
2364             Pn = 0.5 * (Pn + Pn.T)
2365             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2366         if selfA._parameters["EstimationOf"] == "Parameters" \
2367             and J < previousJMinimum:
2368             previousJMinimum    = J
2369             XaMin               = Xa
2370             if selfA._toStore("APosterioriCovariance"):
2371                 covarianceXaMin = Pn
2372     #
2373     # Additional final storage of the optimum for parameter estimation
2374     # ----------------------------------------------------------------------
2375     if selfA._parameters["EstimationOf"] == "Parameters":
2376         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2377         selfA.StoredVariables["Analysis"].store( XaMin )
2378         if selfA._toStore("APosterioriCovariance"):
2379             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2380         if selfA._toStore("BMA"):
2381             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2382     #
2383     return 0
2384
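# ==============================================================================
# Illustrative sketch (assumption: not part of the ADAO API, never called in
# this module). It reproduces, for plain NumPy arrays, the deterministic ETKF
# analysis of the "KalmanFilterFormula" branch above. The names E (state
# ensemble, n x m), HE (observed ensemble, p x m), y (observation vector of
# size p) and R (observation error covariance, p x p) are local conventions
# chosen for this example only; the square-root factor of R^{-1} is taken
# symmetric here, whereas the code above uses R.choleskyI().
def _etkf_analysis_sketch( E, HE, y, R ):
    "One deterministic ETKF analysis step on a plain NumPy ensemble (illustration)"
    import numpy, scipy.linalg
    __m    = E.shape[1]
    xf     = E.mean(axis=1).reshape((-1,1))                       # Forecast mean
    yf     = HE.mean(axis=1).reshape((-1,1))                      # Mean of the observed forecast
    EaX    = (E  - xf) / numpy.sqrt(__m-1)                        # Scaled state anomalies
    EaHX   = (HE - yf) / numpy.sqrt(__m-1)                        # Scaled observed anomalies
    RIdemi = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv(R)))  # Symmetric R^{-1/2}
    mS     = RIdemi @ EaHX
    delta  = RIdemi @ (numpy.ravel(y).reshape((-1,1)) - yf)
    mT     = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
    vw     = mT @ mS.T @ delta                                    # Weights of the analysis mean
    Tdemi  = numpy.real(scipy.linalg.sqrtm(mT))                   # Anomalies transform
    return xf + EaX @ ( vw + numpy.sqrt(__m-1) * Tdemi )          # Analysis ensemble (n x m)
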
2385 # ==============================================================================
2386 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2387     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2388     """
2389     Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013)
2390
2391     selfA is identical to the "self" of the calling algorithm and holds its
2392     values.
2393     """
2394     if selfA._parameters["EstimationOf"] == "Parameters":
2395         selfA._parameters["StoreInternalVariables"] = True
2396     #
2397     # Operators
2398     # ----------
2399     H = HO["Direct"].appliedControledFormTo
2400     #
2401     if selfA._parameters["EstimationOf"] == "State":
2402         M = EM["Direct"].appliedControledFormTo
2403     #
2404     if CM is not None and "Tangent" in CM and U is not None:
2405         Cm = CM["Tangent"].asMatrix(Xb)
2406     else:
2407         Cm = None
2408     #
2409     # Number of steps identical to the number of observation steps
2410     # -------------------------------------------------------
2411     if hasattr(Y,"stepnumber"):
2412         duration = Y.stepnumber()
2413         __p = numpy.cumprod(Y.shape())[-1]
2414     else:
2415         duration = 2
2416         __p = numpy.array(Y).size
2417     #
2418     # Precompute the inverses of B and R
2419     # ----------------------------------
2420     if selfA._parameters["StoreInternalVariables"] \
2421         or selfA._toStore("CostFunctionJ") \
2422         or selfA._toStore("CostFunctionJb") \
2423         or selfA._toStore("CostFunctionJo") \
2424         or selfA._toStore("CurrentOptimum") \
2425         or selfA._toStore("APosterioriCovariance"):
2426         BI = B.getI()
2427     RI = R.getI()
2428     #
2429     # Initialization
2430     # --------------
2431     __n = Xb.size
2432     __m = selfA._parameters["NumberOfMembers"]
2433     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2434     else:                         Pn = B
2435     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2436     else:                         Rn = R
2437     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2438     else:                         Qn = Q
2439     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2440     #
2441     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2442         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2443         if selfA._toStore("APosterioriCovariance"):
2444             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2445             covarianceXa = Pn
2446     #
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Assumed initialization, needed by the IndexMin computation below
2447     previousJMinimum = numpy.finfo(float).max
2448     #
2449     for step in range(duration-1):
2450         if hasattr(Y,"store"):
2451             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2452         else:
2453             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2454         #
2455         if U is not None:
2456             if hasattr(U,"store") and len(U)>1:
2457                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2458             elif hasattr(U,"store") and len(U)==1:
2459                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2460             else:
2461                 Un = numpy.asmatrix(numpy.ravel( U )).T
2462         else:
2463             Un = None
2464         #
2465         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2466             Xn = CovarianceInflation( Xn,
2467                 selfA._parameters["InflationType"],
2468                 selfA._parameters["InflationFactor"],
2469                 )
2470         #
2471         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2472             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2473                 argsAsSerie = True,
2474                 returnSerieAsArrayMatrix = True )
2475             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2476             Xn_predicted = EMX + qi
2477             if Cm is not None and Un is not None: # Beware: if Cm is also included in M, this is a duplicate!
2478                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2479                 Xn_predicted = Xn_predicted + Cm * Un
2480         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2481             # ---> By principle, M = Id, Q = 0
2482             Xn_predicted = Xn
2483         #
2484         #--------------------------
2485         if VariantM == "MLEF13":
2486             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2487             EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2488             Ua  = numpy.eye(__m)
2489             __j = 0
2490             Deltaw = 1
2491             if not BnotT:
2492                 Ta  = numpy.eye(__m)
2493             vw  = numpy.zeros(__m)
2494             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2495                 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2496                 #
2497                 if BnotT:
2498                     E1 = vx1 + _epsilon * EaX
2499                 else:
2500                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2501                 #
2502                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2503                     argsAsSerie = True,
2504                     returnSerieAsArrayMatrix = True )
2505                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2506                 #
2507                 if BnotT:
2508                     EaY = (HE2 - vy2) / _epsilon
2509                 else:
2510                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2511                 #
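                # Gauss-Newton step in ensemble weight space: GradJ is the gradient
                # and mH the approximate Hessian of the weight-space cost function.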
2512                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2513                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2514                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2515                 #
2516                 vw = vw + Deltaw
2517                 #
2518                 if not BnotT:
2519                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2520                 #
2521                 __j = __j + 1
2522             #
2523             if BnotT:
2524                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2525             #
2526             Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
2527         #--------------------------
2528         else:
2529             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2530         #
2531         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2532             Xn = CovarianceInflation( Xn,
2533                 selfA._parameters["InflationType"],
2534                 selfA._parameters["InflationFactor"],
2535                 )
2536         #
2537         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2538         #--------------------------
2539         #
2540         if selfA._parameters["StoreInternalVariables"] \
2541             or selfA._toStore("CostFunctionJ") \
2542             or selfA._toStore("CostFunctionJb") \
2543             or selfA._toStore("CostFunctionJo") \
2544             or selfA._toStore("APosterioriCovariance") \
2545             or selfA._toStore("InnovationAtCurrentAnalysis") \
2546             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2547             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2548             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2549             _Innovation = Ynpu - _HXa
2550         #
2551         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2552         # ---> with the analysis
2553         selfA.StoredVariables["Analysis"].store( Xa )
2554         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2555             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2556         if selfA._toStore("InnovationAtCurrentAnalysis"):
2557             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2558         # ---> with the current state
2559         if selfA._parameters["StoreInternalVariables"] \
2560             or selfA._toStore("CurrentState"):
2561             selfA.StoredVariables["CurrentState"].store( Xn )
2562         if selfA._toStore("ForecastState"):
2563             selfA.StoredVariables["ForecastState"].store( EMX )
2564         if selfA._toStore("BMA"):
2565             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2566         if selfA._toStore("InnovationAtCurrentState"):
2567             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2568         if selfA._toStore("SimulatedObservationAtCurrentState") \
2569             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2570             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2571         # ---> others
2572         if selfA._parameters["StoreInternalVariables"] \
2573             or selfA._toStore("CostFunctionJ") \
2574             or selfA._toStore("CostFunctionJb") \
2575             or selfA._toStore("CostFunctionJo") \
2576             or selfA._toStore("CurrentOptimum") \
2577             or selfA._toStore("APosterioriCovariance"):
2578             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2579             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2580             J   = Jb + Jo
2581             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2582             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2583             selfA.StoredVariables["CostFunctionJ" ].store( J )
2584             #
2585             if selfA._toStore("IndexOfOptimum") \
2586                 or selfA._toStore("CurrentOptimum") \
2587                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2588                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2589                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2590                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2591                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2592             if selfA._toStore("IndexOfOptimum"):
2593                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2594             if selfA._toStore("CurrentOptimum"):
2595                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2596             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2597                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2598             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2599                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2600             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2601                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2602             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2603                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2604         if selfA._toStore("APosterioriCovariance"):
2605             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2606             Pn = Eai @ Eai.T
2607             Pn = 0.5 * (Pn + Pn.T)
2608             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2609         if selfA._parameters["EstimationOf"] == "Parameters" \
2610             and J < previousJMinimum:
2611             previousJMinimum    = J
2612             XaMin               = Xa
2613             if selfA._toStore("APosterioriCovariance"):
2614                 covarianceXaMin = Pn
2615     #
2616     # Additional final storage of the optimum for parameter estimation
2617     # ----------------------------------------------------------------------
2618     if selfA._parameters["EstimationOf"] == "Parameters":
2619         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2620         selfA.StoredVariables["Analysis"].store( XaMin )
2621         if selfA._toStore("APosterioriCovariance"):
2622             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2623         if selfA._toStore("BMA"):
2624             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2625     #
2626     return 0
2627
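# ==============================================================================
# Illustrative sketch (assumption: not part of the ADAO API, never called in
# this module). It isolates the transformed weight-space Gauss-Newton iteration
# of the "MLEF13" branch above for plain NumPy inputs. Hfun (a callable
# observation operator acting on one state vector), RI (the p x p inverse of
# the observation error covariance) and the other names are local conventions
# chosen for this example only.
def _mlef_analysis_sketch( E, Hfun, y, RI, _e=1.e-7, _jmax=100 ):
    "Transformed MLEF analysis of a plain NumPy ensemble E (illustration)"
    import numpy, scipy.linalg
    __m    = E.shape[1]
    xf     = E.mean(axis=1).reshape((-1,1))
    EaX    = (E - xf) / numpy.sqrt(__m-1)
    Ta     = numpy.eye(__m)
    vw     = numpy.zeros((__m,1))
    Deltaw = numpy.full((__m,1), numpy.inf)
    __j    = 0
    while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
        vx1 = xf + EaX @ vw                                   # Candidate analysis mean
        E1  = vx1 + numpy.sqrt(__m-1) * EaX @ Ta              # Transformed ensemble
        HE2 = numpy.column_stack([ numpy.ravel(Hfun(E1[:,i])) for i in range(__m) ])
        vy2 = HE2.mean(axis=1).reshape((-1,1))
        EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
        GradJ  = vw - EaY.T @ (RI @ (numpy.ravel(y).reshape((-1,1)) - vy2))
        mH     = numpy.eye(__m) + EaY.T @ (RI @ EaY)          # Approximate Hessian
        Deltaw = - numpy.linalg.solve(mH, GradJ)              # Gauss-Newton step
        vw  = vw + Deltaw
        Ta  = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
        __j = __j + 1
    return vx1 + numpy.sqrt(__m-1) * EaX @ Ta                 # Analysis ensemble (n x m)
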
2628 # ==============================================================================
2629 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2630     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2631     """
2632     Iterative EnKF (Sakov 2012, Sakov 2018)
2633
2634     selfA is identical to the "self" of the calling algorithm and holds its
2635     values.
2636     """
2637     if selfA._parameters["EstimationOf"] == "Parameters":
2638         selfA._parameters["StoreInternalVariables"] = True
2639     #
2640     # Operators
2641     # ----------
2642     H = HO["Direct"].appliedControledFormTo
2643     #
2644     if selfA._parameters["EstimationOf"] == "State":
2645         M = EM["Direct"].appliedControledFormTo
2646     #
2647     if CM is not None and "Tangent" in CM and U is not None:
2648         Cm = CM["Tangent"].asMatrix(Xb)
2649     else:
2650         Cm = None
2651     #
2652     # Number of steps identical to the number of observation steps
2653     # -------------------------------------------------------
2654     if hasattr(Y,"stepnumber"):
2655         duration = Y.stepnumber()
2656         __p = numpy.cumprod(Y.shape())[-1]
2657     else:
2658         duration = 2
2659         __p = numpy.array(Y).size
2660     #
2661     # Precompute the inverses of B and R
2662     # ----------------------------------
2663     if selfA._parameters["StoreInternalVariables"] \
2664         or selfA._toStore("CostFunctionJ") \
2665         or selfA._toStore("CostFunctionJb") \
2666         or selfA._toStore("CostFunctionJo") \
2667         or selfA._toStore("CurrentOptimum") \
2668         or selfA._toStore("APosterioriCovariance"):
2669         BI = B.getI()
2670     RI = R.getI()
2671     #
2672     # Initialization
2673     # --------------
2674     __n = Xb.size
2675     __m = selfA._parameters["NumberOfMembers"]
2676     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2677     else:                         Pn = B
2678     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2679     else:                         Rn = R
2680     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2681     else:                         Qn = Q
2682     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2683     #
2684     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2685         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2686         if selfA._toStore("APosterioriCovariance"):
2687             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2688             covarianceXa = Pn
2689     #
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Assumed initialization, needed by the IndexMin computation below
2690     previousJMinimum = numpy.finfo(float).max
2691     #
2692     for step in range(duration-1):
2693         if hasattr(Y,"store"):
2694             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2695         else:
2696             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2697         #
2698         if U is not None:
2699             if hasattr(U,"store") and len(U)>1:
2700                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2701             elif hasattr(U,"store") and len(U)==1:
2702                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2703             else:
2704                 Un = numpy.asmatrix(numpy.ravel( U )).T
2705         else:
2706             Un = None
2707         #
2708         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2709             Xn = CovarianceInflation( Xn,
2710                 selfA._parameters["InflationType"],
2711                 selfA._parameters["InflationFactor"],
2712                 )
2713         #
2714         #--------------------------
2715         if VariantM == "IEnKF12":
2716             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2717             EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
2718             __j = 0
2719             Deltaw = 1
2720             if not BnotT:
2721                 Ta  = numpy.eye(__m)
2722             vw  = numpy.zeros(__m)
2723             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2724                 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2725                 #
2726                 if BnotT:
2727                     E1 = vx1 + _epsilon * EaX
2728                 else:
2729                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2730                 #
2731                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2732                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2733                         argsAsSerie = True,
2734                         returnSerieAsArrayMatrix = True )
2735                 elif selfA._parameters["EstimationOf"] == "Parameters":
2736                     # ---> By principle, M = Id
2737                     E2 = Xn
2738                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2739                 vy1 = H((vx2, Un)).reshape((__p,-1))
2740                 #
2741                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2742                     argsAsSerie = True,
2743                     returnSerieAsArrayMatrix = True )
2744                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2745                 #
2746                 if BnotT:
2747                     EaY = (HE2 - vy2) / _epsilon
2748                 else:
2749                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2750                 #
2751                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2752                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2753                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2754                 #
2755                 vw = vw + Deltaw
2756                 #
2757                 if not BnotT:
2758                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2759                 #
2760                 __j = __j + 1
2761             #
2762             A2 = EnsembleOfAnomalies( E2 )
2763             #
2764             if BnotT:
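                # Bundle variant: the epsilon-scaled finite-difference anomalies
                # are rescaled into analysis anomalies through Ta and sqrt(__m-1).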
2765                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2766                 A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
2767             #
2768             Xn = vx2 + A2
2769         #--------------------------
2770         else:
2771             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2772         #
2773         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2774             Xn = CovarianceInflation( Xn,
2775                 selfA._parameters["InflationType"],
2776                 selfA._parameters["InflationFactor"],
2777                 )
2778         #
2779         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2780         #--------------------------
2781         #
2782         if selfA._parameters["StoreInternalVariables"] \
2783             or selfA._toStore("CostFunctionJ") \
2784             or selfA._toStore("CostFunctionJb") \
2785             or selfA._toStore("CostFunctionJo") \
2786             or selfA._toStore("APosterioriCovariance") \
2787             or selfA._toStore("InnovationAtCurrentAnalysis") \
2788             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2789             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2790             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2791             _Innovation = Ynpu - _HXa
2792         #
2793         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2794         # ---> with the analysis
2795         selfA.StoredVariables["Analysis"].store( Xa )
2796         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2797             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2798         if selfA._toStore("InnovationAtCurrentAnalysis"):
2799             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2800         # ---> with the current state
2801         if selfA._parameters["StoreInternalVariables"] \
2802             or selfA._toStore("CurrentState"):
2803             selfA.StoredVariables["CurrentState"].store( Xn )
2804         if selfA._toStore("ForecastState"):
2805             selfA.StoredVariables["ForecastState"].store( E2 )
2806         if selfA._toStore("BMA"):
2807             selfA.StoredVariables["BMA"].store( E2 - Xa )
2808         if selfA._toStore("InnovationAtCurrentState"):
2809             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2810         if selfA._toStore("SimulatedObservationAtCurrentState") \
2811             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2812             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2813         # ---> others
2814         if selfA._parameters["StoreInternalVariables"] \
2815             or selfA._toStore("CostFunctionJ") \
2816             or selfA._toStore("CostFunctionJb") \
2817             or selfA._toStore("CostFunctionJo") \
2818             or selfA._toStore("CurrentOptimum") \
2819             or selfA._toStore("APosterioriCovariance"):
2820             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2821             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2822             J   = Jb + Jo
2823             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2824             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2825             selfA.StoredVariables["CostFunctionJ" ].store( J )
2826             #
2827             if selfA._toStore("IndexOfOptimum") \
2828                 or selfA._toStore("CurrentOptimum") \
2829                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2830                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2831                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2832                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2833                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2834             if selfA._toStore("IndexOfOptimum"):
2835                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2836             if selfA._toStore("CurrentOptimum"):
2837                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2838             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2839                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2840             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2841                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2842             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2843                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2844             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2845                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2846         if selfA._toStore("APosterioriCovariance"):
2847             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2848             Pn = Eai @ Eai.T
2849             Pn = 0.5 * (Pn + Pn.T)
2850             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2851         if selfA._parameters["EstimationOf"] == "Parameters" \
2852             and J < previousJMinimum:
2853             previousJMinimum    = J
2854             XaMin               = Xa
2855             if selfA._toStore("APosterioriCovariance"):
2856                 covarianceXaMin = Pn
2857     #
2858     # Additional final storage of the optimum for parameter estimation
2859     # ----------------------------------------------------------------------
2860     if selfA._parameters["EstimationOf"] == "Parameters":
2861         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2862         selfA.StoredVariables["Analysis"].store( XaMin )
2863         if selfA._toStore("APosterioriCovariance"):
2864             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2865         if selfA._toStore("BMA"):
2866             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2867     #
2868     return 0
2869
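# ==============================================================================
# Illustrative sketch (assumption: not part of the ADAO API, never called in
# this module). It shows, for a plain NumPy analysis ensemble Xn (n x m), the
# empirical a posteriori covariance estimate stored by the filters above when
# "APosterioriCovariance" is requested.
def _ensemble_covariance_sketch( Xn ):
    "Symmetrized empirical covariance of an analysis ensemble (illustration)"
    import numpy
    __m = Xn.shape[1]
    Eai = (Xn - Xn.mean(axis=1).reshape((-1,1))) / numpy.sqrt(__m-1)  # Scaled anomalies
    Pn  = Eai @ Eai.T
    return 0.5 * (Pn + Pn.T)   # Symmetrization guards against round-off asymmetry
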
2870 # ==============================================================================
2871 if __name__ == "__main__":
2872     print('\n AUTODIAGNOSTIC\n')