Salome HOME
Improvement and extension of 3DVAR algorithm
[modules/adao.git] / src / daComposant / daCore / NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Définit les objets numériques génériques.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( paire ):
38     assert len(paire) == 2, "Incorrect number of arguments"
39     X, funcrepr = paire
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     __HX  = __fonction( __X )
46     return numpy.ravel( __HX )
47
48 # ==============================================================================
49 class FDApproximation(object):
50     """
51     Cette classe sert d'interface pour définir les opérateurs approximés. A la
52     création d'un objet, en fournissant une fonction "Function", on obtient un
53     objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
54     "AdjointOperator". On contrôle l'approximation DF avec l'incrément
55     multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
56     "dX" qui sera multiplié par "increment" (donc en %), et on effectue de DF
57     centrées si le booléen "centeredDF" est vrai.
58     """
59     def __init__(self,
60             name                  = "FDApproximation",
61             Function              = None,
62             centeredDF            = False,
63             increment             = 0.01,
64             dX                    = None,
65             avoidingRedundancy    = True,
66             toleranceInRedundancy = 1.e-18,
67             lenghtOfRedundancy    = -1,
68             mpEnabled             = False,
69             mpWorkers             = None,
70             mfEnabled             = False,
71             ):
72         self.__name = str(name)
73         if mpEnabled:
74             try:
75                 import multiprocessing
76                 self.__mpEnabled = True
77             except ImportError:
78                 self.__mpEnabled = False
79         else:
80             self.__mpEnabled = False
81         self.__mpWorkers = mpWorkers
82         if self.__mpWorkers is not None and self.__mpWorkers < 1:
83             self.__mpWorkers = None
84         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
85         #
86         if mfEnabled:
87             self.__mfEnabled = True
88         else:
89             self.__mfEnabled = False
90         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
91         #
92         if avoidingRedundancy:
93             self.__avoidRC = True
94             self.__tolerBP = float(toleranceInRedundancy)
95             self.__lenghtRJ = int(lenghtOfRedundancy)
96             self.__listJPCP = [] # Jacobian Previous Calculated Points
97             self.__listJPCI = [] # Jacobian Previous Calculated Increment
98             self.__listJPCR = [] # Jacobian Previous Calculated Results
99             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
100             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
101         else:
102             self.__avoidRC = False
103         #
104         if self.__mpEnabled:
105             if isinstance(Function,types.FunctionType):
106                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
107                 self.__userFunction__name = Function.__name__
108                 try:
109                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
110                 except:
111                     mod = os.path.abspath(Function.__globals__['__file__'])
112                 if not os.path.isfile(mod):
113                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
114                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
115                 self.__userFunction__path = os.path.dirname(mod)
116                 del mod
117                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
118                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
119             elif isinstance(Function,types.MethodType):
120                 logging.debug("FDA Calculs en multiprocessing : MethodType")
121                 self.__userFunction__name = Function.__name__
122                 try:
123                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
124                 except:
125                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
126                 if not os.path.isfile(mod):
127                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
128                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
129                 self.__userFunction__path = os.path.dirname(mod)
130                 del mod
131                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
132                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
133             else:
134                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
135         else:
136             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
137             self.__userFunction = self.__userOperator.appliedTo
138         #
139         self.__centeredDF = bool(centeredDF)
140         if abs(float(increment)) > 1.e-15:
141             self.__increment  = float(increment)
142         else:
143             self.__increment  = 0.01
144         if dX is None:
145             self.__dX     = None
146         else:
147             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
148         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
149         if self.__avoidRC:
150             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
151
152     # ---------------------------------------------------------
153     def __doublon__(self, e, l, n, v=None):
154         __ac, __iac = False, -1
155         for i in range(len(l)-1,-1,-1):
156             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
157                 __ac, __iac = True, i
158                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
159                 break
160         return __ac, __iac
161
162     # ---------------------------------------------------------
163     def DirectOperator(self, X ):
164         """
165         Calcul du direct à l'aide de la fonction fournie.
166         """
167         logging.debug("FDA Calcul DirectOperator (explicite)")
168         if self.__mfEnabled:
169             _HX = self.__userFunction( X, argsAsSerie = True )
170         else:
171             _X = numpy.asmatrix(numpy.ravel( X )).T
172             _HX = numpy.ravel(self.__userFunction( _X ))
173         #
174         return _HX
175
176     # ---------------------------------------------------------
177     def TangentMatrix(self, X ):
178         """
179         Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
180         c'est-à-dire le gradient de H en X. On utilise des différences finies
181         directionnelles autour du point X. X est un numpy.matrix.
182
183         Différences finies centrées (approximation d'ordre 2):
184         1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
185            dX[i] à la  composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
186            on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
187            H( X_moins_dXi )
188         2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
189            le pas 2*dXi
190         3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
191
192         Différences finies non centrées (approximation d'ordre 1):
193         1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
194            composante X[i] pour composer X_plus_dXi, et on calcule la réponse
195            HX_plus_dXi = H( X_plus_dXi )
196         2/ On calcule la valeur centrale HX = H(X)
197         3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
198            le pas dXi
199         4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
200
201         """
202         logging.debug("FDA Début du calcul de la Jacobienne")
203         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
204         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
205         #
206         if X is None or len(X)==0:
207             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
208         #
209         _X = numpy.asmatrix(numpy.ravel( X )).T
210         #
211         if self.__dX is None:
212             _dX  = self.__increment * _X
213         else:
214             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
215         #
216         if (_dX == 0.).any():
217             moyenne = _dX.mean()
218             if moyenne == 0.:
219                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
220             else:
221                 _dX = numpy.where( _dX == 0., moyenne, _dX )
222         #
223         __alreadyCalculated  = False
224         if self.__avoidRC:
225             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
226             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
227             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
228                 __alreadyCalculated, __i = True, __alreadyCalculatedP
229                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
230         #
231         if __alreadyCalculated:
232             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
233             _Jacobienne = self.__listJPCR[__i]
234         else:
235             logging.debug("FDA   Calcul Jacobienne (explicite)")
236             if self.__centeredDF:
237                 #
238                 if self.__mpEnabled and not self.__mfEnabled:
239                     funcrepr = {
240                         "__userFunction__path" : self.__userFunction__path,
241                         "__userFunction__modl" : self.__userFunction__modl,
242                         "__userFunction__name" : self.__userFunction__name,
243                     }
244                     _jobs = []
245                     for i in range( len(_dX) ):
246                         _dXi            = _dX[i]
247                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
248                         _X_plus_dXi[i]  = _X[i] + _dXi
249                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
250                         _X_moins_dXi[i] = _X[i] - _dXi
251                         #
252                         _jobs.append( (_X_plus_dXi,  funcrepr) )
253                         _jobs.append( (_X_moins_dXi, funcrepr) )
254                     #
255                     import multiprocessing
256                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
257                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
258                     self.__pool.close()
259                     self.__pool.join()
260                     #
261                     _Jacobienne  = []
262                     for i in range( len(_dX) ):
263                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
264                     #
265                 elif self.__mfEnabled:
266                     _xserie = []
267                     for i in range( len(_dX) ):
268                         _dXi            = _dX[i]
269                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
270                         _X_plus_dXi[i]  = _X[i] + _dXi
271                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
272                         _X_moins_dXi[i] = _X[i] - _dXi
273                         #
274                         _xserie.append( _X_plus_dXi )
275                         _xserie.append( _X_moins_dXi )
276                     #
277                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
278                      #
279                     _Jacobienne  = []
280                     for i in range( len(_dX) ):
281                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
282                     #
283                 else:
284                     _Jacobienne  = []
285                     for i in range( _dX.size ):
286                         _dXi            = _dX[i]
287                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
288                         _X_plus_dXi[i]  = _X[i] + _dXi
289                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
290                         _X_moins_dXi[i] = _X[i] - _dXi
291                         #
292                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
293                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
294                         #
295                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
296                 #
297             else:
298                 #
299                 if self.__mpEnabled and not self.__mfEnabled:
300                     funcrepr = {
301                         "__userFunction__path" : self.__userFunction__path,
302                         "__userFunction__modl" : self.__userFunction__modl,
303                         "__userFunction__name" : self.__userFunction__name,
304                     }
305                     _jobs = []
306                     _jobs.append( (_X.A1, funcrepr) )
307                     for i in range( len(_dX) ):
308                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
309                         _X_plus_dXi[i] = _X[i] + _dX[i]
310                         #
311                         _jobs.append( (_X_plus_dXi, funcrepr) )
312                     #
313                     import multiprocessing
314                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
315                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
316                     self.__pool.close()
317                     self.__pool.join()
318                     #
319                     _HX = _HX_plus_dX.pop(0)
320                     #
321                     _Jacobienne = []
322                     for i in range( len(_dX) ):
323                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
324                     #
325                 elif self.__mfEnabled:
326                     _xserie = []
327                     _xserie.append( _X.A1 )
328                     for i in range( len(_dX) ):
329                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
330                         _X_plus_dXi[i] = _X[i] + _dX[i]
331                         #
332                         _xserie.append( _X_plus_dXi )
333                     #
334                     _HX_plus_dX = self.DirectOperator( _xserie )
335                     #
336                     _HX = _HX_plus_dX.pop(0)
337                     #
338                     _Jacobienne = []
339                     for i in range( len(_dX) ):
340                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
341                    #
342                 else:
343                     _Jacobienne  = []
344                     _HX = self.DirectOperator( _X )
345                     for i in range( _dX.size ):
346                         _dXi            = _dX[i]
347                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
348                         _X_plus_dXi[i]  = _X[i] + _dXi
349                         #
350                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
351                         #
352                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
353                 #
354             #
355             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
356             if self.__avoidRC:
357                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
358                 while len(self.__listJPCP) > self.__lenghtRJ:
359                     self.__listJPCP.pop(0)
360                     self.__listJPCI.pop(0)
361                     self.__listJPCR.pop(0)
362                     self.__listJPPN.pop(0)
363                     self.__listJPIN.pop(0)
364                 self.__listJPCP.append( copy.copy(_X) )
365                 self.__listJPCI.append( copy.copy(_dX) )
366                 self.__listJPCR.append( copy.copy(_Jacobienne) )
367                 self.__listJPPN.append( numpy.linalg.norm(_X) )
368                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
369         #
370         logging.debug("FDA Fin du calcul de la Jacobienne")
371         #
372         return _Jacobienne
373
374     # ---------------------------------------------------------
375     def TangentOperator(self, paire ):
376         """
377         Calcul du tangent à l'aide de la Jacobienne.
378         """
379         if self.__mfEnabled:
380             assert len(paire) == 1, "Incorrect lenght of arguments"
381             _paire = paire[0]
382             assert len(_paire) == 2, "Incorrect number of arguments"
383         else:
384             assert len(paire) == 2, "Incorrect number of arguments"
385             _paire = paire
386         X, dX = _paire
387         _Jacobienne = self.TangentMatrix( X )
388         if dX is None or len(dX) == 0:
389             #
390             # Calcul de la forme matricielle si le second argument est None
391             # -------------------------------------------------------------
392             if self.__mfEnabled: return [_Jacobienne,]
393             else:                return _Jacobienne
394         else:
395             #
396             # Calcul de la valeur linéarisée de H en X appliqué à dX
397             # ------------------------------------------------------
398             _dX = numpy.asmatrix(numpy.ravel( dX )).T
399             _HtX = numpy.dot(_Jacobienne, _dX)
400             if self.__mfEnabled: return [_HtX.A1,]
401             else:                return _HtX.A1
402
403     # ---------------------------------------------------------
404     def AdjointOperator(self, paire ):
405         """
406         Calcul de l'adjoint à l'aide de la Jacobienne.
407         """
408         if self.__mfEnabled:
409             assert len(paire) == 1, "Incorrect lenght of arguments"
410             _paire = paire[0]
411             assert len(_paire) == 2, "Incorrect number of arguments"
412         else:
413             assert len(paire) == 2, "Incorrect number of arguments"
414             _paire = paire
415         X, Y = _paire
416         _JacobienneT = self.TangentMatrix( X ).T
417         if Y is None or len(Y) == 0:
418             #
419             # Calcul de la forme matricielle si le second argument est None
420             # -------------------------------------------------------------
421             if self.__mfEnabled: return [_JacobienneT,]
422             else:                return _JacobienneT
423         else:
424             #
425             # Calcul de la valeur de l'adjoint en X appliqué à Y
426             # --------------------------------------------------
427             _Y = numpy.asmatrix(numpy.ravel( Y )).T
428             _HaY = numpy.dot(_JacobienneT, _Y)
429             if self.__mfEnabled: return [_HaY.A1,]
430             else:                return _HaY.A1
431
432 # ==============================================================================
433 def mmqr(
434         func     = None,
435         x0       = None,
436         fprime   = None,
437         bounds   = None,
438         quantile = 0.5,
439         maxfun   = 15000,
440         toler    = 1.e-06,
441         y        = None,
442         ):
443     """
444     Implémentation informatique de l'algorithme MMQR, basée sur la publication :
445     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
446     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
447     """
448     #
449     # Recuperation des donnees et informations initiales
450     # --------------------------------------------------
451     variables = numpy.ravel( x0 )
452     mesures   = numpy.ravel( y )
453     increment = sys.float_info[0]
454     p         = variables.size
455     n         = mesures.size
456     quantile  = float(quantile)
457     #
458     # Calcul des parametres du MM
459     # ---------------------------
460     tn      = float(toler) / n
461     e0      = -tn / math.log(tn)
462     epsilon = (e0-tn)/(1+math.log(e0))
463     #
464     # Calculs d'initialisation
465     # ------------------------
466     residus  = mesures - numpy.ravel( func( variables ) )
467     poids    = 1./(epsilon+numpy.abs(residus))
468     veps     = 1. - 2. * quantile - residus * poids
469     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
470     iteration = 0
471     #
472     # Recherche iterative
473     # -------------------
474     while (increment > toler) and (iteration < maxfun) :
475         iteration += 1
476         #
477         Derivees  = numpy.array(fprime(variables))
478         Derivees  = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
479         DeriveesT = Derivees.transpose()
480         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
481         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
482         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
483         #
484         variables = variables + step
485         if bounds is not None:
486             # Attention : boucle infinie à éviter si un intervalle est trop petit
487             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
488                 step      = step/2.
489                 variables = variables - step
490         residus   = mesures - numpy.ravel( func(variables) )
491         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
492         #
493         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
494             step      = step/2.
495             variables = variables - step
496             residus   = mesures - numpy.ravel( func(variables) )
497             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
498         #
499         increment     = lastsurrogate-surrogate
500         poids         = 1./(epsilon+numpy.abs(residus))
501         veps          = 1. - 2. * quantile - residus * poids
502         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
503     #
504     # Mesure d'écart
505     # --------------
506     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
507     #
508     return variables, Ecart, [n,p,iteration,increment,0]
509
510 # ==============================================================================
511 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
512     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
513     #
514     _bgcenter = numpy.ravel(_bgcenter)[:,None]
515     if _nbmembers < 1:
516         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
517     #
518     if _bgcovariance is None:
519         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
520     else:
521         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
522         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
523     #
524     return BackgroundEnsemble
525
526 # ==============================================================================
527 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
528     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
529     def __CenteredRandomAnomalies(Zr, N):
530         """
531         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
532         notes manuscrites de MB et conforme au code de PS avec eps = -1
533         """
534         eps = -1
535         Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
536         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
537         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
538         Q = numpy.dot(Q,R)
539         Zr = numpy.dot(Q,Zr)
540         return Zr.T
541     #
542     _bgcenter = numpy.ravel(_bgcenter)[:,None]
543     if _nbmembers < 1:
544         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
545     if _bgcovariance is None:
546         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
547     else:
548         if _withSVD:
549             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
550             _nbctl = _bgcenter.size
551             if _nbmembers > _nbctl:
552                 _Z = numpy.concatenate((numpy.dot(
553                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
554                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
555             else:
556                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
557             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
558             BackgroundEnsemble = _bgcenter + _Zca
559         else:
560             if max(abs(_bgcovariance.flatten())) > 0:
561                 _nbctl = _bgcenter.size
562                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
563                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
564                 BackgroundEnsemble = _bgcenter + _Zca
565             else:
566                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
567     #
568     return BackgroundEnsemble
569
570 # ==============================================================================
571 def EnsembleOfAnomalies( _ensemble, _optmean = None):
572     "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
573     if _optmean is None:
574         Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
575     else:
576         Em = numpy.ravel(_optmean)[:,numpy.newaxis]
577     #
578     return numpy.asarray(_ensemble) - Em
579
580 # ==============================================================================
581 def CovarianceInflation(
582         InputCovOrEns,
583         InflationType   = None,
584         InflationFactor = None,
585         BackgroundCov   = None,
586         ):
587     """
588     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
589
590     Synthèse : Hunt 2007, section 2.3.5
591     """
592     if InflationFactor is None:
593         return InputCovOrEns
594     else:
595         InflationFactor = float(InflationFactor)
596     #
597     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
598         if InflationFactor < 1.:
599             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
600         if InflationFactor < 1.+mpr:
601             return InputCovOrEns
602         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
603     #
604     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
605         if InflationFactor < 1.:
606             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
607         if InflationFactor < 1.+mpr:
608             return InputCovOrEns
609         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
610         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
611             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
612     #
613     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
614         if InflationFactor < 0.:
615             raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
616         if InflationFactor < mpr:
617             return InputCovOrEns
618         __n, __m = numpy.asarray(InputCovOrEns).shape
619         if __n != __m:
620             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
621         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
622     #
623     elif InflationType == "HybridOnBackgroundCovariance":
624         if InflationFactor < 0.:
625             raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
626         if InflationFactor < mpr:
627             return InputCovOrEns
628         __n, __m = numpy.asarray(InputCovOrEns).shape
629         if __n != __m:
630             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
631         if BackgroundCov is None:
632             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
633         if InputCovOrEns.shape != BackgroundCov.shape:
634             raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
635         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
636     #
637     elif InflationType == "Relaxation":
638         raise NotImplementedError("InflationType Relaxation")
639     #
640     else:
641         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
642     #
643     return OutputCovOrEns
644
645 # ==============================================================================
646 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
647     """
648     Chapeau : 3DVAR multi-pas et multi-méthodes
649     """
650     #
651     # Initialisation
652     # --------------
653     Xn = numpy.ravel(Xb).reshape((-1,1))
654     #
655     if selfA._parameters["EstimationOf"] == "State":
656         M = EM["Direct"].appliedTo
657         #
658         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
659             selfA.StoredVariables["Analysis"].store( Xn )
660             if selfA._toStore("APosterioriCovariance"):
661                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
662                 else:                         Pn = B
663                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
664             if selfA._toStore("ForecastState"):
665                 selfA.StoredVariables["ForecastState"].store( Xn )
666     #
667     if hasattr(Y,"stepnumber"):
668         duration = Y.stepnumber()
669     else:
670         duration = 2
671     #
672     # Multi-pas
673     # ---------
674     for step in range(duration-1):
675         if hasattr(Y,"store"):
676             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
677         else:
678             Ynpu = numpy.ravel( Y ).reshape((-1,1))
679         #
680         if selfA._parameters["EstimationOf"] == "State": # Forecast
681             Xn = selfA.StoredVariables["Analysis"][-1]
682             Xn_predicted = M( Xn )
683             if selfA._toStore("ForecastState"):
684                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
685         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
686             # --- > Par principe, M = Id, Q = 0
687             Xn_predicted = Xn
688         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
689         #
690         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
691     #
692     return 0
693
694 # ==============================================================================
695 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
696     """
697     3DVAR (Bouttier 1999, Courtier 1993)
698
699     selfA est identique au "self" d'algorithme appelant et contient les
700     valeurs.
701     """
702     #
703     # Correction pour pallier a un bug de TNC sur le retour du Minimum
704     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
705         selfA.setParameterValue("StoreInternalVariables",True)
706     #
707     # Opérateurs
708     # ----------
709     Hm = HO["Direct"].appliedTo
710     Ha = HO["Adjoint"].appliedInXTo
711     #
712     # Utilisation éventuelle d'un vecteur H(Xb) précalculé
713     # ----------------------------------------------------
714     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
715         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
716     else:
717         HXb = Hm( Xb )
718     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
719     if Y.size != HXb.size:
720         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
721     if max(Y.shape) != max(HXb.shape):
722         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
723     #
724     if selfA._toStore("JacobianMatrixAtBackground"):
725         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
726         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
727         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
728     #
729     # Précalcul des inversions de B et R
730     # ----------------------------------
731     BI = B.getI()
732     RI = R.getI()
733     #
734     # Point de démarrage de l'optimisation
735     # ------------------------------------
736     Xini = selfA._parameters["InitializationPoint"]
737     #
738     # Définition de la fonction-coût
739     # ------------------------------
740     def CostFunction(x):
741         _X  = numpy.asmatrix(numpy.ravel( x )).T
742         if selfA._parameters["StoreInternalVariables"] or \
743             selfA._toStore("CurrentState") or \
744             selfA._toStore("CurrentOptimum"):
745             selfA.StoredVariables["CurrentState"].store( _X )
746         _HX = Hm( _X )
747         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
748         _Innovation = Y - _HX
749         if selfA._toStore("SimulatedObservationAtCurrentState") or \
750             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
751             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
752         if selfA._toStore("InnovationAtCurrentState"):
753             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
754         #
755         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
756         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
757         J   = Jb + Jo
758         #
759         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
760         selfA.StoredVariables["CostFunctionJb"].store( Jb )
761         selfA.StoredVariables["CostFunctionJo"].store( Jo )
762         selfA.StoredVariables["CostFunctionJ" ].store( J )
763         if selfA._toStore("IndexOfOptimum") or \
764             selfA._toStore("CurrentOptimum") or \
765             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
766             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
767             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
768             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
769             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
770         if selfA._toStore("IndexOfOptimum"):
771             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
772         if selfA._toStore("CurrentOptimum"):
773             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
774         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
775             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
776         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
777             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
778         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
779             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
780         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
781             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
782         return J
783     #
784     def GradientOfCostFunction(x):
785         _X      = numpy.asmatrix(numpy.ravel( x )).T
786         _HX     = Hm( _X )
787         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
788         GradJb  = BI * (_X - Xb)
789         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
790         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
791         return GradJ
792     #
793     # Minimisation de la fonctionnelle
794     # --------------------------------
795     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
796     #
797     if selfA._parameters["Minimizer"] == "LBFGSB":
798         if "0.19" <= scipy.version.version <= "1.1.0":
799             import lbfgsbhlt as optimiseur
800         else:
801             import scipy.optimize as optimiseur
802         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
803             func        = CostFunction,
804             x0          = Xini,
805             fprime      = GradientOfCostFunction,
806             args        = (),
807             bounds      = selfA._parameters["Bounds"],
808             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
809             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
810             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
811             iprint      = selfA._parameters["optiprint"],
812             )
813         nfeval = Informations['funcalls']
814         rc     = Informations['warnflag']
815     elif selfA._parameters["Minimizer"] == "TNC":
816         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
817             func        = CostFunction,
818             x0          = Xini,
819             fprime      = GradientOfCostFunction,
820             args        = (),
821             bounds      = selfA._parameters["Bounds"],
822             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
823             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
824             ftol        = selfA._parameters["CostDecrementTolerance"],
825             messages    = selfA._parameters["optmessages"],
826             )
827     elif selfA._parameters["Minimizer"] == "CG":
828         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
829             f           = CostFunction,
830             x0          = Xini,
831             fprime      = GradientOfCostFunction,
832             args        = (),
833             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
834             gtol        = selfA._parameters["GradientNormTolerance"],
835             disp        = selfA._parameters["optdisp"],
836             full_output = True,
837             )
838     elif selfA._parameters["Minimizer"] == "NCG":
839         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
840             f           = CostFunction,
841             x0          = Xini,
842             fprime      = GradientOfCostFunction,
843             args        = (),
844             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
845             avextol     = selfA._parameters["CostDecrementTolerance"],
846             disp        = selfA._parameters["optdisp"],
847             full_output = True,
848             )
849     elif selfA._parameters["Minimizer"] == "BFGS":
850         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
851             f           = CostFunction,
852             x0          = Xini,
853             fprime      = GradientOfCostFunction,
854             args        = (),
855             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
856             gtol        = selfA._parameters["GradientNormTolerance"],
857             disp        = selfA._parameters["optdisp"],
858             full_output = True,
859             )
860     else:
861         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
862     #
863     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
864     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
865     #
866     # Correction pour pallier a un bug de TNC sur le retour du Minimum
867     # ----------------------------------------------------------------
868     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
869         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
870     #
871     # Obtention de l'analyse
872     # ----------------------
873     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
874     #
875     selfA.StoredVariables["Analysis"].store( Xa.A1 )
876     #
877     if selfA._toStore("OMA") or \
878         selfA._toStore("SigmaObs2") or \
879         selfA._toStore("SimulationQuantiles") or \
880         selfA._toStore("SimulatedObservationAtOptimum"):
881         if selfA._toStore("SimulatedObservationAtCurrentState"):
882             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
883         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
884             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
885         else:
886             HXa = Hm( Xa )
887     #
888     # Calcul de la covariance d'analyse
889     # ---------------------------------
890     if selfA._toStore("APosterioriCovariance") or \
891         selfA._toStore("SimulationQuantiles") or \
892         selfA._toStore("JacobianMatrixAtOptimum") or \
893         selfA._toStore("KalmanGainAtOptimum"):
894         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
895         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
896     if selfA._toStore("APosterioriCovariance") or \
897         selfA._toStore("SimulationQuantiles") or \
898         selfA._toStore("KalmanGainAtOptimum"):
899         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
900         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
901     if selfA._toStore("APosterioriCovariance") or \
902         selfA._toStore("SimulationQuantiles"):
903         HessienneI = []
904         nb = Xa.size
905         for i in range(nb):
906             _ee    = numpy.matrix(numpy.zeros(nb)).T
907             _ee[i] = 1.
908             _HtEE  = numpy.dot(HtM,_ee)
909             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
910             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
911         HessienneI = numpy.matrix( HessienneI )
912         A = HessienneI.I
913         if min(A.shape) != max(A.shape):
914             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
915         if (numpy.diag(A) < 0).any():
916             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
917         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
918             try:
919                 L = numpy.linalg.cholesky( A )
920             except:
921                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
922     if selfA._toStore("APosterioriCovariance"):
923         selfA.StoredVariables["APosterioriCovariance"].store( A )
924     if selfA._toStore("JacobianMatrixAtOptimum"):
925         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
926     if selfA._toStore("KalmanGainAtOptimum"):
927         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
928         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
929         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
930     #
931     # Calculs et/ou stockages supplémentaires
932     # ---------------------------------------
933     if selfA._toStore("Innovation") or \
934         selfA._toStore("SigmaObs2") or \
935         selfA._toStore("MahalanobisConsistency") or \
936         selfA._toStore("OMB"):
937         d  = Y - HXb
938     if selfA._toStore("Innovation"):
939         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
940     if selfA._toStore("BMA"):
941         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
942     if selfA._toStore("OMA"):
943         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
944     if selfA._toStore("OMB"):
945         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
946     if selfA._toStore("SigmaObs2"):
947         TraceR = R.trace(Y.size)
948         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
949     if selfA._toStore("MahalanobisConsistency"):
950         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
951     if selfA._toStore("SimulationQuantiles"):
952         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
953         HXa  = numpy.matrix(numpy.ravel( HXa )).T
954         YfQ  = None
955         for i in range(nech):
956             if selfA._parameters["SimulationForQuantiles"] == "Linear":
957                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
958                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
959                 Yr = HXa + dYr
960             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
961                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
962                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
963             if YfQ is None:
964                 YfQ = Yr
965             else:
966                 YfQ = numpy.hstack((YfQ,Yr))
967         YfQ.sort(axis=-1)
968         YQ = None
969         for quantile in selfA._parameters["Quantiles"]:
970             if not (0. <= float(quantile) <= 1.): continue
971             indice = int(nech * float(quantile) - 1./nech)
972             if YQ is None: YQ = YfQ[:,indice]
973             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
974         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
975     if selfA._toStore("SimulatedObservationAtBackground"):
976         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
977     if selfA._toStore("SimulatedObservationAtOptimum"):
978         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
979     #
980     return 0
981
982 # ==============================================================================
983 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
984     """
985     3DVAR variational analysis with no inversion of B (Huang 2000)
986
987     selfA est identique au "self" d'algorithme appelant et contient les
988     valeurs.
989     """
990     #
991     # Correction pour pallier a un bug de TNC sur le retour du Minimum
992     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
993         selfA.setParameterValue("StoreInternalVariables",True)
994     #
995     # Initialisations
996     # ---------------
997     Hm = HO["Direct"].appliedTo
998     Ha = HO["Adjoint"].appliedInXTo
999     #
1000     # Précalcul des inversions de B et R
1001     BT = B.getT()
1002     RI = R.getI()
1003     #
1004     # Point de démarrage de l'optimisation
1005     Xini = numpy.zeros(Xb.shape)
1006     #
1007     # Définition de la fonction-coût
1008     # ------------------------------
1009     def CostFunction(v):
1010         _V = numpy.asmatrix(numpy.ravel( v )).T
1011         _X = Xb + B * _V
1012         if selfA._parameters["StoreInternalVariables"] or \
1013             selfA._toStore("CurrentState") or \
1014             selfA._toStore("CurrentOptimum"):
1015             selfA.StoredVariables["CurrentState"].store( _X )
1016         _HX = Hm( _X )
1017         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1018         _Innovation = Y - _HX
1019         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1020             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1021             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
1022         if selfA._toStore("InnovationAtCurrentState"):
1023             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1024         #
1025         Jb  = float( 0.5 * _V.T * BT * _V )
1026         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1027         J   = Jb + Jo
1028         #
1029         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1030         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1031         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1032         selfA.StoredVariables["CostFunctionJ" ].store( J )
1033         if selfA._toStore("IndexOfOptimum") or \
1034             selfA._toStore("CurrentOptimum") or \
1035             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1036             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1037             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1038             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1039             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1040         if selfA._toStore("IndexOfOptimum"):
1041             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1042         if selfA._toStore("CurrentOptimum"):
1043             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1044         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1045             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1046         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1047             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1048         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1049             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1050         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1051             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1052         return J
1053     #
1054     def GradientOfCostFunction(v):
1055         _V = numpy.asmatrix(numpy.ravel( v )).T
1056         _X = Xb + B * _V
1057         _HX     = Hm( _X )
1058         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
1059         GradJb  = BT * _V
1060         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
1061         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1062         return GradJ
1063     #
1064     # Minimisation de la fonctionnelle
1065     # --------------------------------
1066     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1067     #
1068     if selfA._parameters["Minimizer"] == "LBFGSB":
1069         if "0.19" <= scipy.version.version <= "1.1.0":
1070             import lbfgsbhlt as optimiseur
1071         else:
1072             import scipy.optimize as optimiseur
1073         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1074             func        = CostFunction,
1075             x0          = Xini,
1076             fprime      = GradientOfCostFunction,
1077             args        = (),
1078             bounds      = selfA._parameters["Bounds"],
1079             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1080             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1081             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1082             iprint      = selfA._parameters["optiprint"],
1083             )
1084         nfeval = Informations['funcalls']
1085         rc     = Informations['warnflag']
1086     elif selfA._parameters["Minimizer"] == "TNC":
1087         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1088             func        = CostFunction,
1089             x0          = Xini,
1090             fprime      = GradientOfCostFunction,
1091             args        = (),
1092             bounds      = selfA._parameters["Bounds"],
1093             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1094             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1095             ftol        = selfA._parameters["CostDecrementTolerance"],
1096             messages    = selfA._parameters["optmessages"],
1097             )
1098     elif selfA._parameters["Minimizer"] == "CG":
1099         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1100             f           = CostFunction,
1101             x0          = Xini,
1102             fprime      = GradientOfCostFunction,
1103             args        = (),
1104             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1105             gtol        = selfA._parameters["GradientNormTolerance"],
1106             disp        = selfA._parameters["optdisp"],
1107             full_output = True,
1108             )
1109     elif selfA._parameters["Minimizer"] == "NCG":
1110         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1111             f           = CostFunction,
1112             x0          = Xini,
1113             fprime      = GradientOfCostFunction,
1114             args        = (),
1115             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1116             avextol     = selfA._parameters["CostDecrementTolerance"],
1117             disp        = selfA._parameters["optdisp"],
1118             full_output = True,
1119             )
1120     elif selfA._parameters["Minimizer"] == "BFGS":
1121         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1122             f           = CostFunction,
1123             x0          = Xini,
1124             fprime      = GradientOfCostFunction,
1125             args        = (),
1126             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1127             gtol        = selfA._parameters["GradientNormTolerance"],
1128             disp        = selfA._parameters["optdisp"],
1129             full_output = True,
1130             )
1131     else:
1132         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1133     #
1134     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1135     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1136     #
1137     # Correction pour pallier a un bug de TNC sur le retour du Minimum
1138     # ----------------------------------------------------------------
1139     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1140         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1141         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1142     else:
1143         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
1144     #
1145     # Obtention de l'analyse
1146     # ----------------------
1147     Xa = Minimum
1148     #
1149     selfA.StoredVariables["Analysis"].store( Xa )
1150     #
1151     if selfA._toStore("OMA") or \
1152         selfA._toStore("SigmaObs2") or \
1153         selfA._toStore("SimulationQuantiles") or \
1154         selfA._toStore("SimulatedObservationAtOptimum"):
1155         if selfA._toStore("SimulatedObservationAtCurrentState"):
1156             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1157         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1158             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1159         else:
1160             HXa = Hm( Xa )
1161     #
1162     # Calcul de la covariance d'analyse
1163     # ---------------------------------
1164     if selfA._toStore("APosterioriCovariance") or \
1165         selfA._toStore("SimulationQuantiles") or \
1166         selfA._toStore("JacobianMatrixAtOptimum") or \
1167         selfA._toStore("KalmanGainAtOptimum"):
1168         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1169         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1170     if selfA._toStore("APosterioriCovariance") or \
1171         selfA._toStore("SimulationQuantiles") or \
1172         selfA._toStore("KalmanGainAtOptimum"):
1173         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1174         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1175     if selfA._toStore("APosterioriCovariance") or \
1176         selfA._toStore("SimulationQuantiles"):
1177         BI = B.getI()
1178         HessienneI = []
1179         nb = Xa.size
1180         for i in range(nb):
1181             _ee    = numpy.matrix(numpy.zeros(nb)).T
1182             _ee[i] = 1.
1183             _HtEE  = numpy.dot(HtM,_ee)
1184             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1185             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1186         HessienneI = numpy.matrix( HessienneI )
1187         A = HessienneI.I
1188         if min(A.shape) != max(A.shape):
1189             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1190         if (numpy.diag(A) < 0).any():
1191             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1192         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
1193             try:
1194                 L = numpy.linalg.cholesky( A )
1195             except:
1196                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1197     if selfA._toStore("APosterioriCovariance"):
1198         selfA.StoredVariables["APosterioriCovariance"].store( A )
1199     if selfA._toStore("JacobianMatrixAtOptimum"):
1200         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1201     if selfA._toStore("KalmanGainAtOptimum"):
1202         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1203         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1204         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1205     #
1206     # Calculs et/ou stockages supplémentaires
1207     # ---------------------------------------
1208     if selfA._toStore("Innovation") or \
1209         selfA._toStore("SigmaObs2") or \
1210         selfA._toStore("MahalanobisConsistency") or \
1211         selfA._toStore("OMB"):
1212         d  = Y - HXb
1213     if selfA._toStore("Innovation"):
1214         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1215     if selfA._toStore("BMA"):
1216         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1217     if selfA._toStore("OMA"):
1218         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1219     if selfA._toStore("OMB"):
1220         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1221     if selfA._toStore("SigmaObs2"):
1222         TraceR = R.trace(Y.size)
1223         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1224     if selfA._toStore("MahalanobisConsistency"):
1225         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1226     if selfA._toStore("SimulationQuantiles"):
1227         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1228         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1229         YfQ  = None
1230         for i in range(nech):
1231             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1232                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1233                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1234                 Yr = HXa + dYr
1235             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1236                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1237                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1238             if YfQ is None:
1239                 YfQ = Yr
1240             else:
1241                 YfQ = numpy.hstack((YfQ,Yr))
1242         YfQ.sort(axis=-1)
1243         YQ = None
1244         for quantile in selfA._parameters["Quantiles"]:
1245             if not (0. <= float(quantile) <= 1.): continue
1246             indice = int(nech * float(quantile) - 1./nech)
1247             if YQ is None: YQ = YfQ[:,indice]
1248             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1249         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1250     if selfA._toStore("SimulatedObservationAtBackground"):
1251         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1252     if selfA._toStore("SimulatedObservationAtOptimum"):
1253         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1254     #
1255     return 0
1256
1257 # ==============================================================================
1258 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1259     """
1260     3DVAR incrémental (Courtier 1994, 1997)
1261
1262     selfA est identique au "self" d'algorithme appelant et contient les
1263     valeurs.
1264     """
1265     #
1266     # Correction pour pallier a un bug de TNC sur le retour du Minimum
1267     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
1268         selfA.setParameterValue("StoreInternalVariables",True)
1269     #
1270     # Initialisations
1271     # ---------------
1272     #
1273     # Opérateur non-linéaire pour la boucle externe
1274     Hm = HO["Direct"].appliedTo
1275     #
1276     # Précalcul des inversions de B et R
1277     BI = B.getI()
1278     RI = R.getI()
1279     #
1280     # Point de démarrage de l'optimisation
1281     Xini = selfA._parameters["InitializationPoint"]
1282     #
1283     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1284     Innovation = Y - HXb
1285     #
1286     # Outer Loop
1287     # ----------
1288     iOuter = 0
1289     J      = 1./mpr
1290     DeltaJ = 1./mpr
1291     Xr     = Xini.reshape((-1,1))
1292     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1293         #
1294         # Inner Loop
1295         # ----------
1296         Ht = HO["Tangent"].asMatrix(Xr)
1297         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1298         #
1299         # Définition de la fonction-coût
1300         # ------------------------------
1301         def CostFunction(dx):
1302             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1303             if selfA._parameters["StoreInternalVariables"] or \
1304                 selfA._toStore("CurrentState") or \
1305                 selfA._toStore("CurrentOptimum"):
1306                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1307             _HdX = Ht * _dX
1308             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1309             _dInnovation = Innovation - _HdX
1310             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1311                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1312                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1313             if selfA._toStore("InnovationAtCurrentState"):
1314                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1315             #
1316             Jb  = float( 0.5 * _dX.T * BI * _dX )
1317             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1318             J   = Jb + Jo
1319             #
1320             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1321             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1322             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1323             selfA.StoredVariables["CostFunctionJ" ].store( J )
1324             if selfA._toStore("IndexOfOptimum") or \
1325                 selfA._toStore("CurrentOptimum") or \
1326                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1327                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1328                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1329                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1330                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1331             if selfA._toStore("IndexOfOptimum"):
1332                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1333             if selfA._toStore("CurrentOptimum"):
1334                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1335             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1336                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1337             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1338                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1339             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1340                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1341             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1342                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1343             return J
1344         #
1345         def GradientOfCostFunction(dx):
1346             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1347             _HdX         = Ht * _dX
1348             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1349             _dInnovation = Innovation - _HdX
1350             GradJb       = BI * _dX
1351             GradJo       = - Ht.T @ (RI * _dInnovation)
1352             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1353             return GradJ
1354         #
1355         # Minimisation de la fonctionnelle
1356         # --------------------------------
1357         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1358         #
1359         if selfA._parameters["Minimizer"] == "LBFGSB":
1360             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1361             if "0.19" <= scipy.version.version <= "1.1.0":
1362                 import lbfgsbhlt as optimiseur
1363             else:
1364                 import scipy.optimize as optimiseur
1365             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1366                 func        = CostFunction,
1367                 x0          = numpy.zeros(Xini.size),
1368                 fprime      = GradientOfCostFunction,
1369                 args        = (),
1370                 bounds      = selfA._parameters["Bounds"],
1371                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1372                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1373                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1374                 iprint      = selfA._parameters["optiprint"],
1375                 )
1376             nfeval = Informations['funcalls']
1377             rc     = Informations['warnflag']
1378         elif selfA._parameters["Minimizer"] == "TNC":
1379             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1380                 func        = CostFunction,
1381                 x0          = numpy.zeros(Xini.size),
1382                 fprime      = GradientOfCostFunction,
1383                 args        = (),
1384                 bounds      = selfA._parameters["Bounds"],
1385                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1386                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1387                 ftol        = selfA._parameters["CostDecrementTolerance"],
1388                 messages    = selfA._parameters["optmessages"],
1389                 )
1390         elif selfA._parameters["Minimizer"] == "CG":
1391             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1392                 f           = CostFunction,
1393                 x0          = numpy.zeros(Xini.size),
1394                 fprime      = GradientOfCostFunction,
1395                 args        = (),
1396                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1397                 gtol        = selfA._parameters["GradientNormTolerance"],
1398                 disp        = selfA._parameters["optdisp"],
1399                 full_output = True,
1400                 )
1401         elif selfA._parameters["Minimizer"] == "NCG":
1402             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1403                 f           = CostFunction,
1404                 x0          = numpy.zeros(Xini.size),
1405                 fprime      = GradientOfCostFunction,
1406                 args        = (),
1407                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1408                 avextol     = selfA._parameters["CostDecrementTolerance"],
1409                 disp        = selfA._parameters["optdisp"],
1410                 full_output = True,
1411                 )
1412         elif selfA._parameters["Minimizer"] == "BFGS":
1413             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1414                 f           = CostFunction,
1415                 x0          = numpy.zeros(Xini.size),
1416                 fprime      = GradientOfCostFunction,
1417                 args        = (),
1418                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1419                 gtol        = selfA._parameters["GradientNormTolerance"],
1420                 disp        = selfA._parameters["optdisp"],
1421                 full_output = True,
1422                 )
1423         else:
1424             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1425         #
1426         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1427         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1428         #
1429         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1430             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1431             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1432         else:
1433             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1434         #
1435         Xr     = Minimum
1436         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1437         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1438     #
1439     # Obtention de l'analyse
1440     # ----------------------
1441     Xa = Xr
1442     #
1443     selfA.StoredVariables["Analysis"].store( Xa )
1444     #
1445     if selfA._toStore("OMA") or \
1446         selfA._toStore("SigmaObs2") or \
1447         selfA._toStore("SimulationQuantiles") or \
1448         selfA._toStore("SimulatedObservationAtOptimum"):
1449         if selfA._toStore("SimulatedObservationAtCurrentState"):
1450             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1451         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1452             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1453         else:
1454             HXa = Hm( Xa )
1455     #
1456     # Calcul de la covariance d'analyse
1457     # ---------------------------------
1458     if selfA._toStore("APosterioriCovariance") or \
1459         selfA._toStore("SimulationQuantiles") or \
1460         selfA._toStore("JacobianMatrixAtOptimum") or \
1461         selfA._toStore("KalmanGainAtOptimum"):
1462         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1463         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1464     if selfA._toStore("APosterioriCovariance") or \
1465         selfA._toStore("SimulationQuantiles") or \
1466         selfA._toStore("KalmanGainAtOptimum"):
1467         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1468         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1469     if selfA._toStore("APosterioriCovariance") or \
1470         selfA._toStore("SimulationQuantiles"):
1471         HessienneI = []
1472         nb = Xa.size
1473         for i in range(nb):
1474             _ee    = numpy.matrix(numpy.zeros(nb)).T
1475             _ee[i] = 1.
1476             _HtEE  = numpy.dot(HtM,_ee)
1477             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1478             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1479         HessienneI = numpy.matrix( HessienneI )
1480         A = HessienneI.I
1481         if min(A.shape) != max(A.shape):
1482             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1483         if (numpy.diag(A) < 0).any():
1484             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1485         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
1486             try:
1487                 L = numpy.linalg.cholesky( A )
1488             except:
1489                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1490     if selfA._toStore("APosterioriCovariance"):
1491         selfA.StoredVariables["APosterioriCovariance"].store( A )
1492     if selfA._toStore("JacobianMatrixAtOptimum"):
1493         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1494     if selfA._toStore("KalmanGainAtOptimum"):
1495         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1496         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1497         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1498     #
1499     # Calculs et/ou stockages supplémentaires
1500     # ---------------------------------------
1501     if selfA._toStore("Innovation") or \
1502         selfA._toStore("SigmaObs2") or \
1503         selfA._toStore("MahalanobisConsistency") or \
1504         selfA._toStore("OMB"):
1505         d  = Y - HXb
1506     if selfA._toStore("Innovation"):
1507         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1508     if selfA._toStore("BMA"):
1509         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1510     if selfA._toStore("OMA"):
1511         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1512     if selfA._toStore("OMB"):
1513         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1514     if selfA._toStore("SigmaObs2"):
1515         TraceR = R.trace(Y.size)
1516         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1517     if selfA._toStore("MahalanobisConsistency"):
1518         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1519     if selfA._toStore("SimulationQuantiles"):
1520         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1521         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1522         YfQ  = None
1523         for i in range(nech):
1524             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1525                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1526                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1527                 Yr = HXa + dYr
1528             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1529                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1530                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1531             if YfQ is None:
1532                 YfQ = Yr
1533             else:
1534                 YfQ = numpy.hstack((YfQ,Yr))
1535         YfQ.sort(axis=-1)
1536         YQ = None
1537         for quantile in selfA._parameters["Quantiles"]:
1538             if not (0. <= float(quantile) <= 1.): continue
1539             indice = int(nech * float(quantile) - 1./nech)
1540             if YQ is None: YQ = YfQ[:,indice]
1541             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1542         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1543     if selfA._toStore("SimulatedObservationAtBackground"):
1544         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1545     if selfA._toStore("SimulatedObservationAtOptimum"):
1546         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1547     #
1548     return 0
1549
1550 # ==============================================================================
1551 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1552     """
1553     3DVAR PSAS (Huang 2000)
1554
1555     selfA est identique au "self" d'algorithme appelant et contient les
1556     valeurs.
1557     """
1558     #
1559     # Correction pour pallier a un bug de TNC sur le retour du Minimum
1560     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
1561         selfA.setParameterValue("StoreInternalVariables",True)
1562     #
1563     # Initialisations
1564     # ---------------
1565     #
1566     # Opérateurs
1567     Hm = HO["Direct"].appliedTo
1568     #
1569     # Utilisation éventuelle d'un vecteur H(Xb) précalculé
1570     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1571         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1572     else:
1573         HXb = Hm( Xb )
1574     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1575     if Y.size != HXb.size:
1576         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1577     if max(Y.shape) != max(HXb.shape):
1578         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
1579     #
1580     if selfA._toStore("JacobianMatrixAtBackground"):
1581         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1582         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1583         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1584     #
1585     Ht = HO["Tangent"].asMatrix(Xb)
1586     BHT = B * Ht.T
1587     HBHTpR = R + Ht * BHT
1588     Innovation = Y - HXb
1589     #
1590     # Point de démarrage de l'optimisation
1591     Xini = numpy.zeros(Xb.shape)
1592     #
1593     # Définition de la fonction-coût
1594     # ------------------------------
1595     def CostFunction(w):
1596         _W = numpy.asmatrix(numpy.ravel( w )).T
1597         if selfA._parameters["StoreInternalVariables"] or \
1598             selfA._toStore("CurrentState") or \
1599             selfA._toStore("CurrentOptimum"):
1600             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1601         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1602             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1603             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1604         if selfA._toStore("InnovationAtCurrentState"):
1605             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1606         #
1607         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
1608         Jo  = float( - _W.T * Innovation )
1609         J   = Jb + Jo
1610         #
1611         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1612         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1613         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1614         selfA.StoredVariables["CostFunctionJ" ].store( J )
1615         if selfA._toStore("IndexOfOptimum") or \
1616             selfA._toStore("CurrentOptimum") or \
1617             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1618             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1619             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1620             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1621             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1622         if selfA._toStore("IndexOfOptimum"):
1623             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1624         if selfA._toStore("CurrentOptimum"):
1625             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1626         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1627             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1628         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1629             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1630         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1631             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1632         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1633             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1634         return J
1635     #
1636     def GradientOfCostFunction(w):
1637         _W = numpy.asmatrix(numpy.ravel( w )).T
1638         GradJb  = HBHTpR * _W
1639         GradJo  = - Innovation
1640         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1641         return GradJ
1642     #
1643     # Minimisation de la fonctionnelle
1644     # --------------------------------
1645     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1646     #
1647     if selfA._parameters["Minimizer"] == "LBFGSB":
1648         # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1649         if "0.19" <= scipy.version.version <= "1.1.0":
1650             import lbfgsbhlt as optimiseur
1651         else:
1652             import scipy.optimize as optimiseur
1653         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1654             func        = CostFunction,
1655             x0          = Xini,
1656             fprime      = GradientOfCostFunction,
1657             args        = (),
1658             bounds      = selfA._parameters["Bounds"],
1659             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1660             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1661             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1662             iprint      = selfA._parameters["optiprint"],
1663             )
1664         nfeval = Informations['funcalls']
1665         rc     = Informations['warnflag']
1666     elif selfA._parameters["Minimizer"] == "TNC":
1667         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1668             func        = CostFunction,
1669             x0          = Xini,
1670             fprime      = GradientOfCostFunction,
1671             args        = (),
1672             bounds      = selfA._parameters["Bounds"],
1673             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1674             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1675             ftol        = selfA._parameters["CostDecrementTolerance"],
1676             messages    = selfA._parameters["optmessages"],
1677             )
1678     elif selfA._parameters["Minimizer"] == "CG":
1679         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1680             f           = CostFunction,
1681             x0          = Xini,
1682             fprime      = GradientOfCostFunction,
1683             args        = (),
1684             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1685             gtol        = selfA._parameters["GradientNormTolerance"],
1686             disp        = selfA._parameters["optdisp"],
1687             full_output = True,
1688             )
1689     elif selfA._parameters["Minimizer"] == "NCG":
1690         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1691             f           = CostFunction,
1692             x0          = Xini,
1693             fprime      = GradientOfCostFunction,
1694             args        = (),
1695             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1696             avextol     = selfA._parameters["CostDecrementTolerance"],
1697             disp        = selfA._parameters["optdisp"],
1698             full_output = True,
1699             )
1700     elif selfA._parameters["Minimizer"] == "BFGS":
1701         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1702             f           = CostFunction,
1703             x0          = Xini,
1704             fprime      = GradientOfCostFunction,
1705             args        = (),
1706             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1707             gtol        = selfA._parameters["GradientNormTolerance"],
1708             disp        = selfA._parameters["optdisp"],
1709             full_output = True,
1710             )
1711     else:
1712         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1713     #
1714     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1715     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1716     #
1717     # Correction pour pallier a un bug de TNC sur le retour du Minimum
1718     # ----------------------------------------------------------------
1719     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1720         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1721         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1722     else:
1723         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1724     #
1725     # Obtention de l'analyse
1726     # ----------------------
1727     Xa = Minimum
1728     #
1729     selfA.StoredVariables["Analysis"].store( Xa )
1730     #
1731     if selfA._toStore("OMA") or \
1732         selfA._toStore("SigmaObs2") or \
1733         selfA._toStore("SimulationQuantiles") or \
1734         selfA._toStore("SimulatedObservationAtOptimum"):
1735         if selfA._toStore("SimulatedObservationAtCurrentState"):
1736             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1737         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1738             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1739         else:
1740             HXa = Hm( Xa )
1741     #
1742     # Calcul de la covariance d'analyse
1743     # ---------------------------------
1744     if selfA._toStore("APosterioriCovariance") or \
1745         selfA._toStore("SimulationQuantiles") or \
1746         selfA._toStore("JacobianMatrixAtOptimum") or \
1747         selfA._toStore("KalmanGainAtOptimum"):
1748         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1749         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1750     if selfA._toStore("APosterioriCovariance") or \
1751         selfA._toStore("SimulationQuantiles") or \
1752         selfA._toStore("KalmanGainAtOptimum"):
1753         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1754         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1755     if selfA._toStore("APosterioriCovariance") or \
1756         selfA._toStore("SimulationQuantiles"):
1757         BI = B.getI()
1758         RI = R.getI()
1759         HessienneI = []
1760         nb = Xa.size
1761         for i in range(nb):
1762             _ee    = numpy.matrix(numpy.zeros(nb)).T
1763             _ee[i] = 1.
1764             _HtEE  = numpy.dot(HtM,_ee)
1765             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1766             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1767         HessienneI = numpy.matrix( HessienneI )
1768         A = HessienneI.I
1769         if min(A.shape) != max(A.shape):
1770             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1771         if (numpy.diag(A) < 0).any():
1772             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1773         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
1774             try:
1775                 L = numpy.linalg.cholesky( A )
1776             except:
1777                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1778     if selfA._toStore("APosterioriCovariance"):
1779         selfA.StoredVariables["APosterioriCovariance"].store( A )
1780     if selfA._toStore("JacobianMatrixAtOptimum"):
1781         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1782     if selfA._toStore("KalmanGainAtOptimum"):
1783         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1784         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1785         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1786     #
1787     # Calculs et/ou stockages supplémentaires
1788     # ---------------------------------------
1789     if selfA._toStore("Innovation") or \
1790         selfA._toStore("SigmaObs2") or \
1791         selfA._toStore("MahalanobisConsistency") or \
1792         selfA._toStore("OMB"):
1793         d  = Y - HXb
1794     if selfA._toStore("Innovation"):
1795         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1796     if selfA._toStore("BMA"):
1797         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1798     if selfA._toStore("OMA"):
1799         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1800     if selfA._toStore("OMB"):
1801         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1802     if selfA._toStore("SigmaObs2"):
1803         TraceR = R.trace(Y.size)
1804         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1805     if selfA._toStore("MahalanobisConsistency"):
1806         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1807     if selfA._toStore("SimulationQuantiles"):
1808         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1809         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1810         YfQ  = None
1811         for i in range(nech):
1812             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1813                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1814                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1815                 Yr = HXa + dYr
1816             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1817                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1818                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1819             if YfQ is None:
1820                 YfQ = Yr
1821             else:
1822                 YfQ = numpy.hstack((YfQ,Yr))
1823         YfQ.sort(axis=-1)
1824         YQ = None
1825         for quantile in selfA._parameters["Quantiles"]:
1826             if not (0. <= float(quantile) <= 1.): continue
1827             indice = int(nech * float(quantile) - 1./nech)
1828             if YQ is None: YQ = YfQ[:,indice]
1829             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1830         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1831     if selfA._toStore("SimulatedObservationAtBackground"):
1832         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1833     if selfA._toStore("SimulatedObservationAtOptimum"):
1834         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1835     #
1836     return 0
1837
1838 # ==============================================================================
1839 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
1840     """
1841     Stochastic EnKF (Envensen 1994, Burgers 1998)
1842
1843     selfA est identique au "self" d'algorithme appelant et contient les
1844     valeurs.
1845     """
1846     if selfA._parameters["EstimationOf"] == "Parameters":
1847         selfA._parameters["StoreInternalVariables"] = True
1848     #
1849     # Opérateurs
1850     # ----------
1851     H = HO["Direct"].appliedControledFormTo
1852     #
1853     if selfA._parameters["EstimationOf"] == "State":
1854         M = EM["Direct"].appliedControledFormTo
1855     #
1856     if CM is not None and "Tangent" in CM and U is not None:
1857         Cm = CM["Tangent"].asMatrix(Xb)
1858     else:
1859         Cm = None
1860     #
1861     # Nombre de pas identique au nombre de pas d'observations
1862     # -------------------------------------------------------
1863     if hasattr(Y,"stepnumber"):
1864         duration = Y.stepnumber()
1865         __p = numpy.cumprod(Y.shape())[-1]
1866     else:
1867         duration = 2
1868         __p = numpy.array(Y).size
1869     #
1870     # Précalcul des inversions de B et R
1871     # ----------------------------------
1872     if selfA._parameters["StoreInternalVariables"] \
1873         or selfA._toStore("CostFunctionJ") \
1874         or selfA._toStore("CostFunctionJb") \
1875         or selfA._toStore("CostFunctionJo") \
1876         or selfA._toStore("CurrentOptimum") \
1877         or selfA._toStore("APosterioriCovariance"):
1878         BI = B.getI()
1879         RI = R.getI()
1880     #
1881     # Initialisation
1882     # --------------
1883     __n = Xb.size
1884     __m = selfA._parameters["NumberOfMembers"]
1885     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1886     else:                         Pn = B
1887     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1888     else:                         Rn = R
1889     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1890     else:                         Qn = Q
1891     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1892     #
1893     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1894         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
1895         if selfA._toStore("APosterioriCovariance"):
1896             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1897             covarianceXa = Pn
1898     #
1899     previousJMinimum = numpy.finfo(float).max
1900     #
1901     for step in range(duration-1):
1902         if hasattr(Y,"store"):
1903             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
1904         else:
1905             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
1906         #
1907         if U is not None:
1908             if hasattr(U,"store") and len(U)>1:
1909                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1910             elif hasattr(U,"store") and len(U)==1:
1911                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1912             else:
1913                 Un = numpy.asmatrix(numpy.ravel( U )).T
1914         else:
1915             Un = None
1916         #
1917         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1918             Xn = CovarianceInflation( Xn,
1919                 selfA._parameters["InflationType"],
1920                 selfA._parameters["InflationFactor"],
1921                 )
1922         #
1923         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1924             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1925                 argsAsSerie = True,
1926                 returnSerieAsArrayMatrix = True )
1927             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1928             Xn_predicted = EMX + qi
1929             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1930                 argsAsSerie = True,
1931                 returnSerieAsArrayMatrix = True )
1932             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1933                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1934                 Xn_predicted = Xn_predicted + Cm * Un
1935         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1936             # --- > Par principe, M = Id, Q = 0
1937             Xn_predicted = Xn
1938             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1939                 argsAsSerie = True,
1940                 returnSerieAsArrayMatrix = True )
1941         #
1942         # Mean of forecast and observation of forecast
1943         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1944         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1945         #
1946         #--------------------------
1947         if VariantM == "KalmanFilterFormula05":
1948             PfHT, HPfHT = 0., 0.
1949             for i in range(__m):
1950                 Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
1951                 Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
1952                 PfHT  += Exfi * Eyfi.T
1953                 HPfHT += Eyfi * Eyfi.T
1954             PfHT  = (1./(__m-1)) * PfHT
1955             HPfHT = (1./(__m-1)) * HPfHT
1956             Kn     = PfHT * ( R + HPfHT ).I
1957             del PfHT, HPfHT
1958             #
1959             for i in range(__m):
1960                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
1961                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
1962         #--------------------------
1963         elif VariantM == "KalmanFilterFormula16":
1964             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
1965             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1966             #
1967             EaX   = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
1968             EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
1969             #
1970             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
1971             #
1972             for i in range(__m):
1973                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
1974         #--------------------------
1975         else:
1976             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1977         #
1978         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1979             Xn = CovarianceInflation( Xn,
1980                 selfA._parameters["InflationType"],
1981                 selfA._parameters["InflationFactor"],
1982                 )
1983         #
1984         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1985         #--------------------------
1986         #
1987         if selfA._parameters["StoreInternalVariables"] \
1988             or selfA._toStore("CostFunctionJ") \
1989             or selfA._toStore("CostFunctionJb") \
1990             or selfA._toStore("CostFunctionJo") \
1991             or selfA._toStore("APosterioriCovariance") \
1992             or selfA._toStore("InnovationAtCurrentAnalysis") \
1993             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1994             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1995             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1996             _Innovation = Ynpu - _HXa
1997         #
1998         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1999         # ---> avec analysis
2000         selfA.StoredVariables["Analysis"].store( Xa )
2001         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2002             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2003         if selfA._toStore("InnovationAtCurrentAnalysis"):
2004             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2005         # ---> avec current state
2006         if selfA._parameters["StoreInternalVariables"] \
2007             or selfA._toStore("CurrentState"):
2008             selfA.StoredVariables["CurrentState"].store( Xn )
2009         if selfA._toStore("ForecastState"):
2010             selfA.StoredVariables["ForecastState"].store( EMX )
2011         if selfA._toStore("BMA"):
2012             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2013         if selfA._toStore("InnovationAtCurrentState"):
2014             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2015         if selfA._toStore("SimulatedObservationAtCurrentState") \
2016             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2017             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2018         # ---> autres
2019         if selfA._parameters["StoreInternalVariables"] \
2020             or selfA._toStore("CostFunctionJ") \
2021             or selfA._toStore("CostFunctionJb") \
2022             or selfA._toStore("CostFunctionJo") \
2023             or selfA._toStore("CurrentOptimum") \
2024             or selfA._toStore("APosterioriCovariance"):
2025             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2026             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2027             J   = Jb + Jo
2028             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2029             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2030             selfA.StoredVariables["CostFunctionJ" ].store( J )
2031             #
2032             if selfA._toStore("IndexOfOptimum") \
2033                 or selfA._toStore("CurrentOptimum") \
2034                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2035                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2036                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2037                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2038                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2039             if selfA._toStore("IndexOfOptimum"):
2040                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2041             if selfA._toStore("CurrentOptimum"):
2042                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2043             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2044                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2045             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2046                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2047             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2048                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2049             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2050                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2051         if selfA._toStore("APosterioriCovariance"):
2052             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2053             Pn = Eai @ Eai.T
2054             Pn = 0.5 * (Pn + Pn.T)
2055             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2056         if selfA._parameters["EstimationOf"] == "Parameters" \
2057             and J < previousJMinimum:
2058             previousJMinimum    = J
2059             XaMin               = Xa
2060             if selfA._toStore("APosterioriCovariance"):
2061                 covarianceXaMin = Pn
2062     #
2063     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2064     # ----------------------------------------------------------------------
2065     if selfA._parameters["EstimationOf"] == "Parameters":
2066         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2067         selfA.StoredVariables["Analysis"].store( XaMin )
2068         if selfA._toStore("APosterioriCovariance"):
2069             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2070         if selfA._toStore("BMA"):
2071             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2072     #
2073     return 0
2074
2075 # ==============================================================================
2076 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2077     """
2078     Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)
2079
2080     selfA est identique au "self" d'algorithme appelant et contient les
2081     valeurs.
2082     """
2083     if selfA._parameters["EstimationOf"] == "Parameters":
2084         selfA._parameters["StoreInternalVariables"] = True
2085     #
2086     # Opérateurs
2087     # ----------
2088     H = HO["Direct"].appliedControledFormTo
2089     #
2090     if selfA._parameters["EstimationOf"] == "State":
2091         M = EM["Direct"].appliedControledFormTo
2092     #
2093     if CM is not None and "Tangent" in CM and U is not None:
2094         Cm = CM["Tangent"].asMatrix(Xb)
2095     else:
2096         Cm = None
2097     #
2098     # Nombre de pas identique au nombre de pas d'observations
2099     # -------------------------------------------------------
2100     if hasattr(Y,"stepnumber"):
2101         duration = Y.stepnumber()
2102         __p = numpy.cumprod(Y.shape())[-1]
2103     else:
2104         duration = 2
2105         __p = numpy.array(Y).size
2106     #
2107     # Précalcul des inversions de B et R
2108     # ----------------------------------
2109     if selfA._parameters["StoreInternalVariables"] \
2110         or selfA._toStore("CostFunctionJ") \
2111         or selfA._toStore("CostFunctionJb") \
2112         or selfA._toStore("CostFunctionJo") \
2113         or selfA._toStore("CurrentOptimum") \
2114         or selfA._toStore("APosterioriCovariance"):
2115         BI = B.getI()
2116         RI = R.getI()
2117     elif VariantM != "KalmanFilterFormula":
2118         RI = R.getI()
2119     if VariantM == "KalmanFilterFormula":
2120         RIdemi = R.choleskyI()
2121     #
2122     # Initialisation
2123     # --------------
2124     __n = Xb.size
2125     __m = selfA._parameters["NumberOfMembers"]
2126     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2127     else:                         Pn = B
2128     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2129     else:                         Rn = R
2130     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2131     else:                         Qn = Q
2132     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2133     #
2134     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2135         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2136         if selfA._toStore("APosterioriCovariance"):
2137             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2138             covarianceXa = Pn
2139     #
2140     previousJMinimum = numpy.finfo(float).max
2141     #
2142     for step in range(duration-1):
2143         if hasattr(Y,"store"):
2144             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2145         else:
2146             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2147         #
2148         if U is not None:
2149             if hasattr(U,"store") and len(U)>1:
2150                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2151             elif hasattr(U,"store") and len(U)==1:
2152                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2153             else:
2154                 Un = numpy.asmatrix(numpy.ravel( U )).T
2155         else:
2156             Un = None
2157         #
2158         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2159             Xn = CovarianceInflation( Xn,
2160                 selfA._parameters["InflationType"],
2161                 selfA._parameters["InflationFactor"],
2162                 )
2163         #
2164         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2165             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2166                 argsAsSerie = True,
2167                 returnSerieAsArrayMatrix = True )
2168             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2169             Xn_predicted = EMX + qi
2170             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2171                 argsAsSerie = True,
2172                 returnSerieAsArrayMatrix = True )
2173             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2174                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2175                 Xn_predicted = Xn_predicted + Cm * Un
2176         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2177             # --- > Par principe, M = Id, Q = 0
2178             Xn_predicted = Xn
2179             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2180                 argsAsSerie = True,
2181                 returnSerieAsArrayMatrix = True )
2182         #
2183         # Mean of forecast and observation of forecast
2184         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2185         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2186         #
2187         # Anomalies
2188         EaX   = EnsembleOfAnomalies( Xn_predicted )
2189         EaHX  = numpy.array(HX_predicted - Hfm)
2190         #
2191         #--------------------------
2192         if VariantM == "KalmanFilterFormula":
2193             mS    = RIdemi * EaHX / numpy.sqrt(__m-1)
2194             delta = RIdemi * ( Ynpu - Hfm )
2195             mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2196             vw    = mT @ mS.transpose() @ delta
2197             #
2198             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2199             mU    = numpy.eye(__m)
2200             #
2201             EaX   = EaX / numpy.sqrt(__m-1)
2202             Xn    = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
2203         #--------------------------
2204         elif VariantM == "Variational":
2205             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
2206             def CostFunction(w):
2207                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2208                 _Jo = 0.5 * _A.T @ (RI * _A)
2209                 _Jb = 0.5 * (__m-1) * w.T @ w
2210                 _J  = _Jo + _Jb
2211                 return float(_J)
2212             def GradientOfCostFunction(w):
2213                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2214                 _GardJo = - EaHX.T @ (RI * _A)
2215                 _GradJb = (__m-1) * w.reshape((__m,1))
2216                 _GradJ  = _GardJo + _GradJb
2217                 return numpy.ravel(_GradJ)
2218             vw = scipy.optimize.fmin_cg(
2219                 f           = CostFunction,
2220                 x0          = numpy.zeros(__m),
2221                 fprime      = GradientOfCostFunction,
2222                 args        = (),
2223                 disp        = False,
2224                 )
2225             #
2226             Hto = EaHX.T @ (RI * EaHX)
2227             Htb = (__m-1) * numpy.eye(__m)
2228             Hta = Hto + Htb
2229             #
2230             Pta = numpy.linalg.inv( Hta )
2231             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
2232             #
2233             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2234         #--------------------------
2235         elif VariantM == "FiniteSize11": # Jauge Boc2011
2236             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
2237             def CostFunction(w):
2238                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2239                 _Jo = 0.5 * _A.T @ (RI * _A)
2240                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2241                 _J  = _Jo + _Jb
2242                 return float(_J)
2243             def GradientOfCostFunction(w):
2244                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2245                 _GardJo = - EaHX.T @ (RI * _A)
2246                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2247                 _GradJ  = _GardJo + _GradJb
2248                 return numpy.ravel(_GradJ)
2249             vw = scipy.optimize.fmin_cg(
2250                 f           = CostFunction,
2251                 x0          = numpy.zeros(__m),
2252                 fprime      = GradientOfCostFunction,
2253                 args        = (),
2254                 disp        = False,
2255                 )
2256             #
2257             Hto = EaHX.T @ (RI * EaHX)
2258             Htb = __m * \
2259                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2260                 / (1 + 1/__m + vw.T @ vw)**2
2261             Hta = Hto + Htb
2262             #
2263             Pta = numpy.linalg.inv( Hta )
2264             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
2265             #
2266             Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2267         #--------------------------
2268         elif VariantM == "FiniteSize15": # Jauge Boc2015
2269             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
2270             def CostFunction(w):
2271                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2272                 _Jo = 0.5 * _A.T * RI * _A
2273                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2274                 _J  = _Jo + _Jb
2275                 return float(_J)
2276             def GradientOfCostFunction(w):
2277                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2278                 _GardJo = - EaHX.T @ (RI * _A)
2279                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2280                 _GradJ  = _GardJo + _GradJb
2281                 return numpy.ravel(_GradJ)
2282             vw = scipy.optimize.fmin_cg(
2283                 f           = CostFunction,
2284                 x0          = numpy.zeros(__m),
2285                 fprime      = GradientOfCostFunction,
2286                 args        = (),
2287                 disp        = False,
2288                 )
2289             #
2290             Hto = EaHX.T @ (RI * EaHX)
2291             Htb = (__m+1) * \
2292                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2293                 / (1 + 1/__m + vw.T @ vw)**2
2294             Hta = Hto + Htb
2295             #
2296             Pta = numpy.linalg.inv( Hta )
2297             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
2298             #
2299             Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2300         #--------------------------
2301         elif VariantM == "FiniteSize16": # Jauge Boc2016
2302             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
2303             def CostFunction(w):
2304                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2305                 _Jo = 0.5 * _A.T @ (RI * _A)
2306                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2307                 _J  = _Jo + _Jb
2308                 return float(_J)
2309             def GradientOfCostFunction(w):
2310                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2311                 _GardJo = - EaHX.T @ (RI * _A)
2312                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2313                 _GradJ  = _GardJo + _GradJb
2314                 return numpy.ravel(_GradJ)
2315             vw = scipy.optimize.fmin_cg(
2316                 f           = CostFunction,
2317                 x0          = numpy.zeros(__m),
2318                 fprime      = GradientOfCostFunction,
2319                 args        = (),
2320                 disp        = False,
2321                 )
2322             #
2323             Hto = EaHX.T @ (RI * EaHX)
2324             Htb = ((__m+1) / (__m-1)) * \
2325                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
2326                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2327             Hta = Hto + Htb
2328             #
2329             Pta = numpy.linalg.inv( Hta )
2330             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
2331             #
2332             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2333         #--------------------------
2334         else:
2335             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2336         #
2337         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2338             Xn = CovarianceInflation( Xn,
2339                 selfA._parameters["InflationType"],
2340                 selfA._parameters["InflationFactor"],
2341                 )
2342         #
2343         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2344         #--------------------------
2345         #
2346         if selfA._parameters["StoreInternalVariables"] \
2347             or selfA._toStore("CostFunctionJ") \
2348             or selfA._toStore("CostFunctionJb") \
2349             or selfA._toStore("CostFunctionJo") \
2350             or selfA._toStore("APosterioriCovariance") \
2351             or selfA._toStore("InnovationAtCurrentAnalysis") \
2352             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2353             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2354             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2355             _Innovation = Ynpu - _HXa
2356         #
2357         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2358         # ---> avec analysis
2359         selfA.StoredVariables["Analysis"].store( Xa )
2360         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2361             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2362         if selfA._toStore("InnovationAtCurrentAnalysis"):
2363             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2364         # ---> avec current state
2365         if selfA._parameters["StoreInternalVariables"] \
2366             or selfA._toStore("CurrentState"):
2367             selfA.StoredVariables["CurrentState"].store( Xn )
2368         if selfA._toStore("ForecastState"):
2369             selfA.StoredVariables["ForecastState"].store( EMX )
2370         if selfA._toStore("BMA"):
2371             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2372         if selfA._toStore("InnovationAtCurrentState"):
2373             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
2374         if selfA._toStore("SimulatedObservationAtCurrentState") \
2375             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2376             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2377         # ---> autres
2378         if selfA._parameters["StoreInternalVariables"] \
2379             or selfA._toStore("CostFunctionJ") \
2380             or selfA._toStore("CostFunctionJb") \
2381             or selfA._toStore("CostFunctionJo") \
2382             or selfA._toStore("CurrentOptimum") \
2383             or selfA._toStore("APosterioriCovariance"):
2384             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2385             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2386             J   = Jb + Jo
2387             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2388             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2389             selfA.StoredVariables["CostFunctionJ" ].store( J )
2390             #
2391             if selfA._toStore("IndexOfOptimum") \
2392                 or selfA._toStore("CurrentOptimum") \
2393                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2394                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2395                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2396                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2397                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2398             if selfA._toStore("IndexOfOptimum"):
2399                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2400             if selfA._toStore("CurrentOptimum"):
2401                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2402             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2403                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2404             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2405                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2406             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2407                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2408             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2409                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2410         if selfA._toStore("APosterioriCovariance"):
2411             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2412             Pn = Eai @ Eai.T
2413             Pn = 0.5 * (Pn + Pn.T)
2414             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2415         if selfA._parameters["EstimationOf"] == "Parameters" \
2416             and J < previousJMinimum:
2417             previousJMinimum    = J
2418             XaMin               = Xa
2419             if selfA._toStore("APosterioriCovariance"):
2420                 covarianceXaMin = Pn
2421     #
2422     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2423     # ----------------------------------------------------------------------
2424     if selfA._parameters["EstimationOf"] == "Parameters":
2425         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2426         selfA.StoredVariables["Analysis"].store( XaMin )
2427         if selfA._toStore("APosterioriCovariance"):
2428             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2429         if selfA._toStore("BMA"):
2430             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2431     #
2432     return 0
2433
2434 # ==============================================================================
2435 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2436     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2437     """
2438     Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013)
2439
2440     selfA est identique au "self" d'algorithme appelant et contient les
2441     valeurs.
2442     """
2443     if selfA._parameters["EstimationOf"] == "Parameters":
2444         selfA._parameters["StoreInternalVariables"] = True
2445     #
2446     # Opérateurs
2447     # ----------
2448     H = HO["Direct"].appliedControledFormTo
2449     #
2450     if selfA._parameters["EstimationOf"] == "State":
2451         M = EM["Direct"].appliedControledFormTo
2452     #
2453     if CM is not None and "Tangent" in CM and U is not None:
2454         Cm = CM["Tangent"].asMatrix(Xb)
2455     else:
2456         Cm = None
2457     #
2458     # Nombre de pas identique au nombre de pas d'observations
2459     # -------------------------------------------------------
2460     if hasattr(Y,"stepnumber"):
2461         duration = Y.stepnumber()
2462         __p = numpy.cumprod(Y.shape())[-1]
2463     else:
2464         duration = 2
2465         __p = numpy.array(Y).size
2466     #
2467     # Précalcul des inversions de B et R
2468     # ----------------------------------
2469     if selfA._parameters["StoreInternalVariables"] \
2470         or selfA._toStore("CostFunctionJ") \
2471         or selfA._toStore("CostFunctionJb") \
2472         or selfA._toStore("CostFunctionJo") \
2473         or selfA._toStore("CurrentOptimum") \
2474         or selfA._toStore("APosterioriCovariance"):
2475         BI = B.getI()
2476     RI = R.getI()
2477     #
2478     # Initialisation
2479     # --------------
2480     __n = Xb.size
2481     __m = selfA._parameters["NumberOfMembers"]
2482     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2483     else:                         Pn = B
2484     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2485     else:                         Rn = R
2486     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2487     else:                         Qn = Q
2488     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2489     #
2490     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2491         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2492         if selfA._toStore("APosterioriCovariance"):
2493             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2494             covarianceXa = Pn
2495     #
2496     previousJMinimum = numpy.finfo(float).max
2497     #
2498     for step in range(duration-1):
2499         if hasattr(Y,"store"):
2500             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2501         else:
2502             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2503         #
2504         if U is not None:
2505             if hasattr(U,"store") and len(U)>1:
2506                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2507             elif hasattr(U,"store") and len(U)==1:
2508                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2509             else:
2510                 Un = numpy.asmatrix(numpy.ravel( U )).T
2511         else:
2512             Un = None
2513         #
2514         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2515             Xn = CovarianceInflation( Xn,
2516                 selfA._parameters["InflationType"],
2517                 selfA._parameters["InflationFactor"],
2518                 )
2519         #
2520         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2521             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2522                 argsAsSerie = True,
2523                 returnSerieAsArrayMatrix = True )
2524             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2525             Xn_predicted = EMX + qi
2526             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2527                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2528                 Xn_predicted = Xn_predicted + Cm * Un
2529         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2530             # --- > Par principe, M = Id, Q = 0
2531             Xn_predicted = Xn
2532         #
2533         #--------------------------
2534         if VariantM == "MLEF13":
2535             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2536             EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2537             Ua  = numpy.eye(__m)
2538             __j = 0
2539             Deltaw = 1
2540             if not BnotT:
2541                 Ta  = numpy.eye(__m)
2542             vw  = numpy.zeros(__m)
2543             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2544                 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2545                 #
2546                 if BnotT:
2547                     E1 = vx1 + _epsilon * EaX
2548                 else:
2549                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2550                 #
2551                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2552                     argsAsSerie = True,
2553                     returnSerieAsArrayMatrix = True )
2554                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2555                 #
2556                 if BnotT:
2557                     EaY = (HE2 - vy2) / _epsilon
2558                 else:
2559                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2560                 #
2561                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2562                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2563                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2564                 #
2565                 vw = vw + Deltaw
2566                 #
2567                 if not BnotT:
2568                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2569                 #
2570                 __j = __j + 1
2571             #
2572             if BnotT:
2573                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2574             #
2575             Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
2576         #--------------------------
2577         else:
2578             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2579         #
2580         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2581             Xn = CovarianceInflation( Xn,
2582                 selfA._parameters["InflationType"],
2583                 selfA._parameters["InflationFactor"],
2584                 )
2585         #
2586         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2587         #--------------------------
2588         #
2589         if selfA._parameters["StoreInternalVariables"] \
2590             or selfA._toStore("CostFunctionJ") \
2591             or selfA._toStore("CostFunctionJb") \
2592             or selfA._toStore("CostFunctionJo") \
2593             or selfA._toStore("APosterioriCovariance") \
2594             or selfA._toStore("InnovationAtCurrentAnalysis") \
2595             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2596             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2597             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2598             _Innovation = Ynpu - _HXa
2599         #
2600         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2601         # ---> avec analysis
2602         selfA.StoredVariables["Analysis"].store( Xa )
2603         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2604             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2605         if selfA._toStore("InnovationAtCurrentAnalysis"):
2606             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2607         # ---> avec current state
2608         if selfA._parameters["StoreInternalVariables"] \
2609             or selfA._toStore("CurrentState"):
2610             selfA.StoredVariables["CurrentState"].store( Xn )
2611         if selfA._toStore("ForecastState"):
2612             selfA.StoredVariables["ForecastState"].store( EMX )
2613         if selfA._toStore("BMA"):
2614             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2615         if selfA._toStore("InnovationAtCurrentState"):
2616             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2617         if selfA._toStore("SimulatedObservationAtCurrentState") \
2618             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2619             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2620         # ---> autres
2621         if selfA._parameters["StoreInternalVariables"] \
2622             or selfA._toStore("CostFunctionJ") \
2623             or selfA._toStore("CostFunctionJb") \
2624             or selfA._toStore("CostFunctionJo") \
2625             or selfA._toStore("CurrentOptimum") \
2626             or selfA._toStore("APosterioriCovariance"):
2627             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2628             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2629             J   = Jb + Jo
2630             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2631             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2632             selfA.StoredVariables["CostFunctionJ" ].store( J )
2633             #
2634             if selfA._toStore("IndexOfOptimum") \
2635                 or selfA._toStore("CurrentOptimum") \
2636                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2637                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2638                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2639                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2640                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2641             if selfA._toStore("IndexOfOptimum"):
2642                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2643             if selfA._toStore("CurrentOptimum"):
2644                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2645             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2646                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2647             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2648                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2649             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2650                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2651             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2652                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2653         if selfA._toStore("APosterioriCovariance"):
2654             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2655             Pn = Eai @ Eai.T
2656             Pn = 0.5 * (Pn + Pn.T)
2657             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2658         if selfA._parameters["EstimationOf"] == "Parameters" \
2659             and J < previousJMinimum:
2660             previousJMinimum    = J
2661             XaMin               = Xa
2662             if selfA._toStore("APosterioriCovariance"):
2663                 covarianceXaMin = Pn
2664     #
2665     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2666     # ----------------------------------------------------------------------
2667     if selfA._parameters["EstimationOf"] == "Parameters":
2668         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2669         selfA.StoredVariables["Analysis"].store( XaMin )
2670         if selfA._toStore("APosterioriCovariance"):
2671             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2672         if selfA._toStore("BMA"):
2673             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2674     #
2675     return 0
2676
2677 # ==============================================================================
2678 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2679     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2680     """
2681     Iterative EnKF (Sakov 2012, Sakov 2018)
2682
2683     selfA est identique au "self" d'algorithme appelant et contient les
2684     valeurs.
2685     """
2686     if selfA._parameters["EstimationOf"] == "Parameters":
2687         selfA._parameters["StoreInternalVariables"] = True
2688     #
2689     # Opérateurs
2690     # ----------
2691     H = HO["Direct"].appliedControledFormTo
2692     #
2693     if selfA._parameters["EstimationOf"] == "State":
2694         M = EM["Direct"].appliedControledFormTo
2695     #
2696     if CM is not None and "Tangent" in CM and U is not None:
2697         Cm = CM["Tangent"].asMatrix(Xb)
2698     else:
2699         Cm = None
2700     #
2701     # Nombre de pas identique au nombre de pas d'observations
2702     # -------------------------------------------------------
2703     if hasattr(Y,"stepnumber"):
2704         duration = Y.stepnumber()
2705         __p = numpy.cumprod(Y.shape())[-1]
2706     else:
2707         duration = 2
2708         __p = numpy.array(Y).size
2709     #
2710     # Précalcul des inversions de B et R
2711     # ----------------------------------
2712     if selfA._parameters["StoreInternalVariables"] \
2713         or selfA._toStore("CostFunctionJ") \
2714         or selfA._toStore("CostFunctionJb") \
2715         or selfA._toStore("CostFunctionJo") \
2716         or selfA._toStore("CurrentOptimum") \
2717         or selfA._toStore("APosterioriCovariance"):
2718         BI = B.getI()
2719     RI = R.getI()
2720     #
2721     # Initialisation
2722     # --------------
2723     __n = Xb.size
2724     __m = selfA._parameters["NumberOfMembers"]
2725     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2726     else:                         Pn = B
2727     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2728     else:                         Rn = R
2729     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2730     else:                         Qn = Q
2731     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2732     #
2733     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2734         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2735         if selfA._toStore("APosterioriCovariance"):
2736             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2737             covarianceXa = Pn
2738     #
2739     previousJMinimum = numpy.finfo(float).max
2740     #
2741     for step in range(duration-1):
2742         if hasattr(Y,"store"):
2743             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2744         else:
2745             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2746         #
2747         if U is not None:
2748             if hasattr(U,"store") and len(U)>1:
2749                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2750             elif hasattr(U,"store") and len(U)==1:
2751                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2752             else:
2753                 Un = numpy.asmatrix(numpy.ravel( U )).T
2754         else:
2755             Un = None
2756         #
2757         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2758             Xn = CovarianceInflation( Xn,
2759                 selfA._parameters["InflationType"],
2760                 selfA._parameters["InflationFactor"],
2761                 )
2762         #
2763         #--------------------------
2764         if VariantM == "IEnKF12":
2765             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2766             EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
2767             __j = 0
2768             Deltaw = 1
2769             if not BnotT:
2770                 Ta  = numpy.eye(__m)
2771             vw  = numpy.zeros(__m)
2772             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2773                 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2774                 #
2775                 if BnotT:
2776                     E1 = vx1 + _epsilon * EaX
2777                 else:
2778                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2779                 #
2780                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2781                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2782                         argsAsSerie = True,
2783                         returnSerieAsArrayMatrix = True )
2784                 elif selfA._parameters["EstimationOf"] == "Parameters":
2785                     # --- > Par principe, M = Id
2786                     E2 = Xn
2787                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2788                 vy1 = H((vx2, Un)).reshape((__p,-1))
2789                 #
2790                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2791                     argsAsSerie = True,
2792                     returnSerieAsArrayMatrix = True )
2793                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2794                 #
2795                 if BnotT:
2796                     EaY = (HE2 - vy2) / _epsilon
2797                 else:
2798                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2799                 #
2800                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2801                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2802                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2803                 #
2804                 vw = vw + Deltaw
2805                 #
2806                 if not BnotT:
2807                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2808                 #
2809                 __j = __j + 1
2810             #
2811             A2 = EnsembleOfAnomalies( E2 )
2812             #
2813             if BnotT:
2814                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2815                 A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
2816             #
2817             Xn = vx2 + A2
2818         #--------------------------
2819         else:
2820             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2821         #
2822         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2823             Xn = CovarianceInflation( Xn,
2824                 selfA._parameters["InflationType"],
2825                 selfA._parameters["InflationFactor"],
2826                 )
2827         #
2828         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2829         #--------------------------
2830         #
2831         if selfA._parameters["StoreInternalVariables"] \
2832             or selfA._toStore("CostFunctionJ") \
2833             or selfA._toStore("CostFunctionJb") \
2834             or selfA._toStore("CostFunctionJo") \
2835             or selfA._toStore("APosterioriCovariance") \
2836             or selfA._toStore("InnovationAtCurrentAnalysis") \
2837             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2838             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2839             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2840             _Innovation = Ynpu - _HXa
2841         #
2842         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2843         # ---> avec analysis
2844         selfA.StoredVariables["Analysis"].store( Xa )
2845         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2846             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2847         if selfA._toStore("InnovationAtCurrentAnalysis"):
2848             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2849         # ---> avec current state
2850         if selfA._parameters["StoreInternalVariables"] \
2851             or selfA._toStore("CurrentState"):
2852             selfA.StoredVariables["CurrentState"].store( Xn )
2853         if selfA._toStore("ForecastState"):
2854             selfA.StoredVariables["ForecastState"].store( E2 )
2855         if selfA._toStore("BMA"):
2856             selfA.StoredVariables["BMA"].store( E2 - Xa )
2857         if selfA._toStore("InnovationAtCurrentState"):
2858             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2859         if selfA._toStore("SimulatedObservationAtCurrentState") \
2860             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2861             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2862         # ---> autres
2863         if selfA._parameters["StoreInternalVariables"] \
2864             or selfA._toStore("CostFunctionJ") \
2865             or selfA._toStore("CostFunctionJb") \
2866             or selfA._toStore("CostFunctionJo") \
2867             or selfA._toStore("CurrentOptimum") \
2868             or selfA._toStore("APosterioriCovariance"):
2869             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2870             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2871             J   = Jb + Jo
2872             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2873             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2874             selfA.StoredVariables["CostFunctionJ" ].store( J )
2875             #
2876             if selfA._toStore("IndexOfOptimum") \
2877                 or selfA._toStore("CurrentOptimum") \
2878                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2879                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2880                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2881                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2882                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2883             if selfA._toStore("IndexOfOptimum"):
2884                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2885             if selfA._toStore("CurrentOptimum"):
2886                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2887             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2888                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2889             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2890                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2891             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2892                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2893             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2894                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2895         if selfA._toStore("APosterioriCovariance"):
2896             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2897             Pn = Eai @ Eai.T
2898             Pn = 0.5 * (Pn + Pn.T)
2899             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2900         if selfA._parameters["EstimationOf"] == "Parameters" \
2901             and J < previousJMinimum:
2902             previousJMinimum    = J
2903             XaMin               = Xa
2904             if selfA._toStore("APosterioriCovariance"):
2905                 covarianceXaMin = Pn
2906     #
2907     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2908     # ----------------------------------------------------------------------
2909     if selfA._parameters["EstimationOf"] == "Parameters":
2910         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2911         selfA.StoredVariables["Analysis"].store( XaMin )
2912         if selfA._toStore("APosterioriCovariance"):
2913             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2914         if selfA._toStore("BMA"):
2915             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2916     #
2917     return 0
2918
2919 # ==============================================================================
2920 if __name__ == "__main__":
2921     print('\n AUTODIAGNOSTIC\n')