src/daComposant/daCore/NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( paire ):
38     assert len(paire) == 2, "Incorrect number of arguments"
39     X, funcrepr = paire
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     __HX  = __fonction( __X )
46     return numpy.ravel( __HX )
47
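# Illustrative sketch (not executed): the "funcrepr" dictionary expected by
# ExecuteFunction, as built later in FDApproximation.TangentMatrix. The path,
# module and function names below are hypothetical placeholders.
#
#   funcrepr = {
#       "__userFunction__path" : "/some/user/directory",   # hypothetical directory added to sys.path
#       "__userFunction__modl" : "usermodule",             # hypothetical module name, without extension
#       "__userFunction__name" : "userfunction",           # hypothetical function name in that module
#       }
#   HX = ExecuteFunction( (numpy.arange(3.), funcrepr) )   # returns numpy.ravel( userfunction(X) )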
48 # ==============================================================================
49 class FDApproximation(object):
50     """
51     This class provides an interface to define approximated operators. When an
52     object is created by supplying a function "Function", one obtains an object
53     with 3 methods, "DirectOperator", "TangentOperator" and "AdjointOperator".
54     The finite-difference (FD) approximation is controlled either by the
55     multiplicative increment "increment", set to 1% by default, or by the fixed
56     increment "dX" which is then multiplied by "increment" (hence in %), and
57     centered FD are used if the boolean "centeredDF" is true.
58     """
59     def __init__(self,
60             name                  = "FDApproximation",
61             Function              = None,
62             centeredDF            = False,
63             increment             = 0.01,
64             dX                    = None,
65             avoidingRedundancy    = True,
66             toleranceInRedundancy = 1.e-18,
67             lenghtOfRedundancy    = -1,
68             mpEnabled             = False,
69             mpWorkers             = None,
70             mfEnabled             = False,
71             ):
72         self.__name = str(name)
73         if mpEnabled:
74             try:
75                 import multiprocessing
76                 self.__mpEnabled = True
77             except ImportError:
78                 self.__mpEnabled = False
79         else:
80             self.__mpEnabled = False
81         self.__mpWorkers = mpWorkers
82         if self.__mpWorkers is not None and self.__mpWorkers < 1:
83             self.__mpWorkers = None
84         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
85         #
86         if mfEnabled:
87             self.__mfEnabled = True
88         else:
89             self.__mfEnabled = False
90         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
91         #
92         if avoidingRedundancy:
93             self.__avoidRC = True
94             self.__tolerBP = float(toleranceInRedundancy)
95             self.__lenghtRJ = int(lenghtOfRedundancy)
96             self.__listJPCP = [] # Jacobian Previous Calculated Points
97             self.__listJPCI = [] # Jacobian Previous Calculated Increment
98             self.__listJPCR = [] # Jacobian Previous Calculated Results
99             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
100             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
101         else:
102             self.__avoidRC = False
103         #
104         if self.__mpEnabled:
105             if isinstance(Function,types.FunctionType):
106                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
107                 self.__userFunction__name = Function.__name__
108                 try:
109                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
110                 except:
111                     mod = os.path.abspath(Function.__globals__['__file__'])
112                 if not os.path.isfile(mod):
113                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
114                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
115                 self.__userFunction__path = os.path.dirname(mod)
116                 del mod
117                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
118                 self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
119             elif isinstance(Function,types.MethodType):
120                 logging.debug("FDA Calculs en multiprocessing : MethodType")
121                 self.__userFunction__name = Function.__name__
122                 try:
123                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
124                 except:
125                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
126                 if not os.path.isfile(mod):
127                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
128                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
129                 self.__userFunction__path = os.path.dirname(mod)
130                 del mod
131                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
132                 self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
133             else:
134                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
135         else:
136             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
137             self.__userFunction = self.__userOperator.appliedTo
138         #
139         self.__centeredDF = bool(centeredDF)
140         if abs(float(increment)) > 1.e-15:
141             self.__increment  = float(increment)
142         else:
143             self.__increment  = 0.01
144         if dX is None:
145             self.__dX     = None
146         else:
147             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
148         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
149         if self.__avoidRC:
150             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
151
152     # ---------------------------------------------------------
153     def __doublon__(self, e, l, n, v=None):
154         __ac, __iac = False, -1
155         for i in range(len(l)-1,-1,-1):
156             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
157                 __ac, __iac = True, i
158                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
159                 break
160         return __ac, __iac
161
162     # ---------------------------------------------------------
163     def DirectOperator(self, X ):
164         """
165         Computation of the direct operator using the provided function.
166         """
167         logging.debug("FDA Calcul DirectOperator (explicite)")
168         if self.__mfEnabled:
169             _HX = self.__userFunction( X, argsAsSerie = True )
170         else:
171             _X = numpy.asmatrix(numpy.ravel( X )).T
172             _HX = numpy.ravel(self.__userFunction( _X ))
173         #
174         return _HX
175
176     # ---------------------------------------------------------
177     def TangentMatrix(self, X ):
178         """
179         Computation of the tangent operator as the Jacobian by finite differences,
180         that is, the gradient of H at X. Directional finite differences are used
181         around the point X. X is a numpy.matrix.
182
183         Centered finite differences (2nd order approximation):
184         1/ For each component i of X, the perturbation dX[i] is added to and
185            subtracted from the component X[i], to build X_plus_dXi and X_moins_dXi,
186            and the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi =
187            H( X_moins_dXi ) are computed
188         2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided by
189            the step 2*dXi
190         3/ Each result, component by component, becomes a column of the Jacobian
191
192         Non-centered finite differences (1st order approximation):
193         1/ For each component i of X, the perturbation dX[i] is added to the
194            component X[i] to build X_plus_dXi, and the response
195            HX_plus_dXi = H( X_plus_dXi ) is computed
196         2/ The central value HX = H(X) is computed
197         3/ The differences (HX_plus_dXi-HX) are computed and divided by
198            the step dXi
199         4/ Each result, component by component, becomes a column of the Jacobian
200
201         """
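        # In short (illustrative summary of the steps described above), for each component i:
        #   centered     : J[:,i] ~ ( H(X + dX[i] e_i) - H(X - dX[i] e_i) ) / (2*dX[i])
        #   non-centered : J[:,i] ~ ( H(X + dX[i] e_i) - H(X) ) / dX[i]
        # where e_i is the i-th canonical basis vector.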
202         logging.debug("FDA Début du calcul de la Jacobienne")
203         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
204         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
205         #
206         if X is None or len(X)==0:
207             raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
208         #
209         _X = numpy.asmatrix(numpy.ravel( X )).T
210         #
211         if self.__dX is None:
212             _dX  = self.__increment * _X
213         else:
214             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
215         #
216         if (_dX == 0.).any():
217             moyenne = _dX.mean()
218             if moyenne == 0.:
219                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
220             else:
221                 _dX = numpy.where( _dX == 0., moyenne, _dX )
222         #
223         __alreadyCalculated  = False
224         if self.__avoidRC:
225             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
226             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
227             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
228                 __alreadyCalculated, __i = True, __alreadyCalculatedP
229                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
230         #
231         if __alreadyCalculated:
232             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
233             _Jacobienne = self.__listJPCR[__i]
234         else:
235             logging.debug("FDA   Calcul Jacobienne (explicite)")
236             if self.__centeredDF:
237                 #
238                 if self.__mpEnabled and not self.__mfEnabled:
239                     funcrepr = {
240                         "__userFunction__path" : self.__userFunction__path,
241                         "__userFunction__modl" : self.__userFunction__modl,
242                         "__userFunction__name" : self.__userFunction__name,
243                     }
244                     _jobs = []
245                     for i in range( len(_dX) ):
246                         _dXi            = _dX[i]
247                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
248                         _X_plus_dXi[i]  = _X[i] + _dXi
249                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
250                         _X_moins_dXi[i] = _X[i] - _dXi
251                         #
252                         _jobs.append( (_X_plus_dXi,  funcrepr) )
253                         _jobs.append( (_X_moins_dXi, funcrepr) )
254                     #
255                     import multiprocessing
256                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
257                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
258                     self.__pool.close()
259                     self.__pool.join()
260                     #
261                     _Jacobienne  = []
262                     for i in range( len(_dX) ):
263                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
264                     #
265                 elif self.__mfEnabled:
266                     _xserie = []
267                     for i in range( len(_dX) ):
268                         _dXi            = _dX[i]
269                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
270                         _X_plus_dXi[i]  = _X[i] + _dXi
271                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
272                         _X_moins_dXi[i] = _X[i] - _dXi
273                         #
274                         _xserie.append( _X_plus_dXi )
275                         _xserie.append( _X_moins_dXi )
276                     #
277                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
278                     #
279                     _Jacobienne  = []
280                     for i in range( len(_dX) ):
281                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
282                     #
283                 else:
284                     _Jacobienne  = []
285                     for i in range( _dX.size ):
286                         _dXi            = _dX[i]
287                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
288                         _X_plus_dXi[i]  = _X[i] + _dXi
289                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
290                         _X_moins_dXi[i] = _X[i] - _dXi
291                         #
292                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
293                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
294                         #
295                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
296                 #
297             else:
298                 #
299                 if self.__mpEnabled and not self.__mfEnabled:
300                     funcrepr = {
301                         "__userFunction__path" : self.__userFunction__path,
302                         "__userFunction__modl" : self.__userFunction__modl,
303                         "__userFunction__name" : self.__userFunction__name,
304                     }
305                     _jobs = []
306                     _jobs.append( (_X.A1, funcrepr) )
307                     for i in range( len(_dX) ):
308                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
309                         _X_plus_dXi[i] = _X[i] + _dX[i]
310                         #
311                         _jobs.append( (_X_plus_dXi, funcrepr) )
312                     #
313                     import multiprocessing
314                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
315                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
316                     self.__pool.close()
317                     self.__pool.join()
318                     #
319                     _HX = _HX_plus_dX.pop(0)
320                     #
321                     _Jacobienne = []
322                     for i in range( len(_dX) ):
323                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
324                     #
325                 elif self.__mfEnabled:
326                     _xserie = []
327                     _xserie.append( _X.A1 )
328                     for i in range( len(_dX) ):
329                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
330                         _X_plus_dXi[i] = _X[i] + _dX[i]
331                         #
332                         _xserie.append( _X_plus_dXi )
333                     #
334                     _HX_plus_dX = self.DirectOperator( _xserie )
335                     #
336                     _HX = _HX_plus_dX.pop(0)
337                     #
338                     _Jacobienne = []
339                     for i in range( len(_dX) ):
340                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
341                     #
342                 else:
343                     _Jacobienne  = []
344                     _HX = self.DirectOperator( _X )
345                     for i in range( _dX.size ):
346                         _dXi            = _dX[i]
347                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
348                         _X_plus_dXi[i]  = _X[i] + _dXi
349                         #
350                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
351                         #
352                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
353                 #
354             #
355             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
356             if self.__avoidRC:
357                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
358                 while len(self.__listJPCP) > self.__lenghtRJ:
359                     self.__listJPCP.pop(0)
360                     self.__listJPCI.pop(0)
361                     self.__listJPCR.pop(0)
362                     self.__listJPPN.pop(0)
363                     self.__listJPIN.pop(0)
364                 self.__listJPCP.append( copy.copy(_X) )
365                 self.__listJPCI.append( copy.copy(_dX) )
366                 self.__listJPCR.append( copy.copy(_Jacobienne) )
367                 self.__listJPPN.append( numpy.linalg.norm(_X) )
368                 self.__listJPIN.append( numpy.linalg.norm(_dX) )
369         #
370         logging.debug("FDA Fin du calcul de la Jacobienne")
371         #
372         return _Jacobienne
373
374     # ---------------------------------------------------------
375     def TangentOperator(self, paire ):
376         """
377         Computation of the tangent operator using the Jacobian.
378         """
379         if self.__mfEnabled:
380             assert len(paire) == 1, "Incorrect length of arguments"
381             _paire = paire[0]
382             assert len(_paire) == 2, "Incorrect number of arguments"
383         else:
384             assert len(paire) == 2, "Incorrect number of arguments"
385             _paire = paire
386         X, dX = _paire
387         _Jacobienne = self.TangentMatrix( X )
388         if dX is None or len(dX) == 0:
389             #
390             # Computation of the matrix form if the second argument is None
391             # -------------------------------------------------------------
392             if self.__mfEnabled: return [_Jacobienne,]
393             else:                return _Jacobienne
394         else:
395             #
396             # Computation of the linearized value of H at X applied to dX
397             # ------------------------------------------------------------
398             _dX = numpy.asmatrix(numpy.ravel( dX )).T
399             _HtX = numpy.dot(_Jacobienne, _dX)
400             if self.__mfEnabled: return [_HtX.A1,]
401             else:                return _HtX.A1
402
403     # ---------------------------------------------------------
404     def AdjointOperator(self, paire ):
405         """
406         Computation of the adjoint operator using the Jacobian.
407         """
408         if self.__mfEnabled:
409             assert len(paire) == 1, "Incorrect length of arguments"
410             _paire = paire[0]
411             assert len(_paire) == 2, "Incorrect number of arguments"
412         else:
413             assert len(paire) == 2, "Incorrect number of arguments"
414             _paire = paire
415         X, Y = _paire
416         _JacobienneT = self.TangentMatrix( X ).T
417         if Y is None or len(Y) == 0:
418             #
419             # Computation of the matrix form if the second argument is None
420             # -------------------------------------------------------------
421             if self.__mfEnabled: return [_JacobienneT,]
422             else:                return _JacobienneT
423         else:
424             #
425             # Computation of the adjoint value at X applied to Y
426             # --------------------------------------------------
427             _Y = numpy.asmatrix(numpy.ravel( Y )).T
428             _HaY = numpy.dot(_JacobienneT, _Y)
429             if self.__mfEnabled: return [_HaY.A1,]
430             else:                return _HaY.A1
431
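# Illustrative sketch (not executed): finite-difference approximation of a simple
# quadratic operator. The function "simple_quadratic" and the values below are
# hypothetical and only serve to show the expected calling conventions.
#
#   import numpy
#   def simple_quadratic( x ):
#       x = numpy.ravel( x )
#       return numpy.array([ x[0]**2 + x[1], 3.*x[1] ])
#   FDA = FDApproximation( Function = simple_quadratic, centeredDF = True, increment = 0.01 )
#   X0  = numpy.array([1., 2.])
#   HX0 = FDA.DirectOperator( X0 )                  # H(X0)
#   J   = FDA.TangentMatrix( X0 )                   # approximate Jacobian of H at X0
#   TlD = FDA.TangentOperator( (X0, [0.1, 0.]) )    # J . dX
#   AdY = FDA.AdjointOperator( (X0, [1., 0.]) )     # J^T . Y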
432 # ==============================================================================
433 def mmqr(
434         func     = None,
435         x0       = None,
436         fprime   = None,
437         bounds   = None,
438         quantile = 0.5,
439         maxfun   = 15000,
440         toler    = 1.e-06,
441         y        = None,
442         ):
443     """
444     Implémentation informatique de l'algorithme MMQR, basée sur la publication :
445     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
446     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
447     """
448     #
449     # Retrieval of the initial data and information
450     # ----------------------------------------------
451     variables = numpy.ravel( x0 )
452     mesures   = numpy.ravel( y )
453     increment = sys.float_info[0]
454     p         = variables.size
455     n         = mesures.size
456     quantile  = float(quantile)
457     #
458     # Computation of the MM parameters
459     # --------------------------------
460     tn      = float(toler) / n
461     e0      = -tn / math.log(tn)
462     epsilon = (e0-tn)/(1+math.log(e0))
463     #
464     # Initialization computations
465     # ---------------------------
466     residus  = mesures - numpy.ravel( func( variables ) )
467     poids    = 1./(epsilon+numpy.abs(residus))
468     veps     = 1. - 2. * quantile - residus * poids
469     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
470     iteration = 0
471     #
472     # Iterative search
473     # ----------------
474     while (increment > toler) and (iteration < maxfun) :
475         iteration += 1
476         #
477         Derivees  = numpy.array(fprime(variables))
478         Derivees  = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it goes through YACS pipes
479         DeriveesT = Derivees.transpose()
480         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
481         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
482         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
483         #
484         variables = variables + step
485         if bounds is not None:
486             # Caution: avoid an infinite loop if an interval is too small
487             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
488                 step      = step/2.
489                 variables = variables - step
490         residus   = mesures - numpy.ravel( func(variables) )
491         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
492         #
493         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
494             step      = step/2.
495             variables = variables - step
496             residus   = mesures - numpy.ravel( func(variables) )
497             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
498         #
499         increment     = lastsurrogate-surrogate
500         poids         = 1./(epsilon+numpy.abs(residus))
501         veps          = 1. - 2. * quantile - residus * poids
502         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
503     #
504     # Discrepancy measure
505     # -------------------
506     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
507     #
508     return variables, Ecart, [n,p,iteration,increment,0]
509
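# Illustrative sketch (not executed): median regression (quantile = 0.5) of a linear
# model y = a*x + b with mmqr. The data, the model and its derivative are hypothetical.
#
#   import numpy
#   xdata = numpy.linspace(0., 1., 50)
#   ydata = 2.*xdata + 1. + 0.1*numpy.random.standard_normal(50)
#   def model( coeffs ):
#       a, b = numpy.ravel( coeffs )
#       return a*xdata + b
#   def model_prime( coeffs ):
#       return numpy.vstack((xdata, numpy.ones_like(xdata))).T    # shape (n, p)
#   coeffs, Ecart, infos = mmqr( func=model, x0=[0., 0.], fprime=model_prime, quantile=0.5, y=ydata )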
510 # ==============================================================================
511 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
512     "Generation of an ensemble of _nbmembers-1 centered random states"
513     #
514     _bgcenter = numpy.ravel(_bgcenter)[:,None]
515     if _nbmembers < 1:
516         raise ValueError("Number of members has to be at least 1 (given number: %s)."%(str(_nbmembers),))
517     #
518     if _bgcovariance is None:
519         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
520     else:
521         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
522         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
523     #
524     return BackgroundEnsemble
525
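# Illustrative sketch (not executed): drawing a small centered ensemble around a
# hypothetical background state with a diagonal covariance.
#
#   import numpy
#   Xb = numpy.array([1., 2., 3.])
#   B  = 0.01 * numpy.eye(3)
#   E  = EnsembleOfCenteredPerturbations( Xb, B, 10 )    # array of shape (3, 10), one member per column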
526 # ==============================================================================
527 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
528     "Generation of an ensemble of _nbmembers-1 centered random states"
529     def __CenteredRandomAnomalies(Zr, N):
530         """
531         Generates a matrix of N random anomalies centered on Zr, following the
532         handwritten notes of MB and consistent with the code of PS with eps = -1
533         """
534         eps = -1
535         Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
536         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
537         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
538         Q = numpy.dot(Q,R)
539         Zr = numpy.dot(Q,Zr)
540         return Zr.T
541     #
542     _bgcenter = numpy.ravel(_bgcenter)[:,None]
543     if _nbmembers < 1:
544         raise ValueError("Number of members has to be at least 1 (given number: %s)."%(str(_nbmembers),))
545     if _bgcovariance is None:
546         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
547     else:
548         if _withSVD:
549             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
550             _nbctl = _bgcenter.size
551             if _nbmembers > _nbctl:
552                 _Z = numpy.concatenate((numpy.dot(
553                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
554                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
555             else:
556                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
557             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
558             BackgroundEnsemble = _bgcenter + _Zca
559         else:
560             if max(abs(_bgcovariance.flatten())) > 0:
561                 _nbctl = _bgcenter.size
562                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
563                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
564                 BackgroundEnsemble = _bgcenter + _Zca
565             else:
566                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
567     #
568     return BackgroundEnsemble
569
570 # ==============================================================================
571 def EnsembleOfAnomalies( _ensemble, _optmean = None):
572     "Returns the centered anomalies of an ensemble of size StateSize*NbOfMembers"
573     if _optmean is None:
574         Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
575     else:
576         Em = numpy.ravel(_optmean)[:,numpy.newaxis]
577     #
578     return numpy.asarray(_ensemble) - Em
579
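# Illustrative sketch (not executed), continuing the hypothetical ensemble E above:
#
#   A = EnsembleOfAnomalies( E )         # E minus its empirical mean, column by column
#   A = EnsembleOfAnomalies( E, Xb )     # E minus the prescribed mean Xb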
580 # ==============================================================================
581 def CovarianceInflation(
582         InputCovOrEns,
583         InflationType   = None,
584         InflationFactor = None,
585         BackgroundCov   = None,
586         ):
587     """
588     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
589
590     Synthèse : Hunt 2007, section 2.3.5
591     """
592     if InflationFactor is None:
593         return InputCovOrEns
594     else:
595         InflationFactor = float(InflationFactor)
596     #
597     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
598         if InflationFactor < 1.:
599             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
600         if InflationFactor < 1.+mpr:
601             return InputCovOrEns
602         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
603     #
604     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
605         if InflationFactor < 1.:
606             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
607         if InflationFactor < 1.+mpr:
608             return InputCovOrEns
609         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
610         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
611             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
612     #
613     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
614         if InflationFactor < 0.:
615             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
616         if InflationFactor < mpr:
617             return InputCovOrEns
618         __n, __m = numpy.asarray(InputCovOrEns).shape
619         if __n != __m:
620             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
621         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
622     #
623     elif InflationType == "HybridOnBackgroundCovariance":
624         if InflationFactor < 0.:
625             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
626         if InflationFactor < mpr:
627             return InputCovOrEns
628         __n, __m = numpy.asarray(InputCovOrEns).shape
629         if __n != __m:
630             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
631         if BackgroundCov is None:
632             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
633         if InputCovOrEns.shape != BackgroundCov.shape:
634             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
635         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
636     #
637     elif InflationType == "Relaxation":
638         raise NotImplementedError("InflationType Relaxation")
639     #
640     else:
641         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
642     #
643     return OutputCovOrEns
644
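# Illustrative sketch (not executed): multiplicative inflation of hypothetical analysis
# anomalies by a factor 1.05, which pulls each member away from the ensemble mean.
#
#   import numpy
#   EXa = numpy.random.standard_normal((3, 10))
#   EXa_inflated = CovarianceInflation( EXa,
#       InflationType   = "MultiplicativeOnAnalysisAnomalies",
#       InflationFactor = 1.05,
#       )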
645 # ==============================================================================
646 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
647     """
648     Wrapper: multi-step and multi-method 3DVAR
649     """
650     #
651     # Initialization
652     # --------------
653     Xn = numpy.ravel(Xb).reshape((-1,1))
654     #
655     if selfA._parameters["EstimationOf"] == "State":
656         M = EM["Direct"].appliedTo
657         #
658         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
659             selfA.StoredVariables["Analysis"].store( Xn )
660             if selfA._toStore("APosterioriCovariance"):
661                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
662                 else:                         Pn = B
663                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
664             if selfA._toStore("ForecastState"):
665                 selfA.StoredVariables["ForecastState"].store( Xn )
666     #
667     if hasattr(Y,"stepnumber"):
668         duration = Y.stepnumber()
669     else:
670         duration = 2
671     #
672     # Multi-step
673     # ----------
674     for step in range(duration-1):
675         if hasattr(Y,"store"):
676             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
677         else:
678             Ynpu = numpy.ravel( Y ).reshape((-1,1))
679         #
680         if selfA._parameters["EstimationOf"] == "State": # Forecast
681             Xn = selfA.StoredVariables["Analysis"][-1]
682             Xn_predicted = M( Xn )
683             if selfA._toStore("ForecastState"):
684                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
685         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
686             # --- > By principle, M = Id, Q = 0
687             Xn_predicted = Xn
688         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
689         #
690         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
691     #
692     return 0
693
694 # ==============================================================================
695 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
696     """
697     3DVAR (Bouttier 1999, Courtier 1993)
698
699     selfA is identical to the "self" of the calling algorithm and holds its
700     values.
701     """
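    # The cost function minimized below (see CostFunction) is the classical 3DVAR one:
    #   J(x) = 1/2 (x - Xb)^T B^{-1} (x - Xb) + 1/2 (Y - H(x))^T R^{-1} (Y - H(x))
    # with gradient grad J(x) = B^{-1} (x - Xb) - H'^T R^{-1} (Y - H(x)).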
702     #
703     # Operators
704     # ---------
705     Hm = HO["Direct"].appliedTo
706     Ha = HO["Adjoint"].appliedInXTo
707     #
708     # Possible use of a precomputed H(Xb) vector
709     # ------------------------------------------
710     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
711         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
712     else:
713         HXb = Hm( Xb )
714     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
715     if Y.size != HXb.size:
716         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
717     if max(Y.shape) != max(HXb.shape):
718         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
719     #
720     if selfA._toStore("JacobianMatrixAtBackground"):
721         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
722         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
723         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
724     #
725     # Precomputation of the inverses of B and R
726     # -----------------------------------------
727     BI = B.getI()
728     RI = R.getI()
729     #
730     # Starting point of the optimization
731     # ----------------------------------
732     Xini = selfA._parameters["InitializationPoint"]
733     #
734     # Definition of the cost function
735     # -------------------------------
736     def CostFunction(x):
737         _X  = numpy.asmatrix(numpy.ravel( x )).T
738         if selfA._parameters["StoreInternalVariables"] or \
739             selfA._toStore("CurrentState") or \
740             selfA._toStore("CurrentOptimum"):
741             selfA.StoredVariables["CurrentState"].store( _X )
742         _HX = Hm( _X )
743         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
744         _Innovation = Y - _HX
745         if selfA._toStore("SimulatedObservationAtCurrentState") or \
746             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
747             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
748         if selfA._toStore("InnovationAtCurrentState"):
749             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
750         #
751         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
752         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
753         J   = Jb + Jo
754         #
755         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
756         selfA.StoredVariables["CostFunctionJb"].store( Jb )
757         selfA.StoredVariables["CostFunctionJo"].store( Jo )
758         selfA.StoredVariables["CostFunctionJ" ].store( J )
759         if selfA._toStore("IndexOfOptimum") or \
760             selfA._toStore("CurrentOptimum") or \
761             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
762             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
763             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
764             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
765             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
766         if selfA._toStore("IndexOfOptimum"):
767             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
768         if selfA._toStore("CurrentOptimum"):
769             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
770         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
771             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
772         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
773             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
774         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
775             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
776         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
777             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
778         return J
779     #
780     def GradientOfCostFunction(x):
781         _X      = numpy.asmatrix(numpy.ravel( x )).T
782         _HX     = Hm( _X )
783         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
784         GradJb  = BI * (_X - Xb)
785         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
786         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
787         return GradJ
788     #
789     # Minimization of the cost functional
790     # -----------------------------------
791     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
792     #
793     if selfA._parameters["Minimizer"] == "LBFGSB":
794         if "0.19" <= scipy.version.version <= "1.1.0":
795             import lbfgsbhlt as optimiseur
796         else:
797             import scipy.optimize as optimiseur
798         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
799             func        = CostFunction,
800             x0          = Xini,
801             fprime      = GradientOfCostFunction,
802             args        = (),
803             bounds      = selfA._parameters["Bounds"],
804             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
805             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
806             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
807             iprint      = selfA._parameters["optiprint"],
808             )
809         nfeval = Informations['funcalls']
810         rc     = Informations['warnflag']
811     elif selfA._parameters["Minimizer"] == "TNC":
812         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
813             func        = CostFunction,
814             x0          = Xini,
815             fprime      = GradientOfCostFunction,
816             args        = (),
817             bounds      = selfA._parameters["Bounds"],
818             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
819             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
820             ftol        = selfA._parameters["CostDecrementTolerance"],
821             messages    = selfA._parameters["optmessages"],
822             )
823     elif selfA._parameters["Minimizer"] == "CG":
824         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
825             f           = CostFunction,
826             x0          = Xini,
827             fprime      = GradientOfCostFunction,
828             args        = (),
829             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
830             gtol        = selfA._parameters["GradientNormTolerance"],
831             disp        = selfA._parameters["optdisp"],
832             full_output = True,
833             )
834     elif selfA._parameters["Minimizer"] == "NCG":
835         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
836             f           = CostFunction,
837             x0          = Xini,
838             fprime      = GradientOfCostFunction,
839             args        = (),
840             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
841             avextol     = selfA._parameters["CostDecrementTolerance"],
842             disp        = selfA._parameters["optdisp"],
843             full_output = True,
844             )
845     elif selfA._parameters["Minimizer"] == "BFGS":
846         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
847             f           = CostFunction,
848             x0          = Xini,
849             fprime      = GradientOfCostFunction,
850             args        = (),
851             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
852             gtol        = selfA._parameters["GradientNormTolerance"],
853             disp        = selfA._parameters["optdisp"],
854             full_output = True,
855             )
856     else:
857         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
858     #
859     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
860     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
861     #
862     # Correction to work around a TNC bug on the returned Minimum
863     # ------------------------------------------------------------
864     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
865         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
866     #
867     # Retrieval of the analysis
868     # -------------------------
869     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
870     #
871     selfA.StoredVariables["Analysis"].store( Xa )
872     #
873     if selfA._toStore("OMA") or \
874         selfA._toStore("SigmaObs2") or \
875         selfA._toStore("SimulationQuantiles") or \
876         selfA._toStore("SimulatedObservationAtOptimum"):
877         if selfA._toStore("SimulatedObservationAtCurrentState"):
878             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
879         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
880             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
881         else:
882             HXa = Hm( Xa )
883     #
884     # Computation of the analysis covariance
885     # ---------------------------------------
886     if selfA._toStore("APosterioriCovariance") or \
887         selfA._toStore("SimulationQuantiles") or \
888         selfA._toStore("JacobianMatrixAtOptimum") or \
889         selfA._toStore("KalmanGainAtOptimum"):
890         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
891         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
892     if selfA._toStore("APosterioriCovariance") or \
893         selfA._toStore("SimulationQuantiles") or \
894         selfA._toStore("KalmanGainAtOptimum"):
895         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
896         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
897     if selfA._toStore("APosterioriCovariance") or \
898         selfA._toStore("SimulationQuantiles"):
899         HessienneI = []
900         nb = Xa.size
901         for i in range(nb):
902             _ee    = numpy.matrix(numpy.zeros(nb)).T
903             _ee[i] = 1.
904             _HtEE  = numpy.dot(HtM,_ee)
905             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
906             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
907         HessienneI = numpy.matrix( HessienneI )
908         A = HessienneI.I
909         if min(A.shape) != max(A.shape):
910             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
911         if (numpy.diag(A) < 0).any():
912             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
913         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
914             try:
915                 L = numpy.linalg.cholesky( A )
916             except:
917                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
918     if selfA._toStore("APosterioriCovariance"):
919         selfA.StoredVariables["APosterioriCovariance"].store( A )
920     if selfA._toStore("JacobianMatrixAtOptimum"):
921         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
922     if selfA._toStore("KalmanGainAtOptimum"):
923         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
924         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
925         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
926     #
927     # Additional computations and/or storage
928     # ---------------------------------------
929     if selfA._toStore("Innovation") or \
930         selfA._toStore("SigmaObs2") or \
931         selfA._toStore("MahalanobisConsistency") or \
932         selfA._toStore("OMB"):
933         d  = Y - HXb
934     if selfA._toStore("Innovation"):
935         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
936     if selfA._toStore("BMA"):
937         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
938     if selfA._toStore("OMA"):
939         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
940     if selfA._toStore("OMB"):
941         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
942     if selfA._toStore("SigmaObs2"):
943         TraceR = R.trace(Y.size)
944         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
945     if selfA._toStore("MahalanobisConsistency"):
946         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
947     if selfA._toStore("SimulationQuantiles"):
948         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
949         HXa  = numpy.matrix(numpy.ravel( HXa )).T
950         YfQ  = None
951         for i in range(nech):
952             if selfA._parameters["SimulationForQuantiles"] == "Linear":
953                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
954                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
955                 Yr = HXa + dYr
956             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
957                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
958                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
959             if YfQ is None:
960                 YfQ = Yr
961             else:
962                 YfQ = numpy.hstack((YfQ,Yr))
963         YfQ.sort(axis=-1)
964         YQ = None
965         for quantile in selfA._parameters["Quantiles"]:
966             if not (0. <= float(quantile) <= 1.): continue
967             indice = int(nech * float(quantile) - 1./nech)
968             if YQ is None: YQ = YfQ[:,indice]
969             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
970         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
971     if selfA._toStore("SimulatedObservationAtBackground"):
972         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
973     if selfA._toStore("SimulatedObservationAtOptimum"):
974         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
975     #
976     return 0
977
978 # ==============================================================================
979 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
980     """
981     3DVAR variational analysis with no inversion of B (Huang 2000)
982
983     selfA is identical to the "self" of the calling algorithm and holds its
984     values.
985     """
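    # Here the optimization is done on the control variable v, with x = Xb + B v, and the
    # cost function minimized below (see CostFunction) is:
    #   J(v) = 1/2 v^T B^T v + 1/2 (Y - H(Xb + B v))^T R^{-1} (Y - H(Xb + B v))
    # so that only the transpose of B is needed, not its inverse.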
986     #
987     # Initializations
988     # ---------------
989     Hm = HO["Direct"].appliedTo
990     Ha = HO["Adjoint"].appliedInXTo
991     #
992     # Precomputation of the transpose of B and the inverse of R
993     BT = B.getT()
994     RI = R.getI()
995     #
996     # Starting point of the optimization
997     Xini = numpy.zeros(Xb.shape)
998     #
999     # Definition of the cost function
1000     # -------------------------------
1001     def CostFunction(v):
1002         _V = numpy.asmatrix(numpy.ravel( v )).T
1003         _X = Xb + B * _V
1004         if selfA._parameters["StoreInternalVariables"] or \
1005             selfA._toStore("CurrentState") or \
1006             selfA._toStore("CurrentOptimum"):
1007             selfA.StoredVariables["CurrentState"].store( _X )
1008         _HX = Hm( _X )
1009         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1010         _Innovation = Y - _HX
1011         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1012             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1013             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
1014         if selfA._toStore("InnovationAtCurrentState"):
1015             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1016         #
1017         Jb  = float( 0.5 * _V.T * BT * _V )
1018         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1019         J   = Jb + Jo
1020         #
1021         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1022         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1023         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1024         selfA.StoredVariables["CostFunctionJ" ].store( J )
1025         if selfA._toStore("IndexOfOptimum") or \
1026             selfA._toStore("CurrentOptimum") or \
1027             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1028             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1029             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1030             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1031             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1032         if selfA._toStore("IndexOfOptimum"):
1033             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1034         if selfA._toStore("CurrentOptimum"):
1035             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1036         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1037             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1038         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1039             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1040         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1041             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1042         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1043             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1044         return J
1045     #
1046     def GradientOfCostFunction(v):
1047         _V = numpy.asmatrix(numpy.ravel( v )).T
1048         _X = Xb + B * _V
1049         _HX     = Hm( _X )
1050         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
1051         GradJb  = BT * _V
1052         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
1053         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1054         return GradJ
1055     #
1056     # Minimization of the cost functional
1057     # -----------------------------------
1058     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1059     #
1060     if selfA._parameters["Minimizer"] == "LBFGSB":
1061         if "0.19" <= scipy.version.version <= "1.1.0":
1062             import lbfgsbhlt as optimiseur
1063         else:
1064             import scipy.optimize as optimiseur
1065         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1066             func        = CostFunction,
1067             x0          = Xini,
1068             fprime      = GradientOfCostFunction,
1069             args        = (),
1070             bounds      = selfA._parameters["Bounds"],
1071             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1072             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1073             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1074             iprint      = selfA._parameters["optiprint"],
1075             )
1076         nfeval = Informations['funcalls']
1077         rc     = Informations['warnflag']
1078     elif selfA._parameters["Minimizer"] == "TNC":
1079         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1080             func        = CostFunction,
1081             x0          = Xini,
1082             fprime      = GradientOfCostFunction,
1083             args        = (),
1084             bounds      = selfA._parameters["Bounds"],
1085             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1086             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1087             ftol        = selfA._parameters["CostDecrementTolerance"],
1088             messages    = selfA._parameters["optmessages"],
1089             )
1090     elif selfA._parameters["Minimizer"] == "CG":
1091         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1092             f           = CostFunction,
1093             x0          = Xini,
1094             fprime      = GradientOfCostFunction,
1095             args        = (),
1096             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1097             gtol        = selfA._parameters["GradientNormTolerance"],
1098             disp        = selfA._parameters["optdisp"],
1099             full_output = True,
1100             )
1101     elif selfA._parameters["Minimizer"] == "NCG":
1102         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1103             f           = CostFunction,
1104             x0          = Xini,
1105             fprime      = GradientOfCostFunction,
1106             args        = (),
1107             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1108             avextol     = selfA._parameters["CostDecrementTolerance"],
1109             disp        = selfA._parameters["optdisp"],
1110             full_output = True,
1111             )
1112     elif selfA._parameters["Minimizer"] == "BFGS":
1113         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1114             f           = CostFunction,
1115             x0          = Xini,
1116             fprime      = GradientOfCostFunction,
1117             args        = (),
1118             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1119             gtol        = selfA._parameters["GradientNormTolerance"],
1120             disp        = selfA._parameters["optdisp"],
1121             full_output = True,
1122             )
1123     else:
1124         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1125     #
1126     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1127     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1128     #
1129     # Workaround for a TNC bug in the returned Minimum
1130     # -------------------------------------------------
1131     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1132         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1133         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1134     else:
1135         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
1136     #
1137     # Retrieval of the analysis
1138     # -------------------------
1139     Xa = Minimum
1140     #
1141     selfA.StoredVariables["Analysis"].store( Xa )
1142     #
1143     if selfA._toStore("OMA") or \
1144         selfA._toStore("SigmaObs2") or \
1145         selfA._toStore("SimulationQuantiles") or \
1146         selfA._toStore("SimulatedObservationAtOptimum"):
1147         if selfA._toStore("SimulatedObservationAtCurrentState"):
1148             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1149         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1150             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1151         else:
1152             HXa = Hm( Xa )
1153     #
1154     # Computation of the analysis error covariance
1155     # ---------------------------------------------
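    # Sketch of the formulation used below (illustrative only): the a posteriori
    # covariance is the inverse of the Hessian of the cost function,
    #     A = (B^{-1} + Ht^T R^{-1} Ht)^{-1},
    # built column by column by applying B^{-1} and Ht^T R^{-1} Ht to the canonical
    # basis vectors _ee. With small dense arrays (hypothetical, not the ADAO
    # covariance objects used here), the same result could be written directly as:
    #     Hessienne = numpy.linalg.inv(B) + HtM.T @ numpy.linalg.inv(R) @ HtM
    #     A         = numpy.linalg.inv(Hessienne)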
1156     if selfA._toStore("APosterioriCovariance") or \
1157         selfA._toStore("SimulationQuantiles") or \
1158         selfA._toStore("JacobianMatrixAtOptimum") or \
1159         selfA._toStore("KalmanGainAtOptimum"):
1160         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1161         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1162     if selfA._toStore("APosterioriCovariance") or \
1163         selfA._toStore("SimulationQuantiles") or \
1164         selfA._toStore("KalmanGainAtOptimum"):
1165         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1166         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1167     if selfA._toStore("APosterioriCovariance") or \
1168         selfA._toStore("SimulationQuantiles"):
1169         BI = B.getI()
1170         HessienneI = []
1171         nb = Xa.size
1172         for i in range(nb):
1173             _ee    = numpy.matrix(numpy.zeros(nb)).T
1174             _ee[i] = 1.
1175             _HtEE  = numpy.dot(HtM,_ee)
1176             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1177             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1178         HessienneI = numpy.matrix( HessienneI )
1179         A = HessienneI.I
1180         if min(A.shape) != max(A.shape):
1181             raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1182         if (numpy.diag(A) < 0).any():
1183             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1184         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1185             try:
1186                 L = numpy.linalg.cholesky( A )
1187             except:
1188                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1189     if selfA._toStore("APosterioriCovariance"):
1190         selfA.StoredVariables["APosterioriCovariance"].store( A )
1191     if selfA._toStore("JacobianMatrixAtOptimum"):
1192         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1193     if selfA._toStore("KalmanGainAtOptimum"):
1194         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1195         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1196         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1197     #
1198     # Additional computations and/or storage
1199     # ---------------------------------------
1200     if selfA._toStore("Innovation") or \
1201         selfA._toStore("SigmaObs2") or \
1202         selfA._toStore("MahalanobisConsistency") or \
1203         selfA._toStore("OMB"):
1204         d  = Y - HXb
1205     if selfA._toStore("Innovation"):
1206         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1207     if selfA._toStore("BMA"):
1208         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1209     if selfA._toStore("OMA"):
1210         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1211     if selfA._toStore("OMB"):
1212         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1213     if selfA._toStore("SigmaObs2"):
1214         TraceR = R.trace(Y.size)
1215         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1216     if selfA._toStore("MahalanobisConsistency"):
1217         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1218     if selfA._toStore("SimulationQuantiles"):
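        # The quantiles below are estimated by Monte-Carlo sampling of the analysis
        # error: a state Xr ~ N(Xa, A) is drawn, its effect on the observations is
        # obtained either through the tangent operator HtM ("Linear") or through the
        # full operator Hm ("NonLinear"), and empirical quantiles are then taken,
        # component by component, over the nech simulated observation vectors.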
1219         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1220         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1221         YfQ  = None
1222         for i in range(nech):
1223             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1224                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1225                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1226                 Yr = HXa + dYr
1227             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1228                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1229                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1230             if YfQ is None:
1231                 YfQ = Yr
1232             else:
1233                 YfQ = numpy.hstack((YfQ,Yr))
1234         YfQ.sort(axis=-1)
1235         YQ = None
1236         for quantile in selfA._parameters["Quantiles"]:
1237             if not (0. <= float(quantile) <= 1.): continue
1238             indice = int(nech * float(quantile) - 1./nech)
1239             if YQ is None: YQ = YfQ[:,indice]
1240             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1241         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1242     if selfA._toStore("SimulatedObservationAtBackground"):
1243         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1244     if selfA._toStore("SimulatedObservationAtOptimum"):
1245         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1246     #
1247     return 0
1248
1249 # ==============================================================================
1250 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1251     """
1252     Incremental 3DVAR (Courtier 1994, 1997)
1253
1254     selfA is identical to the "self" of the calling algorithm and holds
1255     its values.
1256     """
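    # Sketch of the incremental formulation used below (illustrative only): the
    # outer loop relinearizes the observation operator around the current state Xr,
    # and the inner loop minimizes the quadratic cost in the increment dx,
    #     J(dx) = 1/2 dx^T B^{-1} dx + 1/2 (d - Ht dx)^T R^{-1} (d - Ht dx),
    # with d = Y - H(Xb) the innovation and Ht = H'(Xr) the tangent operator. For a
    # small dense problem, the inner-loop minimizer could equivalently be computed
    # directly (hypothetical dense arrays, not the ADAO covariance objects):
    #     dxa = numpy.linalg.solve(BI + Ht.T @ (RI @ Ht), Ht.T @ (RI @ d))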
1257     #
1258     # Initialisations
1259     # ---------------
1260     #
1261     # Non-linear operator for the outer loop
1262     Hm = HO["Direct"].appliedTo
1263     #
1264     # Precomputation of the inverses of B and R
1265     BI = B.getI()
1266     RI = R.getI()
1267     #
1268     # Starting point of the optimization
1269     Xini = selfA._parameters["InitializationPoint"]
1270     #
1271     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1272     Innovation = Y - HXb
1273     #
1274     # Outer Loop
1275     # ----------
1276     iOuter = 0
1277     J      = 1./mpr
1278     DeltaJ = 1./mpr
1279     Xr     = Xini.reshape((-1,1))
1280     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1281         #
1282         # Inner Loop
1283         # ----------
1284         Ht = HO["Tangent"].asMatrix(Xr)
1285         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1286         #
1287         # Definition of the cost function
1288         # -------------------------------
1289         def CostFunction(dx):
1290             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1291             if selfA._parameters["StoreInternalVariables"] or \
1292                 selfA._toStore("CurrentState") or \
1293                 selfA._toStore("CurrentOptimum"):
1294                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1295             _HdX = Ht * _dX
1296             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1297             _dInnovation = Innovation - _HdX
1298             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1299                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1300                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1301             if selfA._toStore("InnovationAtCurrentState"):
1302                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1303             #
1304             Jb  = float( 0.5 * _dX.T * BI * _dX )
1305             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1306             J   = Jb + Jo
1307             #
1308             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1309             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1310             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1311             selfA.StoredVariables["CostFunctionJ" ].store( J )
1312             if selfA._toStore("IndexOfOptimum") or \
1313                 selfA._toStore("CurrentOptimum") or \
1314                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1315                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1316                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1317                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1318                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1319             if selfA._toStore("IndexOfOptimum"):
1320                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1321             if selfA._toStore("CurrentOptimum"):
1322                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1323             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1324                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1325             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1326                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1327             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1328                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1329             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1330                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1331             return J
1332         #
1333         def GradientOfCostFunction(dx):
1334             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1335             _HdX         = Ht * _dX
1336             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1337             _dInnovation = Innovation - _HdX
1338             GradJb       = BI * _dX
1339             GradJo       = - Ht.T @ (RI * _dInnovation)
1340             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1341             return GradJ
1342         #
1343         # Minimization of the cost functional
1344         # ------------------------------------
1345         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1346         #
1347         if selfA._parameters["Minimizer"] == "LBFGSB":
1348             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1349             if "0.19" <= scipy.version.version <= "1.1.0":
1350                 import lbfgsbhlt as optimiseur
1351             else:
1352                 import scipy.optimize as optimiseur
1353             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1354                 func        = CostFunction,
1355                 x0          = numpy.zeros(Xini.size),
1356                 fprime      = GradientOfCostFunction,
1357                 args        = (),
1358                 bounds      = selfA._parameters["Bounds"],
1359                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1360                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1361                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1362                 iprint      = selfA._parameters["optiprint"],
1363                 )
1364             nfeval = Informations['funcalls']
1365             rc     = Informations['warnflag']
1366         elif selfA._parameters["Minimizer"] == "TNC":
1367             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1368                 func        = CostFunction,
1369                 x0          = numpy.zeros(Xini.size),
1370                 fprime      = GradientOfCostFunction,
1371                 args        = (),
1372                 bounds      = selfA._parameters["Bounds"],
1373                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1374                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1375                 ftol        = selfA._parameters["CostDecrementTolerance"],
1376                 messages    = selfA._parameters["optmessages"],
1377                 )
1378         elif selfA._parameters["Minimizer"] == "CG":
1379             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1380                 f           = CostFunction,
1381                 x0          = numpy.zeros(Xini.size),
1382                 fprime      = GradientOfCostFunction,
1383                 args        = (),
1384                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1385                 gtol        = selfA._parameters["GradientNormTolerance"],
1386                 disp        = selfA._parameters["optdisp"],
1387                 full_output = True,
1388                 )
1389         elif selfA._parameters["Minimizer"] == "NCG":
1390             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1391                 f           = CostFunction,
1392                 x0          = numpy.zeros(Xini.size),
1393                 fprime      = GradientOfCostFunction,
1394                 args        = (),
1395                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1396                 avextol     = selfA._parameters["CostDecrementTolerance"],
1397                 disp        = selfA._parameters["optdisp"],
1398                 full_output = True,
1399                 )
1400         elif selfA._parameters["Minimizer"] == "BFGS":
1401             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1402                 f           = CostFunction,
1403                 x0          = numpy.zeros(Xini.size),
1404                 fprime      = GradientOfCostFunction,
1405                 args        = (),
1406                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1407                 gtol        = selfA._parameters["GradientNormTolerance"],
1408                 disp        = selfA._parameters["optdisp"],
1409                 full_output = True,
1410                 )
1411         else:
1412             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1413         #
1414         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1415         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1416         #
1417         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1418             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1419             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1420         else:
1421             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1422         #
1423         Xr     = Minimum
1424         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1425         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1426     #
1427     # Retrieval of the analysis
1428     # -------------------------
1429     Xa = Xr
1430     #
1431     selfA.StoredVariables["Analysis"].store( Xa )
1432     #
1433     if selfA._toStore("OMA") or \
1434         selfA._toStore("SigmaObs2") or \
1435         selfA._toStore("SimulationQuantiles") or \
1436         selfA._toStore("SimulatedObservationAtOptimum"):
1437         if selfA._toStore("SimulatedObservationAtCurrentState"):
1438             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1439         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1440             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1441         else:
1442             HXa = Hm( Xa )
1443     #
1444     # Computation of the analysis error covariance
1445     # ---------------------------------------------
1446     if selfA._toStore("APosterioriCovariance") or \
1447         selfA._toStore("SimulationQuantiles") or \
1448         selfA._toStore("JacobianMatrixAtOptimum") or \
1449         selfA._toStore("KalmanGainAtOptimum"):
1450         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1451         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1452     if selfA._toStore("APosterioriCovariance") or \
1453         selfA._toStore("SimulationQuantiles") or \
1454         selfA._toStore("KalmanGainAtOptimum"):
1455         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1456         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1457     if selfA._toStore("APosterioriCovariance") or \
1458         selfA._toStore("SimulationQuantiles"):
1459         HessienneI = []
1460         nb = Xa.size
1461         for i in range(nb):
1462             _ee    = numpy.matrix(numpy.zeros(nb)).T
1463             _ee[i] = 1.
1464             _HtEE  = numpy.dot(HtM,_ee)
1465             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1466             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1467         HessienneI = numpy.matrix( HessienneI )
1468         A = HessienneI.I
1469         if min(A.shape) != max(A.shape):
1470             raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1471         if (numpy.diag(A) < 0).any():
1472             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1473         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1474             try:
1475                 L = numpy.linalg.cholesky( A )
1476             except:
1477                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1478     if selfA._toStore("APosterioriCovariance"):
1479         selfA.StoredVariables["APosterioriCovariance"].store( A )
1480     if selfA._toStore("JacobianMatrixAtOptimum"):
1481         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1482     if selfA._toStore("KalmanGainAtOptimum"):
1483         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1484         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1485         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1486     #
1487     # Additional computations and/or storage
1488     # ---------------------------------------
1489     if selfA._toStore("Innovation") or \
1490         selfA._toStore("SigmaObs2") or \
1491         selfA._toStore("MahalanobisConsistency") or \
1492         selfA._toStore("OMB"):
1493         d  = Y - HXb
1494     if selfA._toStore("Innovation"):
1495         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1496     if selfA._toStore("BMA"):
1497         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1498     if selfA._toStore("OMA"):
1499         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1500     if selfA._toStore("OMB"):
1501         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1502     if selfA._toStore("SigmaObs2"):
1503         TraceR = R.trace(Y.size)
1504         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1505     if selfA._toStore("MahalanobisConsistency"):
1506         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1507     if selfA._toStore("SimulationQuantiles"):
1508         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1509         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1510         YfQ  = None
1511         for i in range(nech):
1512             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1513                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1514                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1515                 Yr = HXa + dYr
1516             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1517                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1518                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1519             if YfQ is None:
1520                 YfQ = Yr
1521             else:
1522                 YfQ = numpy.hstack((YfQ,Yr))
1523         YfQ.sort(axis=-1)
1524         YQ = None
1525         for quantile in selfA._parameters["Quantiles"]:
1526             if not (0. <= float(quantile) <= 1.): continue
1527             indice = int(nech * float(quantile) - 1./nech)
1528             if YQ is None: YQ = YfQ[:,indice]
1529             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1530         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1531     if selfA._toStore("SimulatedObservationAtBackground"):
1532         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1533     if selfA._toStore("SimulatedObservationAtOptimum"):
1534         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1535     #
1536     return 0
1537
1538 # ==============================================================================
1539 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1540     """
1541     3DVAR PSAS (Huang 2000)
1542
1543     selfA is identical to the "self" of the calling algorithm and holds
1544     its values.
1545     """
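    # Sketch of the PSAS formulation used below (illustrative only): the
    # minimization is carried out on a dual variable w in observation space,
    #     J(w) = 1/2 w^T (H B H^T + R) w - w^T d,      d = Y - H(Xb),
    # and the analysis is recovered in state space as Xa = Xb + B H^T w. For a
    # small dense problem, the minimizer could equivalently be computed directly
    # (hypothetical dense arrays, not the ADAO covariance objects):
    #     w  = numpy.linalg.solve(H @ B @ H.T + R, numpy.ravel(d))
    #     Xa = numpy.ravel(Xb) + B @ H.T @ w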
1546     #
1547     # Initialisations
1548     # ---------------
1549     #
1550     # Operators
1551     Hm = HO["Direct"].appliedTo
1552     #
1553     # Possible use of a precomputed H(Xb) vector
1554     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1555         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1556     else:
1557         HXb = Hm( Xb )
1558     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1559     if Y.size != HXb.size:
1560         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1561     if max(Y.shape) != max(HXb.shape):
1562         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
1563     #
1564     if selfA._toStore("JacobianMatrixAtBackground"):
1565         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1566         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1567         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1568     #
1569     Ht = HO["Tangent"].asMatrix(Xb)
1570     BHT = B * Ht.T
1571     HBHTpR = R + Ht * BHT
1572     Innovation = Y - HXb
1573     #
1574     # Starting point of the optimization
1575     Xini = numpy.zeros(Xb.shape)
1576     #
1577     # Definition of the cost function
1578     # -------------------------------
1579     def CostFunction(w):
1580         _W = numpy.asmatrix(numpy.ravel( w )).T
1581         if selfA._parameters["StoreInternalVariables"] or \
1582             selfA._toStore("CurrentState") or \
1583             selfA._toStore("CurrentOptimum"):
1584             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1585         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1586             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1587             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1588         if selfA._toStore("InnovationAtCurrentState"):
1589             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1590         #
1591         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
1592         Jo  = float( - _W.T * Innovation )
1593         J   = Jb + Jo
1594         #
1595         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1596         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1597         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1598         selfA.StoredVariables["CostFunctionJ" ].store( J )
1599         if selfA._toStore("IndexOfOptimum") or \
1600             selfA._toStore("CurrentOptimum") or \
1601             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1602             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1603             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1604             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1605             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1606         if selfA._toStore("IndexOfOptimum"):
1607             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1608         if selfA._toStore("CurrentOptimum"):
1609             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1610         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1611             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1612         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1613             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1614         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1615             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1616         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1617             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1618         return J
1619     #
1620     def GradientOfCostFunction(w):
1621         _W = numpy.asmatrix(numpy.ravel( w )).T
1622         GradJb  = HBHTpR * _W
1623         GradJo  = - Innovation
1624         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1625         return GradJ
1626     #
1627     # Minimization of the cost functional
1628     # ------------------------------------
1629     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1630     #
1631     if selfA._parameters["Minimizer"] == "LBFGSB":
1632         # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1633         if "0.19" <= scipy.version.version <= "1.1.0":
1634             import lbfgsbhlt as optimiseur
1635         else:
1636             import scipy.optimize as optimiseur
1637         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1638             func        = CostFunction,
1639             x0          = Xini,
1640             fprime      = GradientOfCostFunction,
1641             args        = (),
1642             bounds      = selfA._parameters["Bounds"],
1643             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1644             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1645             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1646             iprint      = selfA._parameters["optiprint"],
1647             )
1648         nfeval = Informations['funcalls']
1649         rc     = Informations['warnflag']
1650     elif selfA._parameters["Minimizer"] == "TNC":
1651         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1652             func        = CostFunction,
1653             x0          = Xini,
1654             fprime      = GradientOfCostFunction,
1655             args        = (),
1656             bounds      = selfA._parameters["Bounds"],
1657             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1658             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1659             ftol        = selfA._parameters["CostDecrementTolerance"],
1660             messages    = selfA._parameters["optmessages"],
1661             )
1662     elif selfA._parameters["Minimizer"] == "CG":
1663         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1664             f           = CostFunction,
1665             x0          = Xini,
1666             fprime      = GradientOfCostFunction,
1667             args        = (),
1668             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1669             gtol        = selfA._parameters["GradientNormTolerance"],
1670             disp        = selfA._parameters["optdisp"],
1671             full_output = True,
1672             )
1673     elif selfA._parameters["Minimizer"] == "NCG":
1674         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1675             f           = CostFunction,
1676             x0          = Xini,
1677             fprime      = GradientOfCostFunction,
1678             args        = (),
1679             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1680             avextol     = selfA._parameters["CostDecrementTolerance"],
1681             disp        = selfA._parameters["optdisp"],
1682             full_output = True,
1683             )
1684     elif selfA._parameters["Minimizer"] == "BFGS":
1685         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1686             f           = CostFunction,
1687             x0          = Xini,
1688             fprime      = GradientOfCostFunction,
1689             args        = (),
1690             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1691             gtol        = selfA._parameters["GradientNormTolerance"],
1692             disp        = selfA._parameters["optdisp"],
1693             full_output = True,
1694             )
1695     else:
1696         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1697     #
1698     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1699     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1700     #
1701     # Workaround for a TNC bug in the returned Minimum
1702     # -------------------------------------------------
1703     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1704         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1705         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1706     else:
1707         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1708     #
1709     # Retrieval of the analysis
1710     # -------------------------
1711     Xa = Minimum
1712     #
1713     selfA.StoredVariables["Analysis"].store( Xa )
1714     #
1715     if selfA._toStore("OMA") or \
1716         selfA._toStore("SigmaObs2") or \
1717         selfA._toStore("SimulationQuantiles") or \
1718         selfA._toStore("SimulatedObservationAtOptimum"):
1719         if selfA._toStore("SimulatedObservationAtCurrentState"):
1720             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1721         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1722             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1723         else:
1724             HXa = Hm( Xa )
1725     #
1726     # Computation of the analysis error covariance
1727     # ---------------------------------------------
1728     if selfA._toStore("APosterioriCovariance") or \
1729         selfA._toStore("SimulationQuantiles") or \
1730         selfA._toStore("JacobianMatrixAtOptimum") or \
1731         selfA._toStore("KalmanGainAtOptimum"):
1732         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1733         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1734     if selfA._toStore("APosterioriCovariance") or \
1735         selfA._toStore("SimulationQuantiles") or \
1736         selfA._toStore("KalmanGainAtOptimum"):
1737         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1738         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1739     if selfA._toStore("APosterioriCovariance") or \
1740         selfA._toStore("SimulationQuantiles"):
1741         BI = B.getI()
1742         RI = R.getI()
1743         HessienneI = []
1744         nb = Xa.size
1745         for i in range(nb):
1746             _ee    = numpy.matrix(numpy.zeros(nb)).T
1747             _ee[i] = 1.
1748             _HtEE  = numpy.dot(HtM,_ee)
1749             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1750             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1751         HessienneI = numpy.matrix( HessienneI )
1752         A = HessienneI.I
1753         if min(A.shape) != max(A.shape):
1754             raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1755         if (numpy.diag(A) < 0).any():
1756             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1757         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1758             try:
1759                 L = numpy.linalg.cholesky( A )
1760             except:
1761                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1762     if selfA._toStore("APosterioriCovariance"):
1763         selfA.StoredVariables["APosterioriCovariance"].store( A )
1764     if selfA._toStore("JacobianMatrixAtOptimum"):
1765         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1766     if selfA._toStore("KalmanGainAtOptimum"):
1767         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1768         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1769         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1770     #
1771     # Additional computations and/or storage
1772     # ---------------------------------------
1773     if selfA._toStore("Innovation") or \
1774         selfA._toStore("SigmaObs2") or \
1775         selfA._toStore("MahalanobisConsistency") or \
1776         selfA._toStore("OMB"):
1777         d  = Y - HXb
1778     if selfA._toStore("Innovation"):
1779         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1780     if selfA._toStore("BMA"):
1781         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1782     if selfA._toStore("OMA"):
1783         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1784     if selfA._toStore("OMB"):
1785         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1786     if selfA._toStore("SigmaObs2"):
1787         TraceR = R.trace(Y.size)
1788         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1789     if selfA._toStore("MahalanobisConsistency"):
1790         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1791     if selfA._toStore("SimulationQuantiles"):
1792         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1793         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1794         YfQ  = None
1795         for i in range(nech):
1796             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1797                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1798                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1799                 Yr = HXa + dYr
1800             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1801                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1802                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1803             if YfQ is None:
1804                 YfQ = Yr
1805             else:
1806                 YfQ = numpy.hstack((YfQ,Yr))
1807         YfQ.sort(axis=-1)
1808         YQ = None
1809         for quantile in selfA._parameters["Quantiles"]:
1810             if not (0. <= float(quantile) <= 1.): continue
1811             indice = int(nech * float(quantile) - 1./nech)
1812             if YQ is None: YQ = YfQ[:,indice]
1813             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1814         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1815     if selfA._toStore("SimulatedObservationAtBackground"):
1816         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1817     if selfA._toStore("SimulatedObservationAtOptimum"):
1818         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1819     #
1820     return 0
1821
1822 # ==============================================================================
1823 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
1824     """
1825     Stochastic EnKF (Evensen 1994, Burgers 1998)
1826
1827     selfA is identical to the "self" of the calling algorithm and holds
1828     its values.
1829     """
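    # Sketch of the stochastic (perturbed observations) analysis used below
    # (illustrative only): each member i of the forecast ensemble is updated with
    # its own perturbed observation,
    #     Xa_i = Xf_i + K (y + eps_i - H(Xf_i)),      eps_i ~ N(0, R),
    # the gain K = Pf H^T (H Pf H^T + R)^{-1} being estimated from the ensemble
    # anomalies, either through the explicit covariance products (variant
    # "KalmanFilterFormula05") or through the anomaly matrices themselves
    # (variant "KalmanFilterFormula16").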
1830     if selfA._parameters["EstimationOf"] == "Parameters":
1831         selfA._parameters["StoreInternalVariables"] = True
1832     #
1833     # Operators
1834     # ---------
1835     H = HO["Direct"].appliedControledFormTo
1836     #
1837     if selfA._parameters["EstimationOf"] == "State":
1838         M = EM["Direct"].appliedControledFormTo
1839     #
1840     if CM is not None and "Tangent" in CM and U is not None:
1841         Cm = CM["Tangent"].asMatrix(Xb)
1842     else:
1843         Cm = None
1844     #
1845     # Number of steps identical to the number of observation steps
1846     # -------------------------------------------------------------
1847     if hasattr(Y,"stepnumber"):
1848         duration = Y.stepnumber()
1849         __p = numpy.cumprod(Y.shape())[-1]
1850     else:
1851         duration = 2
1852         __p = numpy.array(Y).size
1853     #
1854     # Precomputation of the inverses of B and R
1855     # ------------------------------------------
1856     if selfA._parameters["StoreInternalVariables"] \
1857         or selfA._toStore("CostFunctionJ") \
1858         or selfA._toStore("CostFunctionJb") \
1859         or selfA._toStore("CostFunctionJo") \
1860         or selfA._toStore("CurrentOptimum") \
1861         or selfA._toStore("APosterioriCovariance"):
1862         BI = B.getI()
1863         RI = R.getI()
1864     #
1865     # Initialisation
1866     # --------------
1867     __n = Xb.size
1868     __m = selfA._parameters["NumberOfMembers"]
1869     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1870     else:                         Pn = B
1871     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1872     else:                         Rn = R
1873     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1874     else:                         Qn = Q
1875     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1876     #
1877     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1878         selfA.StoredVariables["Analysis"].store( Xb )
1879         if selfA._toStore("APosterioriCovariance"):
1880             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1881             covarianceXa = Pn
1882     #
1883     previousJMinimum = numpy.finfo(float).max
1884     #
1885     for step in range(duration-1):
1886         if hasattr(Y,"store"):
1887             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
1888         else:
1889             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
1890         #
1891         if U is not None:
1892             if hasattr(U,"store") and len(U)>1:
1893                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1894             elif hasattr(U,"store") and len(U)==1:
1895                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1896             else:
1897                 Un = numpy.asmatrix(numpy.ravel( U )).T
1898         else:
1899             Un = None
1900         #
1901         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1902             Xn = CovarianceInflation( Xn,
1903                 selfA._parameters["InflationType"],
1904                 selfA._parameters["InflationFactor"],
1905                 )
1906         #
1907         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1908             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1909                 argsAsSerie = True,
1910                 returnSerieAsArrayMatrix = True )
1911             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1912             Xn_predicted = EMX + qi
1913             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1914                 argsAsSerie = True,
1915                 returnSerieAsArrayMatrix = True )
1916             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1917                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1918                 Xn_predicted = Xn_predicted + Cm * Un
1919         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1920             # ---> By principle, M = Id, Q = 0
1921             Xn_predicted = Xn
1922             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1923                 argsAsSerie = True,
1924                 returnSerieAsArrayMatrix = True )
1925         #
1926         # Mean of forecast and observation of forecast
1927         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1928         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1929         #
1930         #--------------------------
1931         if VariantM == "KalmanFilterFormula05":
1932             PfHT, HPfHT = 0., 0.
1933             for i in range(__m):
1934                 Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
1935                 Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
1936                 PfHT  += Exfi * Eyfi.T
1937                 HPfHT += Eyfi * Eyfi.T
1938             PfHT  = (1./(__m-1)) * PfHT
1939             HPfHT = (1./(__m-1)) * HPfHT
1940             Kn     = PfHT * ( R + HPfHT ).I
1941             del PfHT, HPfHT
1942             #
1943             for i in range(__m):
1944                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
1945                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
1946         #--------------------------
1947         elif VariantM == "KalmanFilterFormula16":
1948             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
1949             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1950             #
1951             EaX   = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
1952             EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
1953             #
1954             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
1955             #
1956             for i in range(__m):
1957                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
1958         #--------------------------
1959         else:
1960             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1961         #
1962         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1963             Xn = CovarianceInflation( Xn,
1964                 selfA._parameters["InflationType"],
1965                 selfA._parameters["InflationFactor"],
1966                 )
1967         #
1968         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1969         #--------------------------
1970         #
1971         if selfA._parameters["StoreInternalVariables"] \
1972             or selfA._toStore("CostFunctionJ") \
1973             or selfA._toStore("CostFunctionJb") \
1974             or selfA._toStore("CostFunctionJo") \
1975             or selfA._toStore("APosterioriCovariance") \
1976             or selfA._toStore("InnovationAtCurrentAnalysis") \
1977             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1978             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1979             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1980             _Innovation = Ynpu - _HXa
1981         #
1982         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1983         # ---> with the analysis
1984         selfA.StoredVariables["Analysis"].store( Xa )
1985         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1986             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1987         if selfA._toStore("InnovationAtCurrentAnalysis"):
1988             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1989         # ---> with the current state
1990         if selfA._parameters["StoreInternalVariables"] \
1991             or selfA._toStore("CurrentState"):
1992             selfA.StoredVariables["CurrentState"].store( Xn )
1993         if selfA._toStore("ForecastState"):
1994             selfA.StoredVariables["ForecastState"].store( EMX )
1995         if selfA._toStore("BMA"):
1996             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1997         if selfA._toStore("InnovationAtCurrentState"):
1998             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1999         if selfA._toStore("SimulatedObservationAtCurrentState") \
2000             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2001             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2002         # ---> others
2003         if selfA._parameters["StoreInternalVariables"] \
2004             or selfA._toStore("CostFunctionJ") \
2005             or selfA._toStore("CostFunctionJb") \
2006             or selfA._toStore("CostFunctionJo") \
2007             or selfA._toStore("CurrentOptimum") \
2008             or selfA._toStore("APosterioriCovariance"):
2009             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2010             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2011             J   = Jb + Jo
2012             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2013             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2014             selfA.StoredVariables["CostFunctionJ" ].store( J )
2015             #
2016             if selfA._toStore("IndexOfOptimum") \
2017                 or selfA._toStore("CurrentOptimum") \
2018                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2019                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2020                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2021                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2022                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2023             if selfA._toStore("IndexOfOptimum"):
2024                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2025             if selfA._toStore("CurrentOptimum"):
2026                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2027             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2028                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2029             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2030                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2031             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2032                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2033             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2034                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2035         if selfA._toStore("APosterioriCovariance"):
2036             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2037             Pn = Eai @ Eai.T
2038             Pn = 0.5 * (Pn + Pn.T)
2039             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2040         if selfA._parameters["EstimationOf"] == "Parameters" \
2041             and J < previousJMinimum:
2042             previousJMinimum    = J
2043             XaMin               = Xa
2044             if selfA._toStore("APosterioriCovariance"):
2045                 covarianceXaMin = Pn
2046     #
2047     # Additional final storage of the optimum for parameter estimation
2048     # -----------------------------------------------------------------
2049     if selfA._parameters["EstimationOf"] == "Parameters":
2050         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2051         selfA.StoredVariables["Analysis"].store( XaMin )
2052         if selfA._toStore("APosterioriCovariance"):
2053             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2054         if selfA._toStore("BMA"):
2055             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2056     #
2057     return 0
2058
2059 # ==============================================================================
2060 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2061     """
2062     Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)
2063
2064     selfA is identical to the "self" of the calling algorithm and holds
2065     its values.
2066     """
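    # Sketch of the deterministic ensemble transform used below for the default
    # "KalmanFilterFormula" variant (illustrative only): with the scaled anomalies
    #     S = R^{-1/2} (H(Xf) - Hfm) / sqrt(m-1)   and   delta = R^{-1/2} (y - Hfm),
    # the analysis weights and transform are
    #     T = (I + S^T S)^{-1},      w = T S^T delta,
    # and the analysis ensemble is rebuilt from the forecast mean and anomalies as
    #     Xa = Xfm + (EaX / sqrt(m-1)) @ ( w + sqrt(m-1) T^{1/2} U ),
    # with U an arbitrary orthogonal matrix (the identity here).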
2067     if selfA._parameters["EstimationOf"] == "Parameters":
2068         selfA._parameters["StoreInternalVariables"] = True
2069     #
2070     # Operators
2071     # ---------
2072     H = HO["Direct"].appliedControledFormTo
2073     #
2074     if selfA._parameters["EstimationOf"] == "State":
2075         M = EM["Direct"].appliedControledFormTo
2076     #
2077     if CM is not None and "Tangent" in CM and U is not None:
2078         Cm = CM["Tangent"].asMatrix(Xb)
2079     else:
2080         Cm = None
2081     #
2082     # Number of steps identical to the number of observation steps
2083     # -------------------------------------------------------------
2084     if hasattr(Y,"stepnumber"):
2085         duration = Y.stepnumber()
2086         __p = numpy.cumprod(Y.shape())[-1]
2087     else:
2088         duration = 2
2089         __p = numpy.array(Y).size
2090     #
2091     # Precomputation of the inverses of B and R
2092     # ------------------------------------------
2093     if selfA._parameters["StoreInternalVariables"] \
2094         or selfA._toStore("CostFunctionJ") \
2095         or selfA._toStore("CostFunctionJb") \
2096         or selfA._toStore("CostFunctionJo") \
2097         or selfA._toStore("CurrentOptimum") \
2098         or selfA._toStore("APosterioriCovariance"):
2099         BI = B.getI()
2100         RI = R.getI()
2101     elif VariantM != "KalmanFilterFormula":
2102         RI = R.getI()
2103     if VariantM == "KalmanFilterFormula":
2104         RIdemi = R.choleskyI()
2105     #
2106     # Initialisation
2107     # --------------
2108     __n = Xb.size
2109     __m = selfA._parameters["NumberOfMembers"]
2110     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2111     else:                         Pn = B
2112     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2113     else:                         Rn = R
2114     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2115     else:                         Qn = Q
2116     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2117     #
2118     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2119         selfA.StoredVariables["Analysis"].store( Xb )
2120         if selfA._toStore("APosterioriCovariance"):
2121             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2122             covarianceXa = Pn
2123     #
2124     previousJMinimum = numpy.finfo(float).max
2125     #
2126     for step in range(duration-1):
2127         if hasattr(Y,"store"):
2128             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2129         else:
2130             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2131         #
2132         if U is not None:
2133             if hasattr(U,"store") and len(U)>1:
2134                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2135             elif hasattr(U,"store") and len(U)==1:
2136                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2137             else:
2138                 Un = numpy.asmatrix(numpy.ravel( U )).T
2139         else:
2140             Un = None
2141         #
2142         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2143             Xn = CovarianceInflation( Xn,
2144                 selfA._parameters["InflationType"],
2145                 selfA._parameters["InflationFactor"],
2146                 )
2147         #
2148         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2149             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2150                 argsAsSerie = True,
2151                 returnSerieAsArrayMatrix = True )
2152             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2153             Xn_predicted = EMX + qi
2154             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2155                 argsAsSerie = True,
2156                 returnSerieAsArrayMatrix = True )
2157             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2158                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2159                 Xn_predicted = Xn_predicted + Cm * Un
2160         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2161             # --- > By principle, M = Id, Q = 0
2162             Xn_predicted = EMX = Xn
2163             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2164                 argsAsSerie = True,
2165                 returnSerieAsArrayMatrix = True )
2166         #
2167         # Mean of forecast and observation of forecast
2168         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2169         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2170         #
2171         # Anomalies
2172         EaX   = EnsembleOfAnomalies( Xn_predicted )
2173         EaHX  = numpy.array(HX_predicted - Hfm)
2174         #
2175         #--------------------------
2176         if VariantM == "KalmanFilterFormula":
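            # Deterministic ETKF update written in ensemble (weight) space: with
            # S = R^{-1/2} (HE - Hfm)/sqrt(m-1) and d = R^{-1/2} (y - Hfm), the mean
            # weights are w = (I + S^T S)^{-1} S^T d and the anomalies are rescaled
            # by the transform T = (I + S^T S)^{-1/2} (Hunt et al. 2007).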
2177             mS    = RIdemi * EaHX / numpy.sqrt(__m-1)
2178             delta = RIdemi * ( Ynpu - Hfm )
2179             mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2180             vw    = mT @ mS.transpose() @ delta
2181             #
2182             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2183             mU    = numpy.eye(__m)
2184             #
2185             EaX   = EaX / numpy.sqrt(__m-1)
2186             Xn    = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
2187         #--------------------------
2188         elif VariantM == "Variational":
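            # Variational form of the same analysis: minimize in weight space
            # J(w) = 0.5*(d - Yf w)^T R^{-1} (d - Yf w) + 0.5*(m-1)*w^T w by
            # conjugate gradient, then rebuild the anomalies from the inverse of
            # the Gauss-Newton Hessian Hta = Yf^T R^{-1} Yf + (m-1)*I.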
2189             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2190             def CostFunction(w):
2191                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2192                 _Jo = 0.5 * _A.T @ (RI * _A)
2193                 _Jb = 0.5 * (__m-1) * w.T @ w
2194                 _J  = _Jo + _Jb
2195                 return float(_J)
2196             def GradientOfCostFunction(w):
2197                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2198                 _GradJo = - EaHX.T @ (RI * _A)
2199                 _GradJb = (__m-1) * w.reshape((__m,1))
2200                 _GradJ  = _GradJo + _GradJb
2201                 return numpy.ravel(_GradJ)
2202             vw = scipy.optimize.fmin_cg(
2203                 f           = CostFunction,
2204                 x0          = numpy.zeros(__m),
2205                 fprime      = GradientOfCostFunction,
2206                 args        = (),
2207                 disp        = False,
2208                 )
2209             #
2210             Hto = EaHX.T @ (RI * EaHX)
2211             Htb = (__m-1) * numpy.eye(__m)
2212             Hta = Hto + Htb
2213             #
2214             Pta = numpy.linalg.inv( Hta )
2215             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2216             #
2217             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2218         #--------------------------
2219         elif VariantM == "FiniteSize11": # Gauge Boc2011
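            # Finite-size variant (EnKF-N): the Gaussian background term is replaced
            # by the gauge 0.5*m*log(1 + 1/m + w^T w), accounting for the sampling
            # error of the ensemble mean; the two variants below differ only in the
            # gauge factors they use.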
2220             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2221             def CostFunction(w):
2222                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2223                 _Jo = 0.5 * _A.T @ (RI * _A)
2224                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2225                 _J  = _Jo + _Jb
2226                 return float(_J)
2227             def GradientOfCostFunction(w):
2228                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2229                 _GradJo = - EaHX.T @ (RI * _A)
2230                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2231                 _GradJ  = _GradJo + _GradJb
2232                 return numpy.ravel(_GradJ)
2233             vw = scipy.optimize.fmin_cg(
2234                 f           = CostFunction,
2235                 x0          = numpy.zeros(__m),
2236                 fprime      = GradientOfCostFunction,
2237                 args        = (),
2238                 disp        = False,
2239                 )
2240             #
2241             Hto = EaHX.T @ (RI * EaHX)
2242             Htb = __m * \
2243                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2244                 / (1 + 1/__m + vw.T @ vw)**2
2245             Hta = Hto + Htb
2246             #
2247             Pta = numpy.linalg.inv( Hta )
2248             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2249             #
2250             Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2251         #--------------------------
2252         elif VariantM == "FiniteSize15": # Gauge Boc2015
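            # Same finite-size scheme with the (m+1)/2 log-gauge factor.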
2253             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2254             def CostFunction(w):
2255                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2256                 _Jo = 0.5 * _A.T @ (RI * _A)
2257                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2258                 _J  = _Jo + _Jb
2259                 return float(_J)
2260             def GradientOfCostFunction(w):
2261                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2262                 _GradJo = - EaHX.T @ (RI * _A)
2263                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2264                 _GradJ  = _GradJo + _GradJb
2265                 return numpy.ravel(_GradJ)
2266             vw = scipy.optimize.fmin_cg(
2267                 f           = CostFunction,
2268                 x0          = numpy.zeros(__m),
2269                 fprime      = GradientOfCostFunction,
2270                 args        = (),
2271                 disp        = False,
2272                 )
2273             #
2274             Hto = EaHX.T @ (RI * EaHX)
2275             Htb = (__m+1) * \
2276                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2277                 / (1 + 1/__m + vw.T @ vw)**2
2278             Hta = Hto + Htb
2279             #
2280             Pta = numpy.linalg.inv( Hta )
2281             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2282             #
2283             Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2284         #--------------------------
2285         elif VariantM == "FiniteSize16": # Gauge Boc2016
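            # Same finite-size scheme with the (m+1)/2 factor and w^T w scaled by 1/(m-1) inside the gauge.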
2286             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2287             def CostFunction(w):
2288                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2289                 _Jo = 0.5 * _A.T @ (RI * _A)
2290                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2291                 _J  = _Jo + _Jb
2292                 return float(_J)
2293             def GradientOfCostFunction(w):
2294                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2295                 _GradJo = - EaHX.T @ (RI * _A)
2296                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2297                 _GradJ  = _GradJo + _GradJb
2298                 return numpy.ravel(_GradJ)
2299             vw = scipy.optimize.fmin_cg(
2300                 f           = CostFunction,
2301                 x0          = numpy.zeros(__m),
2302                 fprime      = GradientOfCostFunction,
2303                 args        = (),
2304                 disp        = False,
2305                 )
2306             #
2307             Hto = EaHX.T @ (RI * EaHX)
2308             Htb = ((__m+1) / (__m-1)) * \
2309                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
2310                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2311             Hta = Hto + Htb
2312             #
2313             Pta = numpy.linalg.inv( Hta )
2314             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2315             #
2316             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2317         #--------------------------
2318         else:
2319             raise ValueError("VariantM has to be chosen from the list of authorized methods.")
2320         #
2321         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2322             Xn = CovarianceInflation( Xn,
2323                 selfA._parameters["InflationType"],
2324                 selfA._parameters["InflationFactor"],
2325                 )
2326         #
2327         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2328         #--------------------------
2329         #
2330         if selfA._parameters["StoreInternalVariables"] \
2331             or selfA._toStore("CostFunctionJ") \
2332             or selfA._toStore("CostFunctionJb") \
2333             or selfA._toStore("CostFunctionJo") \
2334             or selfA._toStore("APosterioriCovariance") \
2335             or selfA._toStore("InnovationAtCurrentAnalysis") \
2336             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2337             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2338             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2339             _Innovation = Ynpu - _HXa
2340         #
2341         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2342         # ---> with the analysis
2343         selfA.StoredVariables["Analysis"].store( Xa )
2344         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2345             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2346         if selfA._toStore("InnovationAtCurrentAnalysis"):
2347             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2348         # ---> with the current state
2349         if selfA._parameters["StoreInternalVariables"] \
2350             or selfA._toStore("CurrentState"):
2351             selfA.StoredVariables["CurrentState"].store( Xn )
2352         if selfA._toStore("ForecastState"):
2353             selfA.StoredVariables["ForecastState"].store( EMX )
2354         if selfA._toStore("BMA"):
2355             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2356         if selfA._toStore("InnovationAtCurrentState"):
2357             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
2358         if selfA._toStore("SimulatedObservationAtCurrentState") \
2359             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2360             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2361         # ---> others
2362         if selfA._parameters["StoreInternalVariables"] \
2363             or selfA._toStore("CostFunctionJ") \
2364             or selfA._toStore("CostFunctionJb") \
2365             or selfA._toStore("CostFunctionJo") \
2366             or selfA._toStore("CurrentOptimum") \
2367             or selfA._toStore("APosterioriCovariance"):
2368             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2369             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2370             J   = Jb + Jo
2371             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2372             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2373             selfA.StoredVariables["CostFunctionJ" ].store( J )
2374             #
2375             if selfA._toStore("IndexOfOptimum") \
2376                 or selfA._toStore("CurrentOptimum") \
2377                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2378                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2379                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2380                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2381                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2382             if selfA._toStore("IndexOfOptimum"):
2383                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2384             if selfA._toStore("CurrentOptimum"):
2385                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2386             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2387                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2388             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2389                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2390             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2391                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2392             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2393                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2394         if selfA._toStore("APosterioriCovariance"):
2395             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2396             Pn = Eai @ Eai.T
2397             Pn = 0.5 * (Pn + Pn.T)
2398             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2399         if selfA._parameters["EstimationOf"] == "Parameters" \
2400             and J < previousJMinimum:
2401             previousJMinimum    = J
2402             XaMin               = Xa
2403             if selfA._toStore("APosterioriCovariance"):
2404                 covarianceXaMin = Pn
2405     #
2406     # Additional final storage of the optimum for parameter estimation
2407     # ----------------------------------------------------------------------
2408     if selfA._parameters["EstimationOf"] == "Parameters":
2409         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2410         selfA.StoredVariables["Analysis"].store( XaMin )
2411         if selfA._toStore("APosterioriCovariance"):
2412             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2413         if selfA._toStore("BMA"):
2414             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2415     #
2416     return 0
2417
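# ==============================================================================
# Illustrative sketch, not called by the algorithms of this module: a single
# deterministic ETKF analysis step written with plain numpy for a scalar
# observation error variance "r". The names (EnsembleETKFAnalysisSketch, Ef,
# HEf, y, r) are local to this example and do not belong to the ADAO API.
def EnsembleETKFAnalysisSketch(Ef, HEf, y, r):
    "One ETKF analysis step for a forecast ensemble Ef observed as HEf."
    __n, __m = Ef.shape
    xf = Ef.mean(axis=1, keepdims=True)                    # forecast mean
    yf = HEf.mean(axis=1, keepdims=True)                   # observed forecast mean
    Af = (Ef  - xf) / numpy.sqrt(__m-1)                    # state anomalies
    S  = (HEf - yf) / (numpy.sqrt(r) * numpy.sqrt(__m-1))  # normalized observation anomalies
    d  = (numpy.ravel(y).reshape((-1,1)) - yf) / numpy.sqrt(r)
    T2 = numpy.linalg.inv(numpy.eye(__m) + S.T @ S)        # (I + S^T S)^{-1}
    w  = T2 @ S.T @ d                                      # mean update weights
    T  = numpy.real(scipy.linalg.sqrtm(T2))                # anomaly transform
    return xf + Af @ (w + numpy.sqrt(__m-1) * T)           # analysis ensemble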
2418 # ==============================================================================
2419 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2420     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2421     """
2422     Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013)
2423
2424     selfA is identical to the "self" of the calling algorithm and holds
2425     its values.
2426     """
2427     if selfA._parameters["EstimationOf"] == "Parameters":
2428         selfA._parameters["StoreInternalVariables"] = True
2429     #
2430     # Operators
2431     # ----------
2432     H = HO["Direct"].appliedControledFormTo
2433     #
2434     if selfA._parameters["EstimationOf"] == "State":
2435         M = EM["Direct"].appliedControledFormTo
2436     #
2437     if CM is not None and "Tangent" in CM and U is not None:
2438         Cm = CM["Tangent"].asMatrix(Xb)
2439     else:
2440         Cm = None
2441     #
2442     # Number of steps equal to the number of observation steps
2443     # -------------------------------------------------------
2444     if hasattr(Y,"stepnumber"):
2445         duration = Y.stepnumber()
2446         __p = numpy.cumprod(Y.shape())[-1]
2447     else:
2448         duration = 2
2449         __p = numpy.array(Y).size
2450     #
2451     # Precompute the inverses of B and R
2452     # ----------------------------------
2453     if selfA._parameters["StoreInternalVariables"] \
2454         or selfA._toStore("CostFunctionJ") \
2455         or selfA._toStore("CostFunctionJb") \
2456         or selfA._toStore("CostFunctionJo") \
2457         or selfA._toStore("CurrentOptimum") \
2458         or selfA._toStore("APosterioriCovariance"):
2459         BI = B.getI()
2460     RI = R.getI()
2461     #
2462     # Initialization
2463     # --------------
2464     __n = Xb.size
2465     __m = selfA._parameters["NumberOfMembers"]
2466     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2467     else:                         Pn = B
2468     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2469     else:                         Rn = R
2470     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2471     else:                         Qn = Q
2472     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2473     #
2474     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2475         selfA.StoredVariables["Analysis"].store( Xb )
2476         if selfA._toStore("APosterioriCovariance"):
2477             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2478             covarianceXa = Pn
2479     #
2480     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Offset used below to locate the optimum of this run
2481     #
2482     for step in range(duration-1):
2483         if hasattr(Y,"store"):
2484             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2485         else:
2486             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2487         #
2488         if U is not None:
2489             if hasattr(U,"store") and len(U)>1:
2490                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2491             elif hasattr(U,"store") and len(U)==1:
2492                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2493             else:
2494                 Un = numpy.asmatrix(numpy.ravel( U )).T
2495         else:
2496             Un = None
2497         #
2498         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2499             Xn = CovarianceInflation( Xn,
2500                 selfA._parameters["InflationType"],
2501                 selfA._parameters["InflationFactor"],
2502                 )
2503         #
2504         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2505             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2506                 argsAsSerie = True,
2507                 returnSerieAsArrayMatrix = True )
2508             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2509             Xn_predicted = EMX + qi
2510             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2511                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2512                 Xn_predicted = Xn_predicted + Cm * Un
2513         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2514             # --- > By principle, M = Id, Q = 0
2515             Xn_predicted = EMX = Xn
2516         #
2517         #--------------------------
2518         if VariantM == "MLEF13":
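            # Iterative maximum-likelihood analysis in weight space: starting from
            # w = 0, apply Gauss-Newton steps Deltaw = -(I + Y^T R^{-1} Y)^{-1} GradJ,
            # where Y are the observation anomalies of the current iterate; unless
            # BnotT, the transform Ta = (I + Y^T R^{-1} Y)^{-1/2} is updated as well
            # and used to rebuild the analysis anomalies.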
2519             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2520             EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2521             Ua  = numpy.eye(__m)
2522             __j = 0
2523             Deltaw = 1
2524             if not BnotT:
2525                 Ta  = numpy.eye(__m)
2526             vw  = numpy.zeros(__m)
2527             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2528                 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2529                 #
2530                 if BnotT:
2531                     E1 = vx1 + _epsilon * EaX
2532                 else:
2533                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2534                 #
2535                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2536                     argsAsSerie = True,
2537                     returnSerieAsArrayMatrix = True )
2538                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2539                 #
2540                 if BnotT:
2541                     EaY = (HE2 - vy2) / _epsilon
2542                 else:
2543                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2544                 #
2545                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2546                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2547                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2548                 #
2549                 vw = vw + Deltaw
2550                 #
2551                 if not BnotT:
2552                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2553                 #
2554                 __j = __j + 1
2555             #
2556             if BnotT:
2557                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2558             #
2559             Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
2560         #--------------------------
2561         else:
2562             raise ValueError("VariantM has to be chosen from the list of authorized methods.")
2563         #
2564         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2565             Xn = CovarianceInflation( Xn,
2566                 selfA._parameters["InflationType"],
2567                 selfA._parameters["InflationFactor"],
2568                 )
2569         #
2570         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2571         #--------------------------
2572         #
2573         if selfA._parameters["StoreInternalVariables"] \
2574             or selfA._toStore("CostFunctionJ") \
2575             or selfA._toStore("CostFunctionJb") \
2576             or selfA._toStore("CostFunctionJo") \
2577             or selfA._toStore("APosterioriCovariance") \
2578             or selfA._toStore("InnovationAtCurrentAnalysis") \
2579             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2580             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2581             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2582             _Innovation = Ynpu - _HXa
2583         #
2584         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2585         # ---> with the analysis
2586         selfA.StoredVariables["Analysis"].store( Xa )
2587         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2588             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2589         if selfA._toStore("InnovationAtCurrentAnalysis"):
2590             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2591         # ---> with the current state
2592         if selfA._parameters["StoreInternalVariables"] \
2593             or selfA._toStore("CurrentState"):
2594             selfA.StoredVariables["CurrentState"].store( Xn )
2595         if selfA._toStore("ForecastState"):
2596             selfA.StoredVariables["ForecastState"].store( EMX )
2597         if selfA._toStore("BMA"):
2598             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2599         if selfA._toStore("InnovationAtCurrentState"):
2600             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2601         if selfA._toStore("SimulatedObservationAtCurrentState") \
2602             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2603             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2604         # ---> others
2605         if selfA._parameters["StoreInternalVariables"] \
2606             or selfA._toStore("CostFunctionJ") \
2607             or selfA._toStore("CostFunctionJb") \
2608             or selfA._toStore("CostFunctionJo") \
2609             or selfA._toStore("CurrentOptimum") \
2610             or selfA._toStore("APosterioriCovariance"):
2611             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2612             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2613             J   = Jb + Jo
2614             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2615             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2616             selfA.StoredVariables["CostFunctionJ" ].store( J )
2617             #
2618             if selfA._toStore("IndexOfOptimum") \
2619                 or selfA._toStore("CurrentOptimum") \
2620                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2621                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2622                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2623                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2624                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2625             if selfA._toStore("IndexOfOptimum"):
2626                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2627             if selfA._toStore("CurrentOptimum"):
2628                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2629             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2630                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2631             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2632                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2633             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2634                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2635             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2636                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2637         if selfA._toStore("APosterioriCovariance"):
2638             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2639             Pn = Eai @ Eai.T
2640             Pn = 0.5 * (Pn + Pn.T)
2641             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2642         if selfA._parameters["EstimationOf"] == "Parameters" \
2643             and J < previousJMinimum:
2644             previousJMinimum    = J
2645             XaMin               = Xa
2646             if selfA._toStore("APosterioriCovariance"):
2647                 covarianceXaMin = Pn
2648     #
2649     # Additional final storage of the optimum for parameter estimation
2650     # ----------------------------------------------------------------------
2651     if selfA._parameters["EstimationOf"] == "Parameters":
2652         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2653         selfA.StoredVariables["Analysis"].store( XaMin )
2654         if selfA._toStore("APosterioriCovariance"):
2655             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2656         if selfA._toStore("BMA"):
2657             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2658     #
2659     return 0
2660
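# ==============================================================================
# Illustrative sketch, not called by the algorithms of this module: one
# Gauss-Newton update of the weight vector w, as used by the MLEF and IEnKF
# loops, written for a scalar observation error variance "r". The names
# (WeightSpaceNewtonStepSketch, EaY, d, r) are local to this example.
def WeightSpaceNewtonStepSketch(w, EaY, d, r):
    "Return the increment Deltaw solving (I + Y^T R^{-1} Y) Deltaw = -GradJ(w)."
    __m    = w.size
    _GradJ = w - EaY.T @ (numpy.ravel(d) / r)   # gradient of J in ensemble space
    _mH    = numpy.eye(__m) + EaY.T @ (EaY / r) # Gauss-Newton approximate Hessian
    return - numpy.linalg.solve(_mH, _GradJ)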
2661 # ==============================================================================
2662 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2663     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2664     """
2665     Iterative EnKF (Sakov 2012, Sakov 2018)
2666
2667     selfA is identical to the "self" of the calling algorithm and holds
2668     its values.
2669     """
2670     if selfA._parameters["EstimationOf"] == "Parameters":
2671         selfA._parameters["StoreInternalVariables"] = True
2672     #
2673     # Operators
2674     # ----------
2675     H = HO["Direct"].appliedControledFormTo
2676     #
2677     if selfA._parameters["EstimationOf"] == "State":
2678         M = EM["Direct"].appliedControledFormTo
2679     #
2680     if CM is not None and "Tangent" in CM and U is not None:
2681         Cm = CM["Tangent"].asMatrix(Xb)
2682     else:
2683         Cm = None
2684     #
2685     # Number of steps equal to the number of observation steps
2686     # -------------------------------------------------------
2687     if hasattr(Y,"stepnumber"):
2688         duration = Y.stepnumber()
2689         __p = numpy.cumprod(Y.shape())[-1]
2690     else:
2691         duration = 2
2692         __p = numpy.array(Y).size
2693     #
2694     # Precompute the inverses of B and R
2695     # ----------------------------------
2696     if selfA._parameters["StoreInternalVariables"] \
2697         or selfA._toStore("CostFunctionJ") \
2698         or selfA._toStore("CostFunctionJb") \
2699         or selfA._toStore("CostFunctionJo") \
2700         or selfA._toStore("CurrentOptimum") \
2701         or selfA._toStore("APosterioriCovariance"):
2702         BI = B.getI()
2703     RI = R.getI()
2704     #
2705     # Initialization
2706     # --------------
2707     __n = Xb.size
2708     __m = selfA._parameters["NumberOfMembers"]
2709     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2710     else:                         Pn = B
2711     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2712     else:                         Rn = R
2713     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2714     else:                         Qn = Q
2715     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2716     #
2717     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2718         selfA.StoredVariables["Analysis"].store( Xb )
2719         if selfA._toStore("APosterioriCovariance"):
2720             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2721             covarianceXa = Pn
2722     #
2723     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Offset used below to locate the optimum of this run
2724     #
2725     for step in range(duration-1):
2726         if hasattr(Y,"store"):
2727             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2728         else:
2729             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2730         #
2731         if U is not None:
2732             if hasattr(U,"store") and len(U)>1:
2733                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2734             elif hasattr(U,"store") and len(U)==1:
2735                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2736             else:
2737                 Un = numpy.asmatrix(numpy.ravel( U )).T
2738         else:
2739             Un = None
2740         #
2741         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2742             Xn = CovarianceInflation( Xn,
2743                 selfA._parameters["InflationType"],
2744                 selfA._parameters["InflationFactor"],
2745                 )
2746         #
2747         #--------------------------
2748         if VariantM == "IEnKF12":
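            # Iterative EnKF: each Gauss-Newton iteration re-propagates the ensemble
            # through the evolution model M before observing it, so the weights
            # account for the model nonlinearity; BnotT selects the "bundle"
            # (finite differences with _epsilon) instead of the "transform"
            # linearization of the observation anomalies.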
2749             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2750             EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
2751             __j = 0
2752             Deltaw = 1
2753             if not BnotT:
2754                 Ta  = numpy.eye(__m)
2755             vw  = numpy.zeros(__m)
2756             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2757                 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2758                 #
2759                 if BnotT:
2760                     E1 = vx1 + _epsilon * EaX
2761                 else:
2762                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2763                 #
2764                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2765                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2766                         argsAsSerie = True,
2767                         returnSerieAsArrayMatrix = True )
2768                 elif selfA._parameters["EstimationOf"] == "Parameters":
2769                     # --- > By principle, M = Id
2770                     E2 = Xn
2771                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2772                 vy1 = H((vx2, Un)).reshape((__p,-1))
2773                 #
2774                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2775                     argsAsSerie = True,
2776                     returnSerieAsArrayMatrix = True )
2777                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2778                 #
2779                 if BnotT:
2780                     EaY = (HE2 - vy2) / _epsilon
2781                 else:
2782                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2783                 #
2784                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2785                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2786                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2787                 #
2788                 vw = vw + Deltaw
2789                 #
2790                 if not BnotT:
2791                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2792                 #
2793                 __j = __j + 1
2794             #
2795             A2 = EnsembleOfAnomalies( E2 )
2796             #
2797             if BnotT:
2798                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2799                 A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
2800             #
2801             Xn = vx2 + A2
2802         #--------------------------
2803         else:
2804             raise ValueError("VariantM has to be chosen from the list of authorized methods.")
2805         #
2806         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2807             Xn = CovarianceInflation( Xn,
2808                 selfA._parameters["InflationType"],
2809                 selfA._parameters["InflationFactor"],
2810                 )
2811         #
2812         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2813         #--------------------------
2814         #
2815         if selfA._parameters["StoreInternalVariables"] \
2816             or selfA._toStore("CostFunctionJ") \
2817             or selfA._toStore("CostFunctionJb") \
2818             or selfA._toStore("CostFunctionJo") \
2819             or selfA._toStore("APosterioriCovariance") \
2820             or selfA._toStore("InnovationAtCurrentAnalysis") \
2821             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2822             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2823             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2824             _Innovation = Ynpu - _HXa
2825         #
2826         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2827         # ---> with the analysis
2828         selfA.StoredVariables["Analysis"].store( Xa )
2829         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2830             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2831         if selfA._toStore("InnovationAtCurrentAnalysis"):
2832             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2833         # ---> with the current state
2834         if selfA._parameters["StoreInternalVariables"] \
2835             or selfA._toStore("CurrentState"):
2836             selfA.StoredVariables["CurrentState"].store( Xn )
2837         if selfA._toStore("ForecastState"):
2838             selfA.StoredVariables["ForecastState"].store( E2 )
2839         if selfA._toStore("BMA"):
2840             selfA.StoredVariables["BMA"].store( E2 - Xa )
2841         if selfA._toStore("InnovationAtCurrentState"):
2842             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2843         if selfA._toStore("SimulatedObservationAtCurrentState") \
2844             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2845             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2846         # ---> others
2847         if selfA._parameters["StoreInternalVariables"] \
2848             or selfA._toStore("CostFunctionJ") \
2849             or selfA._toStore("CostFunctionJb") \
2850             or selfA._toStore("CostFunctionJo") \
2851             or selfA._toStore("CurrentOptimum") \
2852             or selfA._toStore("APosterioriCovariance"):
2853             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2854             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2855             J   = Jb + Jo
2856             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2857             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2858             selfA.StoredVariables["CostFunctionJ" ].store( J )
2859             #
2860             if selfA._toStore("IndexOfOptimum") \
2861                 or selfA._toStore("CurrentOptimum") \
2862                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2863                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2864                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2865                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2866                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2867             if selfA._toStore("IndexOfOptimum"):
2868                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2869             if selfA._toStore("CurrentOptimum"):
2870                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2871             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2872                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2873             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2874                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2875             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2876                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2877             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2878                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2879         if selfA._toStore("APosterioriCovariance"):
2880             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2881             Pn = Eai @ Eai.T
2882             Pn = 0.5 * (Pn + Pn.T)
2883             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2884         if selfA._parameters["EstimationOf"] == "Parameters" \
2885             and J < previousJMinimum:
2886             previousJMinimum    = J
2887             XaMin               = Xa
2888             if selfA._toStore("APosterioriCovariance"):
2889                 covarianceXaMin = Pn
2890     #
2891     # Additional final storage of the optimum for parameter estimation
2892     # ----------------------------------------------------------------------
2893     if selfA._parameters["EstimationOf"] == "Parameters":
2894         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2895         selfA.StoredVariables["Analysis"].store( XaMin )
2896         if selfA._toStore("APosterioriCovariance"):
2897             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2898         if selfA._toStore("BMA"):
2899             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2900     #
2901     return 0
2902
2903 # ==============================================================================
2904 if __name__ == "__main__":
2905     print('\n AUTODIAGNOSTIC\n')