1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( paire ):
38     assert len(paire) == 2, "Incorrect number of arguments"
39     X, funcrepr = paire
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     __HX  = __fonction( __X )
46     return numpy.ravel( __HX )
47
48 # ==============================================================================
49 class FDApproximation(object):
50     """
51     This class provides an interface to define approximated operators. When an
52     object is created by supplying a function "Function", one obtains an object
53     with the 3 methods "DirectOperator", "TangentOperator" and "AdjointOperator".
54     The finite-difference (FD) approximation is controlled either by the
55     multiplicative increment "increment", 1% by default, or by the fixed
56     increment "dX" which is then multiplied by "increment" (hence in %), and
57     centered FD are used when the boolean "centeredDF" is True.
58     """
59     def __init__(self,
60             name                  = "FDApproximation",
61             Function              = None,
62             centeredDF            = False,
63             increment             = 0.01,
64             dX                    = None,
65             avoidingRedundancy    = True,
66             toleranceInRedundancy = 1.e-18,
67             lenghtOfRedundancy    = -1,
68             mpEnabled             = False,
69             mpWorkers             = None,
70             mfEnabled             = False,
71             ):
72         self.__name = str(name)
73         if mpEnabled:
74             try:
75                 import multiprocessing
76                 self.__mpEnabled = True
77             except ImportError:
78                 self.__mpEnabled = False
79         else:
80             self.__mpEnabled = False
81         self.__mpWorkers = mpWorkers
82         if self.__mpWorkers is not None and self.__mpWorkers < 1:
83             self.__mpWorkers = None
84         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
85         #
86         if mfEnabled:
87             self.__mfEnabled = True
88         else:
89             self.__mfEnabled = False
90         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
91         #
92         if avoidingRedundancy:
93             self.__avoidRC = True
94             self.__tolerBP = float(toleranceInRedundancy)
95             self.__lenghtRJ = int(lenghtOfRedundancy)
96             self.__listJPCP = [] # Jacobian Previous Calculated Points
97             self.__listJPCI = [] # Jacobian Previous Calculated Increment
98             self.__listJPCR = [] # Jacobian Previous Calculated Results
99             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
100             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
101         else:
102             self.__avoidRC = False
103         #
104         if self.__mpEnabled:
105             if isinstance(Function,types.FunctionType):
106                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
107                 self.__userFunction__name = Function.__name__
108                 try:
109                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
110                 except:
111                     mod = os.path.abspath(Function.__globals__['__file__'])
112                 if not os.path.isfile(mod):
113                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
114                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
115                 self.__userFunction__path = os.path.dirname(mod)
116                 del mod
117                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
118                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
119             elif isinstance(Function,types.MethodType):
120                 logging.debug("FDA Calculs en multiprocessing : MethodType")
121                 self.__userFunction__name = Function.__name__
122                 try:
123                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
124                 except:
125                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
126                 if not os.path.isfile(mod):
127                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
128                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
129                 self.__userFunction__path = os.path.dirname(mod)
130                 del mod
131                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
132                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
133             else:
134                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
135         else:
136             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
137             self.__userFunction = self.__userOperator.appliedTo
138         #
139         self.__centeredDF = bool(centeredDF)
140         if abs(float(increment)) > 1.e-15:
141             self.__increment  = float(increment)
142         else:
143             self.__increment  = 0.01
144         if dX is None:
145             self.__dX     = None
146         else:
147             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
148         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
149         if self.__avoidRC:
150             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
151
152     # ---------------------------------------------------------
153     def __doublon__(self, e, l, n, v=None):
154         __ac, __iac = False, -1
155         for i in range(len(l)-1,-1,-1):
156             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
157                 __ac, __iac = True, i
158                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
159                 break
160         return __ac, __iac
161
162     # ---------------------------------------------------------
163     def DirectOperator(self, X ):
164         """
165         Direct computation using the supplied function.
166         """
167         logging.debug("FDA Calcul DirectOperator (explicite)")
168         if self.__mfEnabled:
169             _HX = self.__userFunction( X, argsAsSerie = True )
170         else:
171             _X = numpy.asmatrix(numpy.ravel( X )).T
172             _HX = numpy.ravel(self.__userFunction( _X ))
173         #
174         return _HX
175
176     # ---------------------------------------------------------
177     def TangentMatrix(self, X ):
178         """
179         Computation of the tangent operator as the Jacobian by finite
180         differences, i.e. the gradient of H at X. Directional finite
181         differences are used around the point X. X is a numpy.matrix.
182
183         Centered finite differences (2nd order approximation):
184         1/ For each component i of X, the perturbation dX[i] is added to and
185            subtracted from the component X[i], building X_plus_dXi and
186            X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
187            HX_moins_dXi = H( X_moins_dXi ) are computed
188         2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
189            by the step 2*dXi
190         3/ Each result, component by component, becomes a column of the Jacobian
191
192         Non-centered finite differences (1st order approximation):
193         1/ For each component i of X, the perturbation dX[i] is added to the
194            component X[i], building X_plus_dXi, and the response
195            HX_plus_dXi = H( X_plus_dXi ) is computed
196         2/ The central value HX = H(X) is computed
197         3/ The differences (HX_plus_dXi-HX) are formed and divided by
198            the step dXi
199         4/ Each result, component by component, becomes a column of the Jacobian
200
201         """
202         logging.debug("FDA Début du calcul de la Jacobienne")
203         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
204         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
205         #
206         if X is None or len(X)==0:
207             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
208         #
209         _X = numpy.asmatrix(numpy.ravel( X )).T
210         #
211         if self.__dX is None:
212             _dX  = self.__increment * _X
213         else:
214             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
215         #
216         if (_dX == 0.).any():
217             moyenne = _dX.mean()
218             if moyenne == 0.:
219                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
220             else:
221                 _dX = numpy.where( _dX == 0., moyenne, _dX )
222         #
223         __alreadyCalculated  = False
224         if self.__avoidRC:
225             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
226             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
227             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
228                 __alreadyCalculated, __i = True, __alreadyCalculatedP
229                 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
230         #
231         if __alreadyCalculated:
232             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
233             _Jacobienne = self.__listJPCR[__i]
234         else:
235             logging.debug("FDA   Calcul Jacobienne (explicite)")
236             if self.__centeredDF:
237                 #
238                 if self.__mpEnabled and not self.__mfEnabled:
239                     funcrepr = {
240                         "__userFunction__path" : self.__userFunction__path,
241                         "__userFunction__modl" : self.__userFunction__modl,
242                         "__userFunction__name" : self.__userFunction__name,
243                     }
244                     _jobs = []
245                     for i in range( len(_dX) ):
246                         _dXi            = _dX[i]
247                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
248                         _X_plus_dXi[i]  = _X[i] + _dXi
249                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
250                         _X_moins_dXi[i] = _X[i] - _dXi
251                         #
252                         _jobs.append( (_X_plus_dXi,  funcrepr) )
253                         _jobs.append( (_X_moins_dXi, funcrepr) )
254                     #
255                     import multiprocessing
256                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
257                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
258                     self.__pool.close()
259                     self.__pool.join()
260                     #
261                     _Jacobienne  = []
262                     for i in range( len(_dX) ):
263                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
264                     #
265                 elif self.__mfEnabled:
266                     _xserie = []
267                     for i in range( len(_dX) ):
268                         _dXi            = _dX[i]
269                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
270                         _X_plus_dXi[i]  = _X[i] + _dXi
271                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
272                         _X_moins_dXi[i] = _X[i] - _dXi
273                         #
274                         _xserie.append( _X_plus_dXi )
275                         _xserie.append( _X_moins_dXi )
276                     #
277                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
278                      #
279                     _Jacobienne  = []
280                     for i in range( len(_dX) ):
281                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
282                     #
283                 else:
284                     _Jacobienne  = []
285                     for i in range( _dX.size ):
286                         _dXi            = _dX[i]
287                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
288                         _X_plus_dXi[i]  = _X[i] + _dXi
289                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
290                         _X_moins_dXi[i] = _X[i] - _dXi
291                         #
292                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
293                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
294                         #
295                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
296                 #
297             else:
298                 #
299                 if self.__mpEnabled and not self.__mfEnabled:
300                     funcrepr = {
301                         "__userFunction__path" : self.__userFunction__path,
302                         "__userFunction__modl" : self.__userFunction__modl,
303                         "__userFunction__name" : self.__userFunction__name,
304                     }
305                     _jobs = []
306                     _jobs.append( (_X.A1, funcrepr) )
307                     for i in range( len(_dX) ):
308                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
309                         _X_plus_dXi[i] = _X[i] + _dX[i]
310                         #
311                         _jobs.append( (_X_plus_dXi, funcrepr) )
312                     #
313                     import multiprocessing
314                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
315                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
316                     self.__pool.close()
317                     self.__pool.join()
318                     #
319                     _HX = _HX_plus_dX.pop(0)
320                     #
321                     _Jacobienne = []
322                     for i in range( len(_dX) ):
323                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
324                     #
325                 elif self.__mfEnabled:
326                     _xserie = []
327                     _xserie.append( _X.A1 )
328                     for i in range( len(_dX) ):
329                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
330                         _X_plus_dXi[i] = _X[i] + _dX[i]
331                         #
332                         _xserie.append( _X_plus_dXi )
333                     #
334                     _HX_plus_dX = self.DirectOperator( _xserie )
335                     #
336                     _HX = _HX_plus_dX.pop(0)
337                     #
338                     _Jacobienne = []
339                     for i in range( len(_dX) ):
340                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
341                    #
342                 else:
343                     _Jacobienne  = []
344                     _HX = self.DirectOperator( _X )
345                     for i in range( _dX.size ):
346                         _dXi            = _dX[i]
347                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
348                         _X_plus_dXi[i]  = _X[i] + _dXi
349                         #
350                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
351                         #
352                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
353                 #
354             #
355             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
356             if self.__avoidRC:
357                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
358                 while len(self.__listJPCP) > self.__lenghtRJ:
359                     self.__listJPCP.pop(0)
360                     self.__listJPCI.pop(0)
361                     self.__listJPCR.pop(0)
362                     self.__listJPPN.pop(0)
363                     self.__listJPIN.pop(0)
364                 self.__listJPCP.append( copy.copy(_X) )
365                 self.__listJPCI.append( copy.copy(_dX) )
366                 self.__listJPCR.append( copy.copy(_Jacobienne) )
367                 self.__listJPPN.append( numpy.linalg.norm(_X) )
368                 self.__listJPIN.append( numpy.linalg.norm(_dX) )
369         #
370         logging.debug("FDA Fin du calcul de la Jacobienne")
371         #
372         return _Jacobienne
373
374     # ---------------------------------------------------------
375     def TangentOperator(self, paire ):
376         """
377         Calcul du tangent à l'aide de la Jacobienne.
378         """
379         if self.__mfEnabled:
380             assert len(paire) == 1, "Incorrect length of arguments"
381             _paire = paire[0]
382             assert len(_paire) == 2, "Incorrect number of arguments"
383         else:
384             assert len(paire) == 2, "Incorrect number of arguments"
385             _paire = paire
386         X, dX = _paire
387         _Jacobienne = self.TangentMatrix( X )
388         if dX is None or len(dX) == 0:
389             #
390             # Computation of the matrix form if the second argument is None
391             # -------------------------------------------------------------
392             if self.__mfEnabled: return [_Jacobienne,]
393             else:                return _Jacobienne
394         else:
395             #
396             # Computation of the linearized value of H at X applied to dX
397             # ------------------------------------------------------
398             _dX = numpy.asmatrix(numpy.ravel( dX )).T
399             _HtX = numpy.dot(_Jacobienne, _dX)
400             if self.__mfEnabled: return [_HtX.A1,]
401             else:                return _HtX.A1
402
403     # ---------------------------------------------------------
404     def AdjointOperator(self, paire ):
405         """
406         Adjoint computation using the Jacobian.
407         """
408         if self.__mfEnabled:
409             assert len(paire) == 1, "Incorrect length of arguments"
410             _paire = paire[0]
411             assert len(_paire) == 2, "Incorrect number of arguments"
412         else:
413             assert len(paire) == 2, "Incorrect number of arguments"
414             _paire = paire
415         X, Y = _paire
416         _JacobienneT = self.TangentMatrix( X ).T
417         if Y is None or len(Y) == 0:
418             #
419             # Computation of the matrix form if the second argument is None
420             # -------------------------------------------------------------
421             if self.__mfEnabled: return [_JacobienneT,]
422             else:                return _JacobienneT
423         else:
424             #
425             # Computation of the adjoint value at X applied to Y
426             # --------------------------------------------------
427             _Y = numpy.asmatrix(numpy.ravel( Y )).T
428             _HaY = numpy.dot(_JacobienneT, _Y)
429             if self.__mfEnabled: return [_HaY.A1,]
430             else:                return _HaY.A1
431
432 # ==============================================================================
433 def mmqr(
434         func     = None,
435         x0       = None,
436         fprime   = None,
437         bounds   = None,
438         quantile = 0.5,
439         maxfun   = 15000,
440         toler    = 1.e-06,
441         y        = None,
442         ):
443     """
444     Computer implementation of the MMQR algorithm, based on the publication:
445     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
446     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
447     """
448     #
449     # Retrieval of the initial data and information
450     # --------------------------------------------------
451     variables = numpy.ravel( x0 )
452     mesures   = numpy.ravel( y )
453     increment = sys.float_info[0]
454     p         = variables.size
455     n         = mesures.size
456     quantile  = float(quantile)
457     #
458     # Computation of the MM parameters
459     # ---------------------------
460     tn      = float(toler) / n
461     e0      = -tn / math.log(tn)
462     epsilon = (e0-tn)/(1+math.log(e0))
463     #
464     # Initialization computations
465     # ------------------------
466     residus  = mesures - numpy.ravel( func( variables ) )
467     poids    = 1./(epsilon+numpy.abs(residus))
468     veps     = 1. - 2. * quantile - residus * poids
469     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
470     iteration = 0
471     #
472     # Iterative search
473     # -------------------
474     while (increment > toler) and (iteration < maxfun) :
475         iteration += 1
476         #
477         Derivees  = numpy.array(fprime(variables))
478         Derivees  = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it passes through YACS pipes
479         DeriveesT = Derivees.transpose()
480         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
481         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
482         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
483         #
484         variables = variables + step
485         if bounds is not None:
486             # Caution: possible infinite loop if an interval is too small
487             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
488                 step      = step/2.
489                 variables = variables - step
490         residus   = mesures - numpy.ravel( func(variables) )
491         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
492         #
493         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
494             step      = step/2.
495             variables = variables - step
496             residus   = mesures - numpy.ravel( func(variables) )
497             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
498         #
499         increment     = lastsurrogate-surrogate
500         poids         = 1./(epsilon+numpy.abs(residus))
501         veps          = 1. - 2. * quantile - residus * poids
502         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
503     #
504     # Discrepancy measure
505     # --------------
506     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
507     #
508     return variables, Ecart, [n,p,iteration,increment,0]
509
510 # ==============================================================================
511 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
512     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
513     #
514     _bgcenter = numpy.ravel(_bgcenter)[:,None]
515     if _nbmembers < 1:
516         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
517     #
518     if _bgcovariance is None:
519         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
520     else:
521         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
522         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
523     #
524     return BackgroundEnsemble
525
526 # ==============================================================================
527 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
528     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
529     def __CenteredRandomAnomalies(Zr, N):
530         """
531         Generates a matrix of N random anomalies centered on Zr, following the
532         handwritten notes of MB and consistent with the code of PS, with eps = -1
533         """
534         eps = -1
535         Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
536         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
537         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
538         Q = numpy.dot(Q,R)
539         Zr = numpy.dot(Q,Zr)
540         return Zr.T
541     #
542     _bgcenter = numpy.ravel(_bgcenter)[:,None]
543     if _nbmembers < 1:
544         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
545     if _bgcovariance is None:
546         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
547     else:
548         if _withSVD:
549             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
550             _nbctl = _bgcenter.size
551             if _nbmembers > _nbctl:
552                 _Z = numpy.concatenate((numpy.dot(
553                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
554                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
555             else:
556                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
557             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
558             BackgroundEnsemble = _bgcenter + _Zca
559         else:
560             if max(abs(_bgcovariance.flatten())) > 0:
561                 _nbctl = _bgcenter.size
562                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
563                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
564                 BackgroundEnsemble = _bgcenter + _Zca
565             else:
566                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
567     #
568     return BackgroundEnsemble
569
570 # ==============================================================================
571 def EnsembleOfAnomalies( _ensemble, _optmean = None):
572     "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
573     if _optmean is None:
574         Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
575     else:
576         Em = numpy.ravel(_optmean)[:,numpy.newaxis]
577     #
578     return numpy.asarray(_ensemble) - Em
579
580 # ==============================================================================
581 def CovarianceInflation(
582         InputCovOrEns,
583         InflationType   = None,
584         InflationFactor = None,
585         BackgroundCov   = None,
586         ):
587     """
588     Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
589
590     Summary: Hunt 2007, section 2.3.5
591     """
592     if InflationFactor is None:
593         return InputCovOrEns
594     else:
595         InflationFactor = float(InflationFactor)
596     #
597     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
598         if InflationFactor < 1.:
599             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
600         if InflationFactor < 1.+mpr:
601             return InputCovOrEns
602         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
603     #
604     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
605         if InflationFactor < 1.:
606             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
607         if InflationFactor < 1.+mpr:
608             return InputCovOrEns
609         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
610         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
611             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
612     #
613     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
614         if InflationFactor < 0.:
615             raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
616         if InflationFactor < mpr:
617             return InputCovOrEns
618         __n, __m = numpy.asarray(InputCovOrEns).shape
619         if __n != __m:
620             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
621         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
622     #
623     elif InflationType == "HybridOnBackgroundCovariance":
624         if InflationFactor < 0.:
625             raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
626         if InflationFactor < mpr:
627             return InputCovOrEns
628         __n, __m = numpy.asarray(InputCovOrEns).shape
629         if __n != __m:
630             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
631         if BackgroundCov is None:
632             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
633         if InputCovOrEns.shape != BackgroundCov.shape:
634             raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
635         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
636     #
637     elif InflationType == "Relaxation":
638         raise NotImplementedError("InflationType Relaxation")
639     #
640     else:
641         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
642     #
643     return OutputCovOrEns
644
645 # ==============================================================================
646 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
647     """
648     3DVAR (Bouttier 1999, Courtier 1993)
649
650     selfA is identical to the "self" of the calling algorithm and holds the
651     values.
652     """
653     #
654     # Workaround for a TNC bug on the returned Minimum
655     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
656         selfA.setParameterValue("StoreInternalVariables",True)
657     #
658     # Operators
659     # ----------
660     Hm = HO["Direct"].appliedTo
661     Ha = HO["Adjoint"].appliedInXTo
662     #
663     # Optional use of a precomputed H(Xb) vector
664     # ----------------------------------------------------
665     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
666         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
667     else:
668         HXb = Hm( Xb )
669     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
670     if Y.size != HXb.size:
671         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
672     if max(Y.shape) != max(HXb.shape):
673         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
674     #
675     if selfA._toStore("JacobianMatrixAtBackground"):
676         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
677         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
678         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
679     #
680     # Precomputation of the inverses of B and R
681     # ----------------------------------
682     BI = B.getI()
683     RI = R.getI()
684     #
685     # Starting point of the optimization
686     # ------------------------------------
687     if selfA._parameters["InitializationPoint"] is not None:
688         Xini = numpy.ravel(selfA._parameters["InitializationPoint"])
689         if Xini.size != numpy.ravel(Xb).size:
690             raise ValueError("Incompatible size %i of forced initial point to replace the Xb of size %i" \
691                 %(Xini.size,numpy.ravel(Xb).size))
692     else:
693         Xini = numpy.ravel(Xb)
694     #
695     # Definition of the cost function
696     # ------------------------------
697     def CostFunction(x):
698         _X  = numpy.asmatrix(numpy.ravel( x )).T
699         if selfA._parameters["StoreInternalVariables"] or \
700             selfA._toStore("CurrentState") or \
701             selfA._toStore("CurrentOptimum"):
702             selfA.StoredVariables["CurrentState"].store( _X )
703         _HX = Hm( _X )
704         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
705         _Innovation = Y - _HX
706         if selfA._toStore("SimulatedObservationAtCurrentState") or \
707             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
708             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
709         if selfA._toStore("InnovationAtCurrentState"):
710             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
711         #
712         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
713         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
714         J   = Jb + Jo
715         #
716         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
717         selfA.StoredVariables["CostFunctionJb"].store( Jb )
718         selfA.StoredVariables["CostFunctionJo"].store( Jo )
719         selfA.StoredVariables["CostFunctionJ" ].store( J )
720         if selfA._toStore("IndexOfOptimum") or \
721             selfA._toStore("CurrentOptimum") or \
722             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
723             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
724             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
725             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
726             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
727         if selfA._toStore("IndexOfOptimum"):
728             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
729         if selfA._toStore("CurrentOptimum"):
730             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
731         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
732             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
733         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
734             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
735         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
736             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
737         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
738             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
739         return J
740     #
741     def GradientOfCostFunction(x):
742         _X      = numpy.asmatrix(numpy.ravel( x )).T
743         _HX     = Hm( _X )
744         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
745         GradJb  = BI * (_X - Xb)
746         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
747         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
748         return GradJ
749     #
750     # Minimization of the functional
751     # --------------------------------
752     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
753     #
754     if selfA._parameters["Minimizer"] == "LBFGSB":
755         if "0.19" <= scipy.version.version <= "1.1.0":
756             import lbfgsbhlt as optimiseur
757         else:
758             import scipy.optimize as optimiseur
759         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
760             func        = CostFunction,
761             x0          = Xini,
762             fprime      = GradientOfCostFunction,
763             args        = (),
764             bounds      = selfA._parameters["Bounds"],
765             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
766             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
767             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
768             iprint      = selfA._parameters["optiprint"],
769             )
770         nfeval = Informations['funcalls']
771         rc     = Informations['warnflag']
772     elif selfA._parameters["Minimizer"] == "TNC":
773         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
774             func        = CostFunction,
775             x0          = Xini,
776             fprime      = GradientOfCostFunction,
777             args        = (),
778             bounds      = selfA._parameters["Bounds"],
779             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
780             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
781             ftol        = selfA._parameters["CostDecrementTolerance"],
782             messages    = selfA._parameters["optmessages"],
783             )
784     elif selfA._parameters["Minimizer"] == "CG":
785         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
786             f           = CostFunction,
787             x0          = Xini,
788             fprime      = GradientOfCostFunction,
789             args        = (),
790             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
791             gtol        = selfA._parameters["GradientNormTolerance"],
792             disp        = selfA._parameters["optdisp"],
793             full_output = True,
794             )
795     elif selfA._parameters["Minimizer"] == "NCG":
796         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
797             f           = CostFunction,
798             x0          = Xini,
799             fprime      = GradientOfCostFunction,
800             args        = (),
801             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
802             avextol     = selfA._parameters["CostDecrementTolerance"],
803             disp        = selfA._parameters["optdisp"],
804             full_output = True,
805             )
806     elif selfA._parameters["Minimizer"] == "BFGS":
807         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
808             f           = CostFunction,
809             x0          = Xini,
810             fprime      = GradientOfCostFunction,
811             args        = (),
812             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
813             gtol        = selfA._parameters["GradientNormTolerance"],
814             disp        = selfA._parameters["optdisp"],
815             full_output = True,
816             )
817     else:
818         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
819     #
820     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
821     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
822     #
823     # Workaround for a TNC bug on the returned Minimum
824     # ----------------------------------------------------------------
825     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
826         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
827     #
828     # Obtaining the analysis
829     # ----------------------
830     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
831     #
832     selfA.StoredVariables["Analysis"].store( Xa.A1 )
833     #
834     if selfA._toStore("OMA") or \
835         selfA._toStore("SigmaObs2") or \
836         selfA._toStore("SimulationQuantiles") or \
837         selfA._toStore("SimulatedObservationAtOptimum"):
838         if selfA._toStore("SimulatedObservationAtCurrentState"):
839             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
840         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
841             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
842         else:
843             HXa = Hm( Xa )
844     #
845     # Computation of the analysis covariance
846     # ---------------------------------
847     if selfA._toStore("APosterioriCovariance") or \
848         selfA._toStore("SimulationQuantiles") or \
849         selfA._toStore("JacobianMatrixAtOptimum") or \
850         selfA._toStore("KalmanGainAtOptimum"):
851         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
852         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
853     if selfA._toStore("APosterioriCovariance") or \
854         selfA._toStore("SimulationQuantiles") or \
855         selfA._toStore("KalmanGainAtOptimum"):
856         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
857         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
858     if selfA._toStore("APosterioriCovariance") or \
859         selfA._toStore("SimulationQuantiles"):
860         HessienneI = []
861         nb = Xa.size
862         for i in range(nb):
863             _ee    = numpy.matrix(numpy.zeros(nb)).T
864             _ee[i] = 1.
865             _HtEE  = numpy.dot(HtM,_ee)
866             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
867             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
868         HessienneI = numpy.matrix( HessienneI )
869         A = HessienneI.I
870         if min(A.shape) != max(A.shape):
871             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
872         if (numpy.diag(A) < 0).any():
873             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
874         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
875             try:
876                 L = numpy.linalg.cholesky( A )
877             except:
878                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
879     if selfA._toStore("APosterioriCovariance"):
880         selfA.StoredVariables["APosterioriCovariance"].store( A )
881     if selfA._toStore("JacobianMatrixAtOptimum"):
882         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
883     if selfA._toStore("KalmanGainAtOptimum"):
884         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
885         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
886         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
887     #
888     # Additional computations and/or storage
889     # ---------------------------------------
890     if selfA._toStore("Innovation") or \
891         selfA._toStore("SigmaObs2") or \
892         selfA._toStore("MahalanobisConsistency") or \
893         selfA._toStore("OMB"):
894         d  = Y - HXb
895     if selfA._toStore("Innovation"):
896         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
897     if selfA._toStore("BMA"):
898         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
899     if selfA._toStore("OMA"):
900         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
901     if selfA._toStore("OMB"):
902         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
903     if selfA._toStore("SigmaObs2"):
904         TraceR = R.trace(Y.size)
905         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
906     if selfA._toStore("MahalanobisConsistency"):
907         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
908     if selfA._toStore("SimulationQuantiles"):
909         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
910         HXa  = numpy.matrix(numpy.ravel( HXa )).T
911         YfQ  = None
912         for i in range(nech):
913             if selfA._parameters["SimulationForQuantiles"] == "Linear":
914                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
915                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
916                 Yr = HXa + dYr
917             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
918                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
919                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
920             if YfQ is None:
921                 YfQ = Yr
922             else:
923                 YfQ = numpy.hstack((YfQ,Yr))
924         YfQ.sort(axis=-1)
925         YQ = None
926         for quantile in selfA._parameters["Quantiles"]:
927             if not (0. <= float(quantile) <= 1.): continue
928             indice = int(nech * float(quantile) - 1./nech)
929             if YQ is None: YQ = YfQ[:,indice]
930             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
931         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
932     if selfA._toStore("SimulatedObservationAtBackground"):
933         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
934     if selfA._toStore("SimulatedObservationAtOptimum"):
935         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
936     #
937     return 0
938
939 # ==============================================================================
940 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
941     """
942     3DVAR variational analysis with no inversion of B (Huang 2000)
943
944     selfA is identical to the "self" of the calling algorithm and holds the
945     values.
946     """
947     #
948     # Workaround for a TNC bug on the returned Minimum
949     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
950         selfA.setParameterValue("StoreInternalVariables",True)
951     #
952     # Initializations
953     # ---------------
954     Hm = HO["Direct"].appliedTo
955     Ha = HO["Adjoint"].appliedInXTo
956     #
957     # Precomputation of the transpose of B and the inverse of R
958     BT = B.getT()
959     RI = R.getI()
960     #
961     # Starting point of the optimization
962     Xini = numpy.zeros(Xb.shape)
963     #
964     # Definition of the cost function
965     # ------------------------------
966     def CostFunction(v):
967         _V = numpy.asmatrix(numpy.ravel( v )).T
968         _X = Xb + B * _V
969         if selfA._parameters["StoreInternalVariables"] or \
970             selfA._toStore("CurrentState") or \
971             selfA._toStore("CurrentOptimum"):
972             selfA.StoredVariables["CurrentState"].store( _X )
973         _HX = Hm( _X )
974         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
975         _Innovation = Y - _HX
976         if selfA._toStore("SimulatedObservationAtCurrentState") or \
977             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
978             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
979         if selfA._toStore("InnovationAtCurrentState"):
980             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
981         #
982         Jb  = float( 0.5 * _V.T * BT * _V )
983         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
984         J   = Jb + Jo
985         #
986         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
987         selfA.StoredVariables["CostFunctionJb"].store( Jb )
988         selfA.StoredVariables["CostFunctionJo"].store( Jo )
989         selfA.StoredVariables["CostFunctionJ" ].store( J )
990         if selfA._toStore("IndexOfOptimum") or \
991             selfA._toStore("CurrentOptimum") or \
992             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
993             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
994             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
995             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
996             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
997         if selfA._toStore("IndexOfOptimum"):
998             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
999         if selfA._toStore("CurrentOptimum"):
1000             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1001         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1002             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1003         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1004             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1005         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1006             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1007         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1008             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1009         return J
1010     #
1011     def GradientOfCostFunction(v):
1012         _V = numpy.asmatrix(numpy.ravel( v )).T
1013         _X = Xb + B * _V
1014         _HX     = Hm( _X )
1015         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
1016         GradJb  = BT * _V
1017         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
1018         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1019         return GradJ
1020     #
1021     # Minimization of the functional
1022     # --------------------------------
1023     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1024     #
1025     if selfA._parameters["Minimizer"] == "LBFGSB":
1026         if "0.19" <= scipy.version.version <= "1.1.0":
1027             import lbfgsbhlt as optimiseur
1028         else:
1029             import scipy.optimize as optimiseur
1030         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1031             func        = CostFunction,
1032             x0          = Xini,
1033             fprime      = GradientOfCostFunction,
1034             args        = (),
1035             bounds      = selfA._parameters["Bounds"],
1036             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1037             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1038             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1039             iprint      = selfA._parameters["optiprint"],
1040             )
1041         nfeval = Informations['funcalls']
1042         rc     = Informations['warnflag']
1043     elif selfA._parameters["Minimizer"] == "TNC":
1044         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1045             func        = CostFunction,
1046             x0          = Xini,
1047             fprime      = GradientOfCostFunction,
1048             args        = (),
1049             bounds      = selfA._parameters["Bounds"],
1050             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1051             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1052             ftol        = selfA._parameters["CostDecrementTolerance"],
1053             messages    = selfA._parameters["optmessages"],
1054             )
1055     elif selfA._parameters["Minimizer"] == "CG":
1056         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1057             f           = CostFunction,
1058             x0          = Xini,
1059             fprime      = GradientOfCostFunction,
1060             args        = (),
1061             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1062             gtol        = selfA._parameters["GradientNormTolerance"],
1063             disp        = selfA._parameters["optdisp"],
1064             full_output = True,
1065             )
1066     elif selfA._parameters["Minimizer"] == "NCG":
1067         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1068             f           = CostFunction,
1069             x0          = Xini,
1070             fprime      = GradientOfCostFunction,
1071             args        = (),
1072             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1073             avextol     = selfA._parameters["CostDecrementTolerance"],
1074             disp        = selfA._parameters["optdisp"],
1075             full_output = True,
1076             )
1077     elif selfA._parameters["Minimizer"] == "BFGS":
1078         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1079             f           = CostFunction,
1080             x0          = Xini,
1081             fprime      = GradientOfCostFunction,
1082             args        = (),
1083             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1084             gtol        = selfA._parameters["GradientNormTolerance"],
1085             disp        = selfA._parameters["optdisp"],
1086             full_output = True,
1087             )
1088     else:
1089         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1090     #
1091     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1092     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1093     #
1094     # Workaround for a TNC bug on the returned Minimum
1095     # ----------------------------------------------------------------
1096     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1097         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1098         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1099     else:
1100         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
1101     #
1102     # Retrieval of the analysis
1103     # ----------------------
1104     Xa = Minimum
1105     #
1106     selfA.StoredVariables["Analysis"].store( Xa )
1107     #
1108     if selfA._toStore("OMA") or \
1109         selfA._toStore("SigmaObs2") or \
1110         selfA._toStore("SimulationQuantiles") or \
1111         selfA._toStore("SimulatedObservationAtOptimum"):
1112         if selfA._toStore("SimulatedObservationAtCurrentState"):
1113             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1114         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1115             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1116         else:
1117             HXa = Hm( Xa )
1118     #
1119     # Computation of the analysis error covariance
1120     # ---------------------------------
1121     if selfA._toStore("APosterioriCovariance") or \
1122         selfA._toStore("SimulationQuantiles") or \
1123         selfA._toStore("JacobianMatrixAtOptimum") or \
1124         selfA._toStore("KalmanGainAtOptimum"):
1125         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1126         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1127     if selfA._toStore("APosterioriCovariance") or \
1128         selfA._toStore("SimulationQuantiles") or \
1129         selfA._toStore("KalmanGainAtOptimum"):
1130         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1131         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1132     if selfA._toStore("APosterioriCovariance") or \
1133         selfA._toStore("SimulationQuantiles"):
1134         BI = B.getI()
1135         HessienneI = []
1136         nb = Xa.size
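        # The a posteriori covariance A is obtained by inverting the Hessian
        # of the cost function, built column by column below as
        #     A = (B^-1 + H^T R^-1 H)^-1
        # with H the tangent operator (HtM) and H^T its adjoint (HaM) at the
        # optimum.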
1137         for i in range(nb):
1138             _ee    = numpy.matrix(numpy.zeros(nb)).T
1139             _ee[i] = 1.
1140             _HtEE  = numpy.dot(HtM,_ee)
1141             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1142             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1143         HessienneI = numpy.matrix( HessienneI )
1144         A = HessienneI.I
1145         if min(A.shape) != max(A.shape):
1146             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1147         if (numpy.diag(A) < 0).any():
1148             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1149         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1150             try:
1151                 L = numpy.linalg.cholesky( A )
1152             except numpy.linalg.LinAlgError:
1153                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1154     if selfA._toStore("APosterioriCovariance"):
1155         selfA.StoredVariables["APosterioriCovariance"].store( A )
1156     if selfA._toStore("JacobianMatrixAtOptimum"):
1157         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1158     if selfA._toStore("KalmanGainAtOptimum"):
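        # Two algebraically equivalent forms of the gain are used, choosing
        # the smaller system to invert:
        #     K = B H^T (H B H^T + R)^-1            if Y.size <= Xb.size
        #     K = (B^-1 + H^T R^-1 H)^-1 H^T R^-1   otherwise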
1159         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1160         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1161         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1162     #
1163     # Additional computations and/or storage
1164     # ---------------------------------------
1165     if selfA._toStore("Innovation") or \
1166         selfA._toStore("SigmaObs2") or \
1167         selfA._toStore("MahalanobisConsistency") or \
1168         selfA._toStore("OMB"):
1169         d  = Y - HXb
1170     if selfA._toStore("Innovation"):
1171         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1172     if selfA._toStore("BMA"):
1173         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1174     if selfA._toStore("OMA"):
1175         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1176     if selfA._toStore("OMB"):
1177         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1178     if selfA._toStore("SigmaObs2"):
1179         TraceR = R.trace(Y.size)
1180         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1181     if selfA._toStore("MahalanobisConsistency"):
1182         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1183     if selfA._toStore("SimulationQuantiles"):
1184         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
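        # The quantiles are estimated by Monte-Carlo sampling of the analysis
        # error: "Linear" propagates a perturbation dX ~ N(0,A) through the
        # tangent operator around Xa, while "NonLinear" re-applies the full
        # observation operator to the perturbed state; the sorted ensemble of
        # simulated observations then yields the empirical quantiles.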
1185         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1186         YfQ  = None
1187         for i in range(nech):
1188             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1189                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1190                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1191                 Yr = HXa + dYr
1192             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1193                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1194                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1195             if YfQ is None:
1196                 YfQ = Yr
1197             else:
1198                 YfQ = numpy.hstack((YfQ,Yr))
1199         YfQ.sort(axis=-1)
1200         YQ = None
1201         for quantile in selfA._parameters["Quantiles"]:
1202             if not (0. <= float(quantile) <= 1.): continue
1203             indice = int(nech * float(quantile) - 1./nech)
1204             if YQ is None: YQ = YfQ[:,indice]
1205             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1206         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1207     if selfA._toStore("SimulatedObservationAtBackground"):
1208         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1209     if selfA._toStore("SimulatedObservationAtOptimum"):
1210         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1211     #
1212     return 0
1213
1214 # ==============================================================================
1215 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1216     """
1217     Incremental 3DVAR (Courtier 1994, 1997)
1218
1219     selfA is identical to the "self" of the calling algorithm and holds
1220     its values.
1221     """
1222     #
1223     # Workaround for a TNC bug on the returned Minimum
1224     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
1225         selfA.setParameterValue("StoreInternalVariables",True)
1226     #
1227     # Initializations
1228     # ---------------
1229     #
1230     # Non-linear operator for the outer loop
1231     Hm = HO["Direct"].appliedTo
1232     #
1233     # Precomputation of the inverses of B and R
1234     BI = B.getI()
1235     RI = R.getI()
1236     #
1237     # Starting point of the optimization
1238     if selfA._parameters["InitializationPoint"] is not None:
1239         Xini = numpy.ravel(selfA._parameters["InitializationPoint"])
1240         if Xini.size != numpy.ravel(Xb).size:
1241             raise ValueError("Incompatible size %i of forced initial point to replace the Xb of size %i" \
1242                 %(Xini.size,numpy.ravel(Xb).size))
1243     else:
1244         Xini = numpy.ravel(Xb)
1245     #
1246     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1247     Innovation = Y - HXb
1248     #
1249     # Outer Loop
1250     # ----------
1251     iOuter = 0
1252     J      = 1./mpr
1253     DeltaJ = 1./mpr
1254     Xr     = Xini[:,None]
1255     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1256         #
1257         # Inner Loop
1258         # ----------
1259         Ht = HO["Tangent"].asMatrix(Xr)
1260         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1261         #
1262         # Definition of the cost function
1263         # ------------------------------
1264         def CostFunction(dx):
1265             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1266             if selfA._parameters["StoreInternalVariables"] or \
1267                 selfA._toStore("CurrentState") or \
1268                 selfA._toStore("CurrentOptimum"):
1269                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1270             _HdX = Ht * _dX
1271             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1272             _dInnovation = Innovation - _HdX
1273             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1274                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1275                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1276             if selfA._toStore("InnovationAtCurrentState"):
1277                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1278             #
1279             Jb  = float( 0.5 * _dX.T * BI * _dX )
1280             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1281             J   = Jb + Jo
1282             #
1283             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1284             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1285             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1286             selfA.StoredVariables["CostFunctionJ" ].store( J )
1287             if selfA._toStore("IndexOfOptimum") or \
1288                 selfA._toStore("CurrentOptimum") or \
1289                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1290                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1291                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1292                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1293                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1294             if selfA._toStore("IndexOfOptimum"):
1295                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1296             if selfA._toStore("CurrentOptimum"):
1297                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1298             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1299                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1300             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1301                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1302             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1303                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1304             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1305                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1306             return J
1307         #
1308         def GradientOfCostFunction(dx):
1309             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1310             _HdX         = Ht * _dX
1311             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1312             _dInnovation = Innovation - _HdX
1313             GradJb       = BI * _dX
1314             GradJo       = - Ht.T @ (RI * _dInnovation)
1315             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1316             return GradJ
1317         #
1318         # Minimization of the cost function
1319         # --------------------------------
1320         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1321         #
1322         if selfA._parameters["Minimizer"] == "LBFGSB":
1323             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1324             if "0.19" <= scipy.version.version <= "1.1.0":
1325                 import lbfgsbhlt as optimiseur
1326             else:
1327                 import scipy.optimize as optimiseur
1328             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1329                 func        = CostFunction,
1330                 x0          = numpy.zeros(Xini.size),
1331                 fprime      = GradientOfCostFunction,
1332                 args        = (),
1333                 bounds      = selfA._parameters["Bounds"],
1334                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1335                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1336                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1337                 iprint      = selfA._parameters["optiprint"],
1338                 )
1339             nfeval = Informations['funcalls']
1340             rc     = Informations['warnflag']
1341         elif selfA._parameters["Minimizer"] == "TNC":
1342             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1343                 func        = CostFunction,
1344                 x0          = numpy.zeros(Xini.size),
1345                 fprime      = GradientOfCostFunction,
1346                 args        = (),
1347                 bounds      = selfA._parameters["Bounds"],
1348                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1349                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1350                 ftol        = selfA._parameters["CostDecrementTolerance"],
1351                 messages    = selfA._parameters["optmessages"],
1352                 )
1353         elif selfA._parameters["Minimizer"] == "CG":
1354             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1355                 f           = CostFunction,
1356                 x0          = numpy.zeros(Xini.size),
1357                 fprime      = GradientOfCostFunction,
1358                 args        = (),
1359                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1360                 gtol        = selfA._parameters["GradientNormTolerance"],
1361                 disp        = selfA._parameters["optdisp"],
1362                 full_output = True,
1363                 )
1364         elif selfA._parameters["Minimizer"] == "NCG":
1365             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1366                 f           = CostFunction,
1367                 x0          = numpy.zeros(Xini.size),
1368                 fprime      = GradientOfCostFunction,
1369                 args        = (),
1370                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1371                 avextol     = selfA._parameters["CostDecrementTolerance"],
1372                 disp        = selfA._parameters["optdisp"],
1373                 full_output = True,
1374                 )
1375         elif selfA._parameters["Minimizer"] == "BFGS":
1376             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1377                 f           = CostFunction,
1378                 x0          = numpy.zeros(Xini.size),
1379                 fprime      = GradientOfCostFunction,
1380                 args        = (),
1381                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1382                 gtol        = selfA._parameters["GradientNormTolerance"],
1383                 disp        = selfA._parameters["optdisp"],
1384                 full_output = True,
1385                 )
1386         else:
1387             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1388         #
1389         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1390         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1391         #
1392         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1393             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1394             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1395         else:
1396             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1397         #
1398         Xr     = Minimum
1399         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1400         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1401     #
1402     # Retrieval of the analysis
1403     # ----------------------
1404     Xa = Xr
1405     #
1406     selfA.StoredVariables["Analysis"].store( Xa )
1407     #
1408     if selfA._toStore("OMA") or \
1409         selfA._toStore("SigmaObs2") or \
1410         selfA._toStore("SimulationQuantiles") or \
1411         selfA._toStore("SimulatedObservationAtOptimum"):
1412         if selfA._toStore("SimulatedObservationAtCurrentState"):
1413             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1414         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1415             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1416         else:
1417             HXa = Hm( Xa )
1418     #
1419     # Computation of the analysis error covariance
1420     # ---------------------------------
1421     if selfA._toStore("APosterioriCovariance") or \
1422         selfA._toStore("SimulationQuantiles") or \
1423         selfA._toStore("JacobianMatrixAtOptimum") or \
1424         selfA._toStore("KalmanGainAtOptimum"):
1425         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1426         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1427     if selfA._toStore("APosterioriCovariance") or \
1428         selfA._toStore("SimulationQuantiles") or \
1429         selfA._toStore("KalmanGainAtOptimum"):
1430         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1431         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1432     if selfA._toStore("APosterioriCovariance") or \
1433         selfA._toStore("SimulationQuantiles"):
1434         HessienneI = []
1435         nb = Xa.size
1436         for i in range(nb):
1437             _ee    = numpy.matrix(numpy.zeros(nb)).T
1438             _ee[i] = 1.
1439             _HtEE  = numpy.dot(HtM,_ee)
1440             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1441             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1442         HessienneI = numpy.matrix( HessienneI )
1443         A = HessienneI.I
1444         if min(A.shape) != max(A.shape):
1445             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1446         if (numpy.diag(A) < 0).any():
1447             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1448         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1449             try:
1450                 L = numpy.linalg.cholesky( A )
1451             except numpy.linalg.LinAlgError:
1452                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1453     if selfA._toStore("APosterioriCovariance"):
1454         selfA.StoredVariables["APosterioriCovariance"].store( A )
1455     if selfA._toStore("JacobianMatrixAtOptimum"):
1456         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1457     if selfA._toStore("KalmanGainAtOptimum"):
1458         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1459         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1460         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1461     #
1462     # Additional computations and/or storage
1463     # ---------------------------------------
1464     if selfA._toStore("Innovation") or \
1465         selfA._toStore("SigmaObs2") or \
1466         selfA._toStore("MahalanobisConsistency") or \
1467         selfA._toStore("OMB"):
1468         d  = Y - HXb
1469     if selfA._toStore("Innovation"):
1470         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1471     if selfA._toStore("BMA"):
1472         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1473     if selfA._toStore("OMA"):
1474         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1475     if selfA._toStore("OMB"):
1476         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1477     if selfA._toStore("SigmaObs2"):
1478         TraceR = R.trace(Y.size)
1479         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1480     if selfA._toStore("MahalanobisConsistency"):
1481         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1482     if selfA._toStore("SimulationQuantiles"):
1483         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1484         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1485         YfQ  = None
1486         for i in range(nech):
1487             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1488                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1489                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1490                 Yr = HXa + dYr
1491             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1492                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1493                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1494             if YfQ is None:
1495                 YfQ = Yr
1496             else:
1497                 YfQ = numpy.hstack((YfQ,Yr))
1498         YfQ.sort(axis=-1)
1499         YQ = None
1500         for quantile in selfA._parameters["Quantiles"]:
1501             if not (0. <= float(quantile) <= 1.): continue
1502             indice = int(nech * float(quantile) - 1./nech)
1503             if YQ is None: YQ = YfQ[:,indice]
1504             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1505         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1506     if selfA._toStore("SimulatedObservationAtBackground"):
1507         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1508     if selfA._toStore("SimulatedObservationAtOptimum"):
1509         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1510     #
1511     return 0
1512
1513 # ==============================================================================
1514 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1515     """
1516     3DVAR PSAS (Huang 2000)
1517
1518     selfA is identical to the "self" of the calling algorithm and holds
1519     its values.
1520     """
1521     #
1522     # Workaround for a TNC bug on the returned Minimum
1523     if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
1524         selfA.setParameterValue("StoreInternalVariables",True)
1525     #
1526     # Initializations
1527     # ---------------
1528     #
1529     # Operators
1530     Hm = HO["Direct"].appliedTo
1531     #
1532     # Possible use of a precomputed H(Xb) vector
1533     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1534         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1535     else:
1536         HXb = Hm( Xb )
1537     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1538     if Y.size != HXb.size:
1539         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1540     if max(Y.shape) != max(HXb.shape):
1541         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
1542     #
1543     if selfA._toStore("JacobianMatrixAtBackground"):
1544         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1545         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1546         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1547     #
1548     Ht = HO["Tangent"].asMatrix(Xb)
1549     BHT = B * Ht.T
1550     HBHTpR = R + Ht * BHT
1551     Innovation = Y - HXb
1552     #
1553     # Starting point of the optimization
1554     Xini = numpy.zeros(Xb.shape)
1555     #
1556     # Definition of the cost function
1557     # ------------------------------
1558     def CostFunction(w):
1559         _W = numpy.asmatrix(numpy.ravel( w )).T
1560         if selfA._parameters["StoreInternalVariables"] or \
1561             selfA._toStore("CurrentState") or \
1562             selfA._toStore("CurrentOptimum"):
1563             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1564         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1565             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1566             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1567         if selfA._toStore("InnovationAtCurrentState"):
1568             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1569         #
1570         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
1571         Jo  = float( - _W.T * Innovation )
1572         J   = Jb + Jo
1573         #
1574         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1575         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1576         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1577         selfA.StoredVariables["CostFunctionJ" ].store( J )
1578         if selfA._toStore("IndexOfOptimum") or \
1579             selfA._toStore("CurrentOptimum") or \
1580             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1581             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1582             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1583             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1584             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1585         if selfA._toStore("IndexOfOptimum"):
1586             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1587         if selfA._toStore("CurrentOptimum"):
1588             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1589         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1590             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1591         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1592             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1593         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1594             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1595         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1596             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1597         return J
1598     #
1599     def GradientOfCostFunction(w):
1600         _W = numpy.asmatrix(numpy.ravel( w )).T
1601         GradJb  = HBHTpR * _W
1602         GradJo  = - Innovation
1603         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1604         return GradJ
1605     #
1606     # Minimization of the cost function
1607     # --------------------------------
1608     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1609     #
1610     if selfA._parameters["Minimizer"] == "LBFGSB":
1611         # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1612         if "0.19" <= scipy.version.version <= "1.1.0":
1613             import lbfgsbhlt as optimiseur
1614         else:
1615             import scipy.optimize as optimiseur
1616         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1617             func        = CostFunction,
1618             x0          = Xini,
1619             fprime      = GradientOfCostFunction,
1620             args        = (),
1621             bounds      = selfA._parameters["Bounds"],
1622             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1623             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1624             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1625             iprint      = selfA._parameters["optiprint"],
1626             )
1627         nfeval = Informations['funcalls']
1628         rc     = Informations['warnflag']
1629     elif selfA._parameters["Minimizer"] == "TNC":
1630         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1631             func        = CostFunction,
1632             x0          = Xini,
1633             fprime      = GradientOfCostFunction,
1634             args        = (),
1635             bounds      = selfA._parameters["Bounds"],
1636             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1637             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1638             ftol        = selfA._parameters["CostDecrementTolerance"],
1639             messages    = selfA._parameters["optmessages"],
1640             )
1641     elif selfA._parameters["Minimizer"] == "CG":
1642         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1643             f           = CostFunction,
1644             x0          = Xini,
1645             fprime      = GradientOfCostFunction,
1646             args        = (),
1647             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1648             gtol        = selfA._parameters["GradientNormTolerance"],
1649             disp        = selfA._parameters["optdisp"],
1650             full_output = True,
1651             )
1652     elif selfA._parameters["Minimizer"] == "NCG":
1653         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1654             f           = CostFunction,
1655             x0          = Xini,
1656             fprime      = GradientOfCostFunction,
1657             args        = (),
1658             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1659             avextol     = selfA._parameters["CostDecrementTolerance"],
1660             disp        = selfA._parameters["optdisp"],
1661             full_output = True,
1662             )
1663     elif selfA._parameters["Minimizer"] == "BFGS":
1664         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1665             f           = CostFunction,
1666             x0          = Xini,
1667             fprime      = GradientOfCostFunction,
1668             args        = (),
1669             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1670             gtol        = selfA._parameters["GradientNormTolerance"],
1671             disp        = selfA._parameters["optdisp"],
1672             full_output = True,
1673             )
1674     else:
1675         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1676     #
1677     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1678     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1679     #
1680     # Workaround for a TNC bug on the returned Minimum
1681     # ----------------------------------------------------------------
1682     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1683         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1684         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1685     else:
1686         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1687     #
1688     # Retrieval of the analysis
1689     # ----------------------
1690     Xa = Minimum
1691     #
1692     selfA.StoredVariables["Analysis"].store( Xa )
1693     #
1694     if selfA._toStore("OMA") or \
1695         selfA._toStore("SigmaObs2") or \
1696         selfA._toStore("SimulationQuantiles") or \
1697         selfA._toStore("SimulatedObservationAtOptimum"):
1698         if selfA._toStore("SimulatedObservationAtCurrentState"):
1699             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1700         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1701             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1702         else:
1703             HXa = Hm( Xa )
1704     #
1705     # Computation of the analysis error covariance
1706     # ---------------------------------
1707     if selfA._toStore("APosterioriCovariance") or \
1708         selfA._toStore("SimulationQuantiles") or \
1709         selfA._toStore("JacobianMatrixAtOptimum") or \
1710         selfA._toStore("KalmanGainAtOptimum"):
1711         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1712         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1713     if selfA._toStore("APosterioriCovariance") or \
1714         selfA._toStore("SimulationQuantiles") or \
1715         selfA._toStore("KalmanGainAtOptimum"):
1716         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1717         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1718     if selfA._toStore("APosterioriCovariance") or \
1719         selfA._toStore("SimulationQuantiles"):
1720         BI = B.getI()
1721         RI = R.getI()
1722         HessienneI = []
1723         nb = Xa.size
1724         for i in range(nb):
1725             _ee    = numpy.matrix(numpy.zeros(nb)).T
1726             _ee[i] = 1.
1727             _HtEE  = numpy.dot(HtM,_ee)
1728             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1729             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1730         HessienneI = numpy.matrix( HessienneI )
1731         A = HessienneI.I
1732         if min(A.shape) != max(A.shape):
1733             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1734         if (numpy.diag(A) < 0).any():
1735             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1736         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1737             try:
1738                 L = numpy.linalg.cholesky( A )
1739             except numpy.linalg.LinAlgError:
1740                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1741     if selfA._toStore("APosterioriCovariance"):
1742         selfA.StoredVariables["APosterioriCovariance"].store( A )
1743     if selfA._toStore("JacobianMatrixAtOptimum"):
1744         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1745     if selfA._toStore("KalmanGainAtOptimum"):
1746         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1747         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1748         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1749     #
1750     # Additional computations and/or storage
1751     # ---------------------------------------
1752     if selfA._toStore("Innovation") or \
1753         selfA._toStore("SigmaObs2") or \
1754         selfA._toStore("MahalanobisConsistency") or \
1755         selfA._toStore("OMB"):
1756         d  = Y - HXb
1757     if selfA._toStore("Innovation"):
1758         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1759     if selfA._toStore("BMA"):
1760         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1761     if selfA._toStore("OMA"):
1762         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1763     if selfA._toStore("OMB"):
1764         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1765     if selfA._toStore("SigmaObs2"):
1766         TraceR = R.trace(Y.size)
1767         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1768     if selfA._toStore("MahalanobisConsistency"):
1769         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1770     if selfA._toStore("SimulationQuantiles"):
1771         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1772         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1773         YfQ  = None
1774         for i in range(nech):
1775             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1776                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1777                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1778                 Yr = HXa + dYr
1779             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1780                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1781                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1782             if YfQ is None:
1783                 YfQ = Yr
1784             else:
1785                 YfQ = numpy.hstack((YfQ,Yr))
1786         YfQ.sort(axis=-1)
1787         YQ = None
1788         for quantile in selfA._parameters["Quantiles"]:
1789             if not (0. <= float(quantile) <= 1.): continue
1790             indice = int(nech * float(quantile) - 1./nech)
1791             if YQ is None: YQ = YfQ[:,indice]
1792             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1793         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1794     if selfA._toStore("SimulatedObservationAtBackground"):
1795         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1796     if selfA._toStore("SimulatedObservationAtOptimum"):
1797         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1798     #
1799     return 0
1800
1801 # ==============================================================================
1802 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
1803     """
1804     Stochastic EnKF (Evensen 1994, Burgers 1998)
1805
1806     selfA is identical to the "self" of the calling algorithm and holds
1807     its values.
1808     """
1809     if selfA._parameters["EstimationOf"] == "Parameters":
1810         selfA._parameters["StoreInternalVariables"] = True
1811     #
1812     # Operators
1813     # ----------
1814     H = HO["Direct"].appliedControledFormTo
1815     #
1816     if selfA._parameters["EstimationOf"] == "State":
1817         M = EM["Direct"].appliedControledFormTo
1818     #
1819     if CM is not None and "Tangent" in CM and U is not None:
1820         Cm = CM["Tangent"].asMatrix(Xb)
1821     else:
1822         Cm = None
1823     #
1824     # Number of steps identical to the number of observation steps
1825     # -------------------------------------------------------
1826     if hasattr(Y,"stepnumber"):
1827         duration = Y.stepnumber()
1828         __p = numpy.cumprod(Y.shape())[-1]
1829     else:
1830         duration = 2
1831         __p = numpy.array(Y).size
1832     #
1833     # Precomputation of the inverses of B and R
1834     # ----------------------------------
1835     if selfA._parameters["StoreInternalVariables"] \
1836         or selfA._toStore("CostFunctionJ") \
1837         or selfA._toStore("CostFunctionJb") \
1838         or selfA._toStore("CostFunctionJo") \
1839         or selfA._toStore("CurrentOptimum") \
1840         or selfA._toStore("APosterioriCovariance"):
1841         BI = B.getI()
1842         RI = R.getI()
1843     #
1844     # Initialization
1845     # --------------
1846     __n = Xb.size
1847     __m = selfA._parameters["NumberOfMembers"]
1848     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1849     else:                         Pn = B
1850     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1851     else:                         Rn = R
1852     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1853     else:                         Qn = Q
1854     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1855     #
1856     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1857         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
1858         if selfA._toStore("APosterioriCovariance"):
1859             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1860             covarianceXa = Pn
1861     #
1862     previousJMinimum = numpy.finfo(float).max
1863     #
1864     for step in range(duration-1):
1865         if hasattr(Y,"store"):
1866             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
1867         else:
1868             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
1869         #
1870         if U is not None:
1871             if hasattr(U,"store") and len(U)>1:
1872                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1873             elif hasattr(U,"store") and len(U)==1:
1874                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1875             else:
1876                 Un = numpy.asmatrix(numpy.ravel( U )).T
1877         else:
1878             Un = None
1879         #
1880         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1881             Xn = CovarianceInflation( Xn,
1882                 selfA._parameters["InflationType"],
1883                 selfA._parameters["InflationFactor"],
1884                 )
1885         #
1886         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1887             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1888                 argsAsSerie = True,
1889                 returnSerieAsArrayMatrix = True )
1890             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1891             Xn_predicted = EMX + qi
1892             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1893                 argsAsSerie = True,
1894                 returnSerieAsArrayMatrix = True )
1895             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1896                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1897                 Xn_predicted = Xn_predicted + Cm * Un
1898         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1899             # --- > By principle, M = Id, Q = 0
1900             Xn_predicted = Xn
1901             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1902                 argsAsSerie = True,
1903                 returnSerieAsArrayMatrix = True )
1904         #
1905         # Mean of forecast and observation of forecast
1906         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1907         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1908         #
1909         #--------------------------
1910         if VariantM == "KalmanFilterFormula05":
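            # Gain built from the empirical forecast covariances,
            #     K = PfH^T (H Pf H^T + R)^-1,
            # with PfH^T and H Pf H^T accumulated member by member below.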
1911             PfHT, HPfHT = 0., 0.
1912             for i in range(__m):
1913                 Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
1914                 Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
1915                 PfHT  += Exfi * Eyfi.T
1916                 HPfHT += Eyfi * Eyfi.T
1917             PfHT  = (1./(__m-1)) * PfHT
1918             HPfHT = (1./(__m-1)) * HPfHT
1919             Kn     = PfHT * ( R + HPfHT ).I
1920             del PfHT, HPfHT
1921             #
1922             for i in range(__m):
1923                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
1924                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
1925         #--------------------------
1926         elif VariantM == "KalmanFilterFormula16":
1927             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
1928             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1929             #
1930             EaX   = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
1931             EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
1932             #
1933             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
1934             #
1935             for i in range(__m):
1936                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
1937         #--------------------------
1938         else:
1939             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1940         #
1941         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1942             Xn = CovarianceInflation( Xn,
1943                 selfA._parameters["InflationType"],
1944                 selfA._parameters["InflationFactor"],
1945                 )
1946         #
1947         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1948         #--------------------------
1949         #
1950         if selfA._parameters["StoreInternalVariables"] \
1951             or selfA._toStore("CostFunctionJ") \
1952             or selfA._toStore("CostFunctionJb") \
1953             or selfA._toStore("CostFunctionJo") \
1954             or selfA._toStore("APosterioriCovariance") \
1955             or selfA._toStore("InnovationAtCurrentAnalysis") \
1956             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1957             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1958             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1959             _Innovation = Ynpu - _HXa
1960         #
1961         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1962         # ---> with the analysis
1963         selfA.StoredVariables["Analysis"].store( Xa )
1964         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1965             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1966         if selfA._toStore("InnovationAtCurrentAnalysis"):
1967             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1968         # ---> with the current state
1969         if selfA._parameters["StoreInternalVariables"] \
1970             or selfA._toStore("CurrentState"):
1971             selfA.StoredVariables["CurrentState"].store( Xn )
1972         if selfA._toStore("ForecastState"):
1973             selfA.StoredVariables["ForecastState"].store( EMX )
1974         if selfA._toStore("BMA"):
1975             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1976         if selfA._toStore("InnovationAtCurrentState"):
1977             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1978         if selfA._toStore("SimulatedObservationAtCurrentState") \
1979             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1980             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1981         # ---> others
1982         if selfA._parameters["StoreInternalVariables"] \
1983             or selfA._toStore("CostFunctionJ") \
1984             or selfA._toStore("CostFunctionJb") \
1985             or selfA._toStore("CostFunctionJo") \
1986             or selfA._toStore("CurrentOptimum") \
1987             or selfA._toStore("APosterioriCovariance"):
1988             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1989             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1990             J   = Jb + Jo
1991             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1992             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1993             selfA.StoredVariables["CostFunctionJ" ].store( J )
1994             #
1995             if selfA._toStore("IndexOfOptimum") \
1996                 or selfA._toStore("CurrentOptimum") \
1997                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1998                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1999                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2000                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2001                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2002             if selfA._toStore("IndexOfOptimum"):
2003                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2004             if selfA._toStore("CurrentOptimum"):
2005                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2006             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2007                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2008             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2009                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2010             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2011                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2012             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2013                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2014         if selfA._toStore("APosterioriCovariance"):
2015             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2016             Pn = Eai @ Eai.T
2017             Pn = 0.5 * (Pn + Pn.T)
2018             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2019         if selfA._parameters["EstimationOf"] == "Parameters" \
2020             and J < previousJMinimum:
2021             previousJMinimum    = J
2022             XaMin               = Xa
2023             if selfA._toStore("APosterioriCovariance"):
2024                 covarianceXaMin = Pn
2025     #
2026     # Final additional storage of the optimum in parameter estimation
2027     # ----------------------------------------------------------------------
2028     if selfA._parameters["EstimationOf"] == "Parameters":
2029         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2030         selfA.StoredVariables["Analysis"].store( XaMin )
2031         if selfA._toStore("APosterioriCovariance"):
2032             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2033         if selfA._toStore("BMA"):
2034             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2035     #
2036     return 0
2037
2038 # ==============================================================================
2039 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2040     """
2041     Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)
2042
2043     selfA is identical to the "self" of the calling algorithm and holds
2044     its values.
2045     """
2046     if selfA._parameters["EstimationOf"] == "Parameters":
2047         selfA._parameters["StoreInternalVariables"] = True
2048     #
2049     # Operators
2050     # ----------
2051     H = HO["Direct"].appliedControledFormTo
2052     #
2053     if selfA._parameters["EstimationOf"] == "State":
2054         M = EM["Direct"].appliedControledFormTo
2055     #
2056     if CM is not None and "Tangent" in CM and U is not None:
2057         Cm = CM["Tangent"].asMatrix(Xb)
2058     else:
2059         Cm = None
2060     #
2061     # Number of steps identical to the number of observation steps
2062     # -------------------------------------------------------
2063     if hasattr(Y,"stepnumber"):
2064         duration = Y.stepnumber()
2065         __p = numpy.cumprod(Y.shape())[-1]
2066     else:
2067         duration = 2
2068         __p = numpy.array(Y).size
2069     #
2070     # Precomputation of the inverses of B and R
2071     # ----------------------------------
2072     if selfA._parameters["StoreInternalVariables"] \
2073         or selfA._toStore("CostFunctionJ") \
2074         or selfA._toStore("CostFunctionJb") \
2075         or selfA._toStore("CostFunctionJo") \
2076         or selfA._toStore("CurrentOptimum") \
2077         or selfA._toStore("APosterioriCovariance"):
2078         BI = B.getI()
2079         RI = R.getI()
2080     elif VariantM != "KalmanFilterFormula":
2081         RI = R.getI()
2082     if VariantM == "KalmanFilterFormula":
2083         RIdemi = R.choleskyI()
2084     #
2085     # Initialization
2086     # --------------
2087     __n = Xb.size
2088     __m = selfA._parameters["NumberOfMembers"]
2089     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2090     else:                         Pn = B
2091     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2092     else:                         Rn = R
2093     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2094     else:                         Qn = Q
2095     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2096     #
2097     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2098         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2099         if selfA._toStore("APosterioriCovariance"):
2100             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2101             covarianceXa = Pn
2102     #
2103     previousJMinimum = numpy.finfo(float).max
2104     #
2105     for step in range(duration-1):
2106         if hasattr(Y,"store"):
2107             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2108         else:
2109             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2110         #
2111         if U is not None:
2112             if hasattr(U,"store") and len(U)>1:
2113                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2114             elif hasattr(U,"store") and len(U)==1:
2115                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2116             else:
2117                 Un = numpy.asmatrix(numpy.ravel( U )).T
2118         else:
2119             Un = None
2120         #
2121         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2122             Xn = CovarianceInflation( Xn,
2123                 selfA._parameters["InflationType"],
2124                 selfA._parameters["InflationFactor"],
2125                 )
2126         #
2127         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2128             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2129                 argsAsSerie = True,
2130                 returnSerieAsArrayMatrix = True )
2131             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2132             Xn_predicted = EMX + qi
2133             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2134                 argsAsSerie = True,
2135                 returnSerieAsArrayMatrix = True )
2136             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2137                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2138                 Xn_predicted = Xn_predicted + Cm * Un
2139         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2140             # ---> By principle, M = Id, Q = 0
2141             Xn_predicted = Xn
2142             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2143                 argsAsSerie = True,
2144                 returnSerieAsArrayMatrix = True )
2145         #
2146         # Mean of forecast and observation of forecast
2147         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2148         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2149         #
2150         # Anomalies
2151         EaX   = EnsembleOfAnomalies( Xn_predicted )
2152         EaHX  = numpy.array(HX_predicted - Hfm)
2153         #
2154         #--------------------------
2155         if VariantM == "KalmanFilterFormula":
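                 # Deterministic ETKF update in ensemble (weight) space: with
                 # S = R^{-1/2} (observed anomalies) / sqrt(m-1) and d = R^{-1/2}(y - Hfm),
                 # the mean weights are w = (I + S^T S)^{-1} S^T d, and the analysis
                 # anomalies are the forecast anomalies rotated by the symmetric
                 # square root of (I + S^T S)^{-1}.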
2156             mS    = RIdemi * EaHX / numpy.sqrt(__m-1)
2157             delta = RIdemi * ( Ynpu - Hfm )
2158             mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2159             vw    = mT @ mS.transpose() @ delta
2160             #
2161             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2162             mU    = numpy.eye(__m)
2163             #
2164             EaX   = EaX / numpy.sqrt(__m-1)
2165             Xn    = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
2166         #--------------------------
2167         elif VariantM == "Variational":
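                 # Variational variant: the analysis weights minimize
                 # J(w) = 0.5 (y - H(xf) - EaHX w)^T R^{-1} (y - H(xf) - EaHX w) + 0.5 (m-1) w^T w,
                 # solved by conjugate gradient; the analysis anomalies are built from
                 # the (approximate) inverse Hessian of J in weight space.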
2168             HXfm = H((Xfm[:,None], Un)) # Alternatively Hfm
2169             def CostFunction(w):
2170                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2171                 _Jo = 0.5 * _A.T @ (RI * _A)
2172                 _Jb = 0.5 * (__m-1) * w.T @ w
2173                 _J  = _Jo + _Jb
2174                 return float(_J)
2175             def GradientOfCostFunction(w):
2176                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2177                 _GradJo = - EaHX.T @ (RI * _A)
2178                 _GradJb = (__m-1) * w.reshape((__m,1))
2179                 _GradJ  = _GradJo + _GradJb
2180                 return numpy.ravel(_GradJ)
2181             vw = scipy.optimize.fmin_cg(
2182                 f           = CostFunction,
2183                 x0          = numpy.zeros(__m),
2184                 fprime      = GradientOfCostFunction,
2185                 args        = (),
2186                 disp        = False,
2187                 )
2188             #
2189             Hto = EaHX.T @ (RI * EaHX)
2190             Htb = (__m-1) * numpy.eye(__m)
2191             Hta = Hto + Htb
2192             #
2193             Pta = numpy.linalg.inv( Hta )
2194             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2195             #
2196             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2197         #--------------------------
2198         elif VariantM == "FiniteSize11": # Boc2011 gauge
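                 # Finite-size (EnKF-N type) variant: the Gaussian background term of
                 # the weight-space cost function is replaced by the logarithmic prior
                 # 0.5 * m * log(1 + 1/m + w^T w) (Boc2011 gauge), intended to account
                 # for the sampling error of a finite ensemble.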
2199             HXfm = H((Xfm[:,None], Un)) # Alternatively Hfm
2200             def CostFunction(w):
2201                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2202                 _Jo = 0.5 * _A.T @ (RI * _A)
2203                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2204                 _J  = _Jo + _Jb
2205                 return float(_J)
2206             def GradientOfCostFunction(w):
2207                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2208                 _GradJo = - EaHX.T @ (RI * _A)
2209                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2210                 _GradJ  = _GradJo + _GradJb
2211                 return numpy.ravel(_GradJ)
2212             vw = scipy.optimize.fmin_cg(
2213                 f           = CostFunction,
2214                 x0          = numpy.zeros(__m),
2215                 fprime      = GradientOfCostFunction,
2216                 args        = (),
2217                 disp        = False,
2218                 )
2219             #
2220             Hto = EaHX.T @ (RI * EaHX)
2221             Htb = __m * \
2222                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2223                 / (1 + 1/__m + vw.T @ vw)**2
2224             Hta = Hto + Htb
2225             #
2226             Pta = numpy.linalg.inv( Hta )
2227             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2228             #
2229             Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2230         #--------------------------
2231         elif VariantM == "FiniteSize15": # Boc2015 gauge
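                 # Same finite-size approach with the Boc2015 gauge: the background
                 # term becomes 0.5 * (m+1) * log(1 + 1/m + w^T w).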
2232             HXfm = H((Xfm[:,None], Un)) # Alternatively Hfm
2233             def CostFunction(w):
2234                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2235                 _Jo = 0.5 * _A.T @ (RI * _A)
2236                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2237                 _J  = _Jo + _Jb
2238                 return float(_J)
2239             def GradientOfCostFunction(w):
2240                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2241                 _GradJo = - EaHX.T @ (RI * _A)
2242                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2243                 _GradJ  = _GradJo + _GradJb
2244                 return numpy.ravel(_GradJ)
2245             vw = scipy.optimize.fmin_cg(
2246                 f           = CostFunction,
2247                 x0          = numpy.zeros(__m),
2248                 fprime      = GradientOfCostFunction,
2249                 args        = (),
2250                 disp        = False,
2251                 )
2252             #
2253             Hto = EaHX.T @ (RI * EaHX)
2254             Htb = (__m+1) * \
2255                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2256                 / (1 + 1/__m + vw.T @ vw)**2
2257             Hta = Hto + Htb
2258             #
2259             Pta = numpy.linalg.inv( Hta )
2260             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2261             #
2262             Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2263         #--------------------------
2264         elif VariantM == "FiniteSize16": # Boc2016 gauge
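                 # Same finite-size approach with the Boc2016 gauge: the background
                 # term becomes 0.5 * (m+1) * log(1 + 1/m + w^T w / (m-1)).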
2265             HXfm = H((Xfm[:,None], Un)) # Alternatively Hfm
2266             def CostFunction(w):
2267                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2268                 _Jo = 0.5 * _A.T @ (RI * _A)
2269                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2270                 _J  = _Jo + _Jb
2271                 return float(_J)
2272             def GradientOfCostFunction(w):
2273                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2274                 _GradJo = - EaHX.T @ (RI * _A)
2275                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2276                 _GradJ  = _GradJo + _GradJb
2277                 return numpy.ravel(_GradJ)
2278             vw = scipy.optimize.fmin_cg(
2279                 f           = CostFunction,
2280                 x0          = numpy.zeros(__m),
2281                 fprime      = GradientOfCostFunction,
2282                 args        = (),
2283                 disp        = False,
2284                 )
2285             #
2286             Hto = EaHX.T @ (RI * EaHX)
2287             Htb = ((__m+1) / (__m-1)) * \
2288                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
2289                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2290             Hta = Hto + Htb
2291             #
2292             Pta = numpy.linalg.inv( Hta )
2293             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2294             #
2295             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2296         #--------------------------
2297         else:
2298             raise ValueError("VariantM must be chosen from the list of authorized methods.")
2299         #
2300         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2301             Xn = CovarianceInflation( Xn,
2302                 selfA._parameters["InflationType"],
2303                 selfA._parameters["InflationFactor"],
2304                 )
2305         #
2306         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2307         #--------------------------
2308         #
2309         if selfA._parameters["StoreInternalVariables"] \
2310             or selfA._toStore("CostFunctionJ") \
2311             or selfA._toStore("CostFunctionJb") \
2312             or selfA._toStore("CostFunctionJo") \
2313             or selfA._toStore("APosterioriCovariance") \
2314             or selfA._toStore("InnovationAtCurrentAnalysis") \
2315             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2316             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2317             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2318             _Innovation = Ynpu - _HXa
2319         #
2320         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2321         # ---> with the analysis
2322         selfA.StoredVariables["Analysis"].store( Xa )
2323         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2324             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2325         if selfA._toStore("InnovationAtCurrentAnalysis"):
2326             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2327         # ---> with the current state
2328         if selfA._parameters["StoreInternalVariables"] \
2329             or selfA._toStore("CurrentState"):
2330             selfA.StoredVariables["CurrentState"].store( Xn )
2331         if selfA._toStore("ForecastState"):
2332             selfA.StoredVariables["ForecastState"].store( EMX )
2333         if selfA._toStore("BMA"):
2334             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2335         if selfA._toStore("InnovationAtCurrentState"):
2336             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
2337         if selfA._toStore("SimulatedObservationAtCurrentState") \
2338             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2339             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2340         # ---> others
2341         if selfA._parameters["StoreInternalVariables"] \
2342             or selfA._toStore("CostFunctionJ") \
2343             or selfA._toStore("CostFunctionJb") \
2344             or selfA._toStore("CostFunctionJo") \
2345             or selfA._toStore("CurrentOptimum") \
2346             or selfA._toStore("APosterioriCovariance"):
2347             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2348             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2349             J   = Jb + Jo
2350             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2351             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2352             selfA.StoredVariables["CostFunctionJ" ].store( J )
2353             #
2354             if selfA._toStore("IndexOfOptimum") \
2355                 or selfA._toStore("CurrentOptimum") \
2356                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2357                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2358                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2359                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2360                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2361             if selfA._toStore("IndexOfOptimum"):
2362                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2363             if selfA._toStore("CurrentOptimum"):
2364                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2365             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2366                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2367             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2368                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2369             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2370                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2371             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2372                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2373         if selfA._toStore("APosterioriCovariance"):
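                 # The a posteriori covariance is estimated as the sample covariance
                 # of the analysis anomalies, then symmetrized to limit the
                 # accumulation of round-off asymmetry.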
2374             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2375             Pn = Eai @ Eai.T
2376             Pn = 0.5 * (Pn + Pn.T)
2377             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2378         if selfA._parameters["EstimationOf"] == "Parameters" \
2379             and J < previousJMinimum:
2380             previousJMinimum    = J
2381             XaMin               = Xa
2382             if selfA._toStore("APosterioriCovariance"):
2383                 covarianceXaMin = Pn
2384     #
2385     # Additional final storage of the optimum for parameter estimation
2386     # -----------------------------------------------------------------
2387     if selfA._parameters["EstimationOf"] == "Parameters":
2388         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2389         selfA.StoredVariables["Analysis"].store( XaMin )
2390         if selfA._toStore("APosterioriCovariance"):
2391             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2392         if selfA._toStore("BMA"):
2393             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2394     #
2395     return 0
2396
2397 # ==============================================================================
2398 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2399     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2400     """
2401     Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013)
2402
2403     selfA is identical to the "self" of the calling algorithm and holds
2404     its values.
2405     """
2406     if selfA._parameters["EstimationOf"] == "Parameters":
2407         selfA._parameters["StoreInternalVariables"] = True
2408     #
2409     # Operators
2410     # ---------
2411     H = HO["Direct"].appliedControledFormTo
2412     #
2413     if selfA._parameters["EstimationOf"] == "State":
2414         M = EM["Direct"].appliedControledFormTo
2415     #
2416     if CM is not None and "Tangent" in CM and U is not None:
2417         Cm = CM["Tangent"].asMatrix(Xb)
2418     else:
2419         Cm = None
2420     #
2421     # Number of steps equal to the number of observation steps
2422     # --------------------------------------------------------
2423     if hasattr(Y,"stepnumber"):
2424         duration = Y.stepnumber()
2425         __p = numpy.cumprod(Y.shape())[-1]
2426     else:
2427         duration = 2
2428         __p = numpy.array(Y).size
2429     #
2430     # Precompute the inverses of B and R
2431     # ----------------------------------
2432     if selfA._parameters["StoreInternalVariables"] \
2433         or selfA._toStore("CostFunctionJ") \
2434         or selfA._toStore("CostFunctionJb") \
2435         or selfA._toStore("CostFunctionJo") \
2436         or selfA._toStore("CurrentOptimum") \
2437         or selfA._toStore("APosterioriCovariance"):
2438         BI = B.getI()
2439     RI = R.getI()
2440     #
2441     # Initialisation
2442     # --------------
2443     __n = Xb.size
2444     __m = selfA._parameters["NumberOfMembers"]
2445     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2446     else:                         Pn = B
2447     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2448     else:                         Rn = R
2449     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2450     else:                         Qn = Q
2451     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2452     #
2453     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2454         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2455         if selfA._toStore("APosterioriCovariance"):
2456             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2457             covarianceXa = Pn
2458     #
2459     previousJMinimum = numpy.finfo(float).max
2460     #
2461     for step in range(duration-1):
2462         if hasattr(Y,"store"):
2463             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2464         else:
2465             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2466         #
2467         if U is not None:
2468             if hasattr(U,"store") and len(U)>1:
2469                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2470             elif hasattr(U,"store") and len(U)==1:
2471                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2472             else:
2473                 Un = numpy.asmatrix(numpy.ravel( U )).T
2474         else:
2475             Un = None
2476         #
2477         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2478             Xn = CovarianceInflation( Xn,
2479                 selfA._parameters["InflationType"],
2480                 selfA._parameters["InflationFactor"],
2481                 )
2482         #
2483         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2484             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2485                 argsAsSerie = True,
2486                 returnSerieAsArrayMatrix = True )
2487             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2488             Xn_predicted = EMX + qi
2489             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2490                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2491                 Xn_predicted = Xn_predicted + Cm * Un
2492         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2493             # ---> By principle, M = Id, Q = 0
2494             Xn_predicted = Xn
2495         #
2496         #--------------------------
2497         if VariantM == "MLEF13":
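                 # MLEF update: a Gauss-Newton iteration in ensemble weight space in
                 # which the nonlinear observation operator is re-applied to the
                 # transformed ensemble at every inner iteration; BnotT selects a
                 # finite-difference ("bundle"-like, _epsilon-scaled) variant instead
                 # of the transform variant based on Ta.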
2498             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2499             EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2500             Ua  = numpy.eye(__m)
2501             __j = 0
2502             Deltaw = 1
2503             if not BnotT:
2504                 Ta  = numpy.eye(__m)
2505             vw  = numpy.zeros(__m)
2506             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2507                 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2508                 #
2509                 if BnotT:
2510                     E1 = vx1 + _epsilon * EaX
2511                 else:
2512                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2513                 #
2514                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2515                     argsAsSerie = True,
2516                     returnSerieAsArrayMatrix = True )
2517                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2518                 #
2519                 if BnotT:
2520                     EaY = (HE2 - vy2) / _epsilon
2521                 else:
2522                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2523                 #
2524                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2525                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2526                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2527                 #
2528                 vw = vw + Deltaw
2529                 #
2530                 if not BnotT:
2531                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2532                 #
2533                 __j = __j + 1
2534             #
2535             if BnotT:
2536                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2537             #
2538             Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
2539         #--------------------------
2540         else:
2541             raise ValueError("VariantM must be chosen from the list of authorized methods.")
2542         #
2543         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2544             Xn = CovarianceInflation( Xn,
2545                 selfA._parameters["InflationType"],
2546                 selfA._parameters["InflationFactor"],
2547                 )
2548         #
2549         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2550         #--------------------------
2551         #
2552         if selfA._parameters["StoreInternalVariables"] \
2553             or selfA._toStore("CostFunctionJ") \
2554             or selfA._toStore("CostFunctionJb") \
2555             or selfA._toStore("CostFunctionJo") \
2556             or selfA._toStore("APosterioriCovariance") \
2557             or selfA._toStore("InnovationAtCurrentAnalysis") \
2558             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2559             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2560             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2561             _Innovation = Ynpu - _HXa
2562         #
2563         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2564         # ---> with the analysis
2565         selfA.StoredVariables["Analysis"].store( Xa )
2566         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2567             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2568         if selfA._toStore("InnovationAtCurrentAnalysis"):
2569             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2570         # ---> with the current state
2571         if selfA._parameters["StoreInternalVariables"] \
2572             or selfA._toStore("CurrentState"):
2573             selfA.StoredVariables["CurrentState"].store( Xn )
2574         if selfA._toStore("ForecastState"):
2575             selfA.StoredVariables["ForecastState"].store( EMX )
2576         if selfA._toStore("BMA"):
2577             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2578         if selfA._toStore("InnovationAtCurrentState"):
2579             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2580         if selfA._toStore("SimulatedObservationAtCurrentState") \
2581             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2582             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2583         # ---> others
2584         if selfA._parameters["StoreInternalVariables"] \
2585             or selfA._toStore("CostFunctionJ") \
2586             or selfA._toStore("CostFunctionJb") \
2587             or selfA._toStore("CostFunctionJo") \
2588             or selfA._toStore("CurrentOptimum") \
2589             or selfA._toStore("APosterioriCovariance"):
2590             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2591             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2592             J   = Jb + Jo
2593             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2594             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2595             selfA.StoredVariables["CostFunctionJ" ].store( J )
2596             #
2597             if selfA._toStore("IndexOfOptimum") \
2598                 or selfA._toStore("CurrentOptimum") \
2599                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2600                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2601                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2602                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2603                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2604             if selfA._toStore("IndexOfOptimum"):
2605                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2606             if selfA._toStore("CurrentOptimum"):
2607                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2608             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2609                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2610             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2611                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2612             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2613                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2614             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2615                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2616         if selfA._toStore("APosterioriCovariance"):
2617             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2618             Pn = Eai @ Eai.T
2619             Pn = 0.5 * (Pn + Pn.T)
2620             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2621         if selfA._parameters["EstimationOf"] == "Parameters" \
2622             and J < previousJMinimum:
2623             previousJMinimum    = J
2624             XaMin               = Xa
2625             if selfA._toStore("APosterioriCovariance"):
2626                 covarianceXaMin = Pn
2627     #
2628     # Additional final storage of the optimum for parameter estimation
2629     # -----------------------------------------------------------------
2630     if selfA._parameters["EstimationOf"] == "Parameters":
2631         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2632         selfA.StoredVariables["Analysis"].store( XaMin )
2633         if selfA._toStore("APosterioriCovariance"):
2634             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2635         if selfA._toStore("BMA"):
2636             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2637     #
2638     return 0
2639
2640 # ==============================================================================
2641 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2642     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2643     """
2644     Iterative EnKF (Sakov 2012, Sakov 2018)
2645
2646     selfA is identical to the "self" of the calling algorithm and holds
2647     its values.
2648     """
2649     if selfA._parameters["EstimationOf"] == "Parameters":
2650         selfA._parameters["StoreInternalVariables"] = True
2651     #
2652     # Operators
2653     # ---------
2654     H = HO["Direct"].appliedControledFormTo
2655     #
2656     if selfA._parameters["EstimationOf"] == "State":
2657         M = EM["Direct"].appliedControledFormTo
2658     #
2659     if CM is not None and "Tangent" in CM and U is not None:
2660         Cm = CM["Tangent"].asMatrix(Xb)
2661     else:
2662         Cm = None
2663     #
2664     # Number of steps equal to the number of observation steps
2665     # --------------------------------------------------------
2666     if hasattr(Y,"stepnumber"):
2667         duration = Y.stepnumber()
2668         __p = numpy.cumprod(Y.shape())[-1]
2669     else:
2670         duration = 2
2671         __p = numpy.array(Y).size
2672     #
2673     # Precompute the inverses of B and R
2674     # ----------------------------------
2675     if selfA._parameters["StoreInternalVariables"] \
2676         or selfA._toStore("CostFunctionJ") \
2677         or selfA._toStore("CostFunctionJb") \
2678         or selfA._toStore("CostFunctionJo") \
2679         or selfA._toStore("CurrentOptimum") \
2680         or selfA._toStore("APosterioriCovariance"):
2681         BI = B.getI()
2682     RI = R.getI()
2683     #
2684     # Initialisation
2685     # --------------
2686     __n = Xb.size
2687     __m = selfA._parameters["NumberOfMembers"]
2688     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2689     else:                         Pn = B
2690     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2691     else:                         Rn = R
2692     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2693     else:                         Qn = Q
2694     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2695     #
2696     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2697         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2698         if selfA._toStore("APosterioriCovariance"):
2699             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2700             covarianceXa = Pn
2701     #
2702     previousJMinimum = numpy.finfo(float).max
2703     #
2704     for step in range(duration-1):
2705         if hasattr(Y,"store"):
2706             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2707         else:
2708             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2709         #
2710         if U is not None:
2711             if hasattr(U,"store") and len(U)>1:
2712                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2713             elif hasattr(U,"store") and len(U)==1:
2714                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2715             else:
2716                 Un = numpy.asmatrix(numpy.ravel( U )).T
2717         else:
2718             Un = None
2719         #
2720         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2721             Xn = CovarianceInflation( Xn,
2722                 selfA._parameters["InflationType"],
2723                 selfA._parameters["InflationFactor"],
2724                 )
2725         #
2726         #--------------------------
2727         if VariantM == "IEnKF12":
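                 # Iterative EnKF update: both the evolution operator M and the
                 # observation operator H are re-applied inside the Gauss-Newton
                 # iteration on the weights, so each analysis iterates on the
                 # previous state (smoother-like update); BnotT again selects the
                 # finite-difference (_epsilon-scaled) variant instead of the
                 # transform variant based on Ta.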
2728             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2729             EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
2730             __j = 0
2731             Deltaw = 1
2732             if not BnotT:
2733                 Ta  = numpy.eye(__m)
2734             vw  = numpy.zeros(__m)
2735             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2736                 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2737                 #
2738                 if BnotT:
2739                     E1 = vx1 + _epsilon * EaX
2740                 else:
2741                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2742                 #
2743                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2744                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2745                         argsAsSerie = True,
2746                         returnSerieAsArrayMatrix = True )
2747                 elif selfA._parameters["EstimationOf"] == "Parameters":
2748                     # ---> By principle, M = Id
2749                     E2 = Xn
2750                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2751                 vy1 = H((vx2, Un)).reshape((__p,-1))
2752                 #
2753                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2754                     argsAsSerie = True,
2755                     returnSerieAsArrayMatrix = True )
2756                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2757                 #
2758                 if BnotT:
2759                     EaY = (HE2 - vy2) / _epsilon
2760                 else:
2761                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2762                 #
2763                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2764                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2765                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2766                 #
2767                 vw = vw + Deltaw
2768                 #
2769                 if not BnotT:
2770                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2771                 #
2772                 __j = __j + 1
2773             #
2774             A2 = EnsembleOfAnomalies( E2 )
2775             #
2776             if BnotT:
2777                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2778                 A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
2779             #
2780             Xn = vx2 + A2
2781         #--------------------------
2782         else:
2783             raise ValueError("VariantM must be chosen from the list of authorized methods.")
2784         #
2785         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2786             Xn = CovarianceInflation( Xn,
2787                 selfA._parameters["InflationType"],
2788                 selfA._parameters["InflationFactor"],
2789                 )
2790         #
2791         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2792         #--------------------------
2793         #
2794         if selfA._parameters["StoreInternalVariables"] \
2795             or selfA._toStore("CostFunctionJ") \
2796             or selfA._toStore("CostFunctionJb") \
2797             or selfA._toStore("CostFunctionJo") \
2798             or selfA._toStore("APosterioriCovariance") \
2799             or selfA._toStore("InnovationAtCurrentAnalysis") \
2800             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2801             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2802             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2803             _Innovation = Ynpu - _HXa
2804         #
2805         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2806         # ---> with the analysis
2807         selfA.StoredVariables["Analysis"].store( Xa )
2808         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2809             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2810         if selfA._toStore("InnovationAtCurrentAnalysis"):
2811             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2812         # ---> with the current state
2813         if selfA._parameters["StoreInternalVariables"] \
2814             or selfA._toStore("CurrentState"):
2815             selfA.StoredVariables["CurrentState"].store( Xn )
2816         if selfA._toStore("ForecastState"):
2817             selfA.StoredVariables["ForecastState"].store( E2 )
2818         if selfA._toStore("BMA"):
2819             selfA.StoredVariables["BMA"].store( E2 - Xa )
2820         if selfA._toStore("InnovationAtCurrentState"):
2821             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2822         if selfA._toStore("SimulatedObservationAtCurrentState") \
2823             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2824             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2825         # ---> others
2826         if selfA._parameters["StoreInternalVariables"] \
2827             or selfA._toStore("CostFunctionJ") \
2828             or selfA._toStore("CostFunctionJb") \
2829             or selfA._toStore("CostFunctionJo") \
2830             or selfA._toStore("CurrentOptimum") \
2831             or selfA._toStore("APosterioriCovariance"):
2832             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2833             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2834             J   = Jb + Jo
2835             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2836             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2837             selfA.StoredVariables["CostFunctionJ" ].store( J )
2838             #
2839             if selfA._toStore("IndexOfOptimum") \
2840                 or selfA._toStore("CurrentOptimum") \
2841                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2842                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2843                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2844                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2845                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2846             if selfA._toStore("IndexOfOptimum"):
2847                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2848             if selfA._toStore("CurrentOptimum"):
2849                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2850             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2851                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2852             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2853                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2854             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2855                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2856             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2857                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2858         if selfA._toStore("APosterioriCovariance"):
2859             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2860             Pn = Eai @ Eai.T
2861             Pn = 0.5 * (Pn + Pn.T)
2862             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2863         if selfA._parameters["EstimationOf"] == "Parameters" \
2864             and J < previousJMinimum:
2865             previousJMinimum    = J
2866             XaMin               = Xa
2867             if selfA._toStore("APosterioriCovariance"):
2868                 covarianceXaMin = Pn
2869     #
2870     # Additional final storage of the optimum for parameter estimation
2871     # -----------------------------------------------------------------
2872     if selfA._parameters["EstimationOf"] == "Parameters":
2873         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2874         selfA.StoredVariables["Analysis"].store( XaMin )
2875         if selfA._toStore("APosterioriCovariance"):
2876             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2877         if selfA._toStore("BMA"):
2878             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2879     #
2880     return 0
2881
2882 # ==============================================================================
2883 if __name__ == "__main__":
2884     print('\n AUTODIAGNOSTIC\n')