1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Defines the generic numerical objects.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( paire ):
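    # "paire" is a couple (X, funcrepr), where funcrepr is a dictionary with the
    # keys "__userFunction__path", "__userFunction__modl" and "__userFunction__name"
    # (as built in FDApproximation.TangentMatrix below); the named user function is
    # imported in the worker process and evaluated at the point X.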
38     assert len(paire) == 2, "Incorrect number of arguments"
39     X, funcrepr = paire
40     __X = numpy.asmatrix(numpy.ravel( X )).T
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     __HX  = __fonction( __X )
46     return numpy.ravel( __HX )
47
48 # ==============================================================================
49 class FDApproximation(object):
50     """
51     This class acts as an interface to define approximated operators. When an
52     object is created by providing a function "Function", one obtains an object
53     with the three methods "DirectOperator", "TangentOperator" and
54     "AdjointOperator". The FD approximation is controlled by the multiplicative
55     increment "increment", equal to 1% by default, or by the fixed increment
56     "dX" which is then multiplied by "increment" (hence in %), and centered FD
57     are used when the boolean "centeredDF" is true.
58     """
59     def __init__(self,
60             name                  = "FDApproximation",
61             Function              = None,
62             centeredDF            = False,
63             increment             = 0.01,
64             dX                    = None,
65             avoidingRedundancy    = True,
66             toleranceInRedundancy = 1.e-18,
67             lenghtOfRedundancy    = -1,
68             mpEnabled             = False,
69             mpWorkers             = None,
70             mfEnabled             = False,
71             ):
72         self.__name = str(name)
73         if mpEnabled:
74             try:
75                 import multiprocessing
76                 self.__mpEnabled = True
77             except ImportError:
78                 self.__mpEnabled = False
79         else:
80             self.__mpEnabled = False
81         self.__mpWorkers = mpWorkers
82         if self.__mpWorkers is not None and self.__mpWorkers < 1:
83             self.__mpWorkers = None
84         logging.debug("FDA Multiprocessing calculations : %s (number of processes : %s)"%(self.__mpEnabled,self.__mpWorkers))
85         #
86         if mfEnabled:
87             self.__mfEnabled = True
88         else:
89             self.__mfEnabled = False
90         logging.debug("FDA Multifunction calculations : %s"%(self.__mfEnabled,))
91         #
92         if avoidingRedundancy:
93             self.__avoidRC = True
94             self.__tolerBP = float(toleranceInRedundancy)
95             self.__lenghtRJ = int(lenghtOfRedundancy)
96             self.__listJPCP = [] # Jacobian Previous Calculated Points
97             self.__listJPCI = [] # Jacobian Previous Calculated Increment
98             self.__listJPCR = [] # Jacobian Previous Calculated Results
99             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
100             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
101         else:
102             self.__avoidRC = False
103         #
104         if self.__mpEnabled:
105             if isinstance(Function,types.FunctionType):
106                 logging.debug("FDA Multiprocessing calculations : FunctionType")
107                 self.__userFunction__name = Function.__name__
108                 try:
109                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
110                 except:
111                     mod = os.path.abspath(Function.__globals__['__file__'])
112                 if not os.path.isfile(mod):
113                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
114                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
115                 self.__userFunction__path = os.path.dirname(mod)
116                 del mod
117                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
118                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
119             elif isinstance(Function,types.MethodType):
120                 logging.debug("FDA Multiprocessing calculations : MethodType")
121                 self.__userFunction__name = Function.__name__
122                 try:
123                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
124                 except:
125                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
126                 if not os.path.isfile(mod):
127                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
128                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
129                 self.__userFunction__path = os.path.dirname(mod)
130                 del mod
131                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
132                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
133             else:
134                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
135         else:
136             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
137             self.__userFunction = self.__userOperator.appliedTo
138         #
139         self.__centeredDF = bool(centeredDF)
140         if abs(float(increment)) > 1.e-15:
141             self.__increment  = float(increment)
142         else:
143             self.__increment  = 0.01
144         if dX is None:
145             self.__dX     = None
146         else:
147             self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
148         logging.debug("FDA Reduction of duplicate calculations : %s"%self.__avoidRC)
149         if self.__avoidRC:
150             logging.debug("FDA Tolerance for duplicate detection : %.2e"%self.__tolerBP)
151
152     # ---------------------------------------------------------
153     def __doublon__(self, e, l, n, v=None):
154         __ac, __iac = False, -1
155         for i in range(len(l)-1,-1,-1):
156             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
157                 __ac, __iac = True, i
158                 if v is not None: logging.debug("FDA Case%s already computed, retrieving duplicate %i"%(v,__iac))
159                 break
160         return __ac, __iac
161
162     # ---------------------------------------------------------
163     def DirectOperator(self, X ):
164         """
165         Direct computation using the provided function.
166         """
167         logging.debug("FDA DirectOperator computation (explicit)")
168         if self.__mfEnabled:
169             _HX = self.__userFunction( X, argsAsSerie = True )
170         else:
171             _X = numpy.asmatrix(numpy.ravel( X )).T
172             _HX = numpy.ravel(self.__userFunction( _X ))
173         #
174         return _HX
175
176     # ---------------------------------------------------------
177     def TangentMatrix(self, X ):
178         """
179         Computation of the tangent operator as the Jacobian by finite
180         differences, that is the gradient of H at X. Directional finite
181         differences are used around the point X. X is a numpy.matrix.
182
183         Centered finite differences (second-order approximation):
184         1/ For each component i of X, the perturbation dX[i] is added to and
185            subtracted from the component X[i], to build X_plus_dXi and
186            X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
187            HX_moins_dXi = H( X_moins_dXi ) are computed
188         2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided by
189            the step 2*dXi
190         3/ Each result, component by component, becomes a column of the Jacobian
191
192         Non-centered finite differences (first-order approximation):
193         1/ For each component i of X, the perturbation dX[i] is added to the
194            component X[i] to build X_plus_dXi, and the response
195            HX_plus_dXi = H( X_plus_dXi ) is computed
196         2/ The central value HX = H(X) is computed
197         3/ The differences (HX_plus_dXi-HX) are formed and divided by
198            the step dXi
199         4/ Each result, component by component, becomes a column of the Jacobian
200
201         """
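        # In formula form, with e_i the i-th canonical basis vector:
        #   centered FD     : J[:,i] ~ ( H(X + dX[i] e_i) - H(X - dX[i] e_i) ) / (2 dX[i])
        #   non-centered FD : J[:,i] ~ ( H(X + dX[i] e_i) - H(X) ) / dX[i]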
202         logging.debug("FDA Start of the Jacobian computation")
203         logging.debug("FDA   Increment of............: %s*X"%float(self.__increment))
204         logging.debug("FDA   Centered approximation..: %s"%(self.__centeredDF))
205         #
206         if X is None or len(X)==0:
207             raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
208         #
209         _X = numpy.asmatrix(numpy.ravel( X )).T
210         #
211         if self.__dX is None:
212             _dX  = self.__increment * _X
213         else:
214             _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
215         #
216         if (_dX == 0.).any():
217             moyenne = _dX.mean()
218             if moyenne == 0.:
219                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
220             else:
221                 _dX = numpy.where( _dX == 0., moyenne, _dX )
222         #
223         __alreadyCalculated  = False
224         if self.__avoidRC:
225             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
226             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
227             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
228                 __alreadyCalculated, __i = True, __alreadyCalculatedP
229                 logging.debug("FDA Case J already computed, retrieving duplicate %i"%__i)
230         #
231         if __alreadyCalculated:
232             logging.debug("FDA   Jacobian computation (by retrieving duplicate %i)"%__i)
233             _Jacobienne = self.__listJPCR[__i]
234         else:
235             logging.debug("FDA   Jacobian computation (explicit)")
236             if self.__centeredDF:
237                 #
238                 if self.__mpEnabled and not self.__mfEnabled:
239                     funcrepr = {
240                         "__userFunction__path" : self.__userFunction__path,
241                         "__userFunction__modl" : self.__userFunction__modl,
242                         "__userFunction__name" : self.__userFunction__name,
243                     }
244                     _jobs = []
245                     for i in range( len(_dX) ):
246                         _dXi            = _dX[i]
247                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
248                         _X_plus_dXi[i]  = _X[i] + _dXi
249                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
250                         _X_moins_dXi[i] = _X[i] - _dXi
251                         #
252                         _jobs.append( (_X_plus_dXi,  funcrepr) )
253                         _jobs.append( (_X_moins_dXi, funcrepr) )
254                     #
255                     import multiprocessing
256                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
257                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
258                     self.__pool.close()
259                     self.__pool.join()
260                     #
261                     _Jacobienne  = []
262                     for i in range( len(_dX) ):
263                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
264                     #
265                 elif self.__mfEnabled:
266                     _xserie = []
267                     for i in range( len(_dX) ):
268                         _dXi            = _dX[i]
269                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
270                         _X_plus_dXi[i]  = _X[i] + _dXi
271                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
272                         _X_moins_dXi[i] = _X[i] - _dXi
273                         #
274                         _xserie.append( _X_plus_dXi )
275                         _xserie.append( _X_moins_dXi )
276                     #
277                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
278                     #
279                     _Jacobienne  = []
280                     for i in range( len(_dX) ):
281                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
282                     #
283                 else:
284                     _Jacobienne  = []
285                     for i in range( _dX.size ):
286                         _dXi            = _dX[i]
287                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
288                         _X_plus_dXi[i]  = _X[i] + _dXi
289                         _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
290                         _X_moins_dXi[i] = _X[i] - _dXi
291                         #
292                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
293                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
294                         #
295                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
296                 #
297             else:
298                 #
299                 if self.__mpEnabled and not self.__mfEnabled:
300                     funcrepr = {
301                         "__userFunction__path" : self.__userFunction__path,
302                         "__userFunction__modl" : self.__userFunction__modl,
303                         "__userFunction__name" : self.__userFunction__name,
304                     }
305                     _jobs = []
306                     _jobs.append( (_X.A1, funcrepr) )
307                     for i in range( len(_dX) ):
308                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
309                         _X_plus_dXi[i] = _X[i] + _dX[i]
310                         #
311                         _jobs.append( (_X_plus_dXi, funcrepr) )
312                     #
313                     import multiprocessing
314                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
315                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
316                     self.__pool.close()
317                     self.__pool.join()
318                     #
319                     _HX = _HX_plus_dX.pop(0)
320                     #
321                     _Jacobienne = []
322                     for i in range( len(_dX) ):
323                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
324                     #
325                 elif self.__mfEnabled:
326                     _xserie = []
327                     _xserie.append( _X.A1 )
328                     for i in range( len(_dX) ):
329                         _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
330                         _X_plus_dXi[i] = _X[i] + _dX[i]
331                         #
332                         _xserie.append( _X_plus_dXi )
333                     #
334                     _HX_plus_dX = self.DirectOperator( _xserie )
335                     #
336                     _HX = _HX_plus_dX.pop(0)
337                     #
338                     _Jacobienne = []
339                     for i in range( len(_dX) ):
340                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
341                     #
342                 else:
343                     _Jacobienne  = []
344                     _HX = self.DirectOperator( _X )
345                     for i in range( _dX.size ):
346                         _dXi            = _dX[i]
347                         _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
348                         _X_plus_dXi[i]  = _X[i] + _dXi
349                         #
350                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
351                         #
352                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
353                 #
354             #
355             _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
356             if self.__avoidRC:
357                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
358                 while len(self.__listJPCP) > self.__lenghtRJ:
359                     self.__listJPCP.pop(0)
360                     self.__listJPCI.pop(0)
361                     self.__listJPCR.pop(0)
362                     self.__listJPPN.pop(0)
363                     self.__listJPIN.pop(0)
364                 self.__listJPCP.append( copy.copy(_X) )
365                 self.__listJPCI.append( copy.copy(_dX) )
366                 self.__listJPCR.append( copy.copy(_Jacobienne) )
367                 self.__listJPPN.append( numpy.linalg.norm(_X) )
368                 self.__listJPIN.append( numpy.linalg.norm(_dX) )
369         #
370         logging.debug("FDA End of the Jacobian computation")
371         #
372         return _Jacobienne
373
374     # ---------------------------------------------------------
375     def TangentOperator(self, paire ):
376         """
377         Tangent computation using the Jacobian.
378         """
379         if self.__mfEnabled:
380             assert len(paire) == 1, "Incorrect length of arguments"
381             _paire = paire[0]
382             assert len(_paire) == 2, "Incorrect number of arguments"
383         else:
384             assert len(paire) == 2, "Incorrect number of arguments"
385             _paire = paire
386         X, dX = _paire
387         _Jacobienne = self.TangentMatrix( X )
388         if dX is None or len(dX) == 0:
389             #
390             # Computation of the matrix form if the second argument is None
391             # -------------------------------------------------------------
392             if self.__mfEnabled: return [_Jacobienne,]
393             else:                return _Jacobienne
394         else:
395             #
396             # Computation of the linearized value of H at X applied to dX
397             # ------------------------------------------------------
398             _dX = numpy.asmatrix(numpy.ravel( dX )).T
399             _HtX = numpy.dot(_Jacobienne, _dX)
400             if self.__mfEnabled: return [_HtX.A1,]
401             else:                return _HtX.A1
402
403     # ---------------------------------------------------------
404     def AdjointOperator(self, paire ):
405         """
406         Adjoint computation using the Jacobian.
407         """
408         if self.__mfEnabled:
409             assert len(paire) == 1, "Incorrect length of arguments"
410             _paire = paire[0]
411             assert len(_paire) == 2, "Incorrect number of arguments"
412         else:
413             assert len(paire) == 2, "Incorrect number of arguments"
414             _paire = paire
415         X, Y = _paire
416         _JacobienneT = self.TangentMatrix( X ).T
417         if Y is None or len(Y) == 0:
418             #
419             # Computation of the matrix form if the second argument is None
420             # -------------------------------------------------------------
421             if self.__mfEnabled: return [_JacobienneT,]
422             else:                return _JacobienneT
423         else:
424             #
425             # Computation of the adjoint value at X applied to Y
426             # --------------------------------------------------
427             _Y = numpy.asmatrix(numpy.ravel( Y )).T
428             _HaY = numpy.dot(_JacobienneT, _Y)
429             if self.__mfEnabled: return [_HaY.A1,]
430             else:                return _HaY.A1
431
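# Minimal usage sketch (hypothetical names H, X0, dX0 and Y0, for illustration only):
#   FDA = FDApproximation( Function = H, increment = 0.01, centeredDF = True )
#   HX  = FDA.DirectOperator( X0 )             # H(X0)
#   TdX = FDA.TangentOperator( (X0, dX0) )     # finite-difference Jacobian at X0 applied to dX0
#   AY  = FDA.AdjointOperator( (X0, Y0) )      # transposed Jacobian at X0 applied to Y0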
432 # ==============================================================================
433 def mmqr(
434         func     = None,
435         x0       = None,
436         fprime   = None,
437         bounds   = None,
438         quantile = 0.5,
439         maxfun   = 15000,
440         toler    = 1.e-06,
441         y        = None,
442         ):
443     """
444     Computer implementation of the MMQR algorithm, based on the publication:
445     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
446     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
447     """
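    # The algorithm minimizes the quantile regression criterion sum_i rho_q(r_i),
    # where r_i = y_i - func(x)_i and rho_q(r) = q*r if r >= 0, (q-1)*r otherwise,
    # by repeatedly majorizing it with the weighted least-squares surrogate built
    # below from the weights 1/(epsilon+|r_i|), following Hunter & Lange (2000).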
448     #
449     # Retrieval of the initial data and information
450     # --------------------------------------------------
451     variables = numpy.ravel( x0 )
452     mesures   = numpy.ravel( y )
453     increment = sys.float_info[0]
454     p         = variables.size
455     n         = mesures.size
456     quantile  = float(quantile)
457     #
458     # Computation of the MM parameters
459     # ---------------------------
460     tn      = float(toler) / n
461     e0      = -tn / math.log(tn)
462     epsilon = (e0-tn)/(1+math.log(e0))
463     #
464     # Initialization computations
465     # ------------------------
466     residus  = mesures - numpy.ravel( func( variables ) )
467     poids    = 1./(epsilon+numpy.abs(residus))
468     veps     = 1. - 2. * quantile - residus * poids
469     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
470     iteration = 0
471     #
472     # Iterative search
473     # -------------------
474     while (increment > toler) and (iteration < maxfun) :
475         iteration += 1
476         #
477         Derivees  = numpy.array(fprime(variables))
478         Derivees  = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it goes through YACS pipes
479         DeriveesT = Derivees.transpose()
480         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
481         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
482         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
483         #
484         variables = variables + step
485         if bounds is not None:
486             # Caution: avoid an infinite loop if an interval is too small
487             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
488                 step      = step/2.
489                 variables = variables - step
490         residus   = mesures - numpy.ravel( func(variables) )
491         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
492         #
493         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
494             step      = step/2.
495             variables = variables - step
496             residus   = mesures - numpy.ravel( func(variables) )
497             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
498         #
499         increment     = lastsurrogate-surrogate
500         poids         = 1./(epsilon+numpy.abs(residus))
501         veps          = 1. - 2. * quantile - residus * poids
502         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
503     #
504     # Discrepancy measure
505     # --------------
506     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
507     #
508     return variables, Ecart, [n,p,iteration,increment,0]
509
510 # ==============================================================================
511 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
512     "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
513     #
514     _bgcenter = numpy.ravel(_bgcenter)[:,None]
515     if _nbmembers < 1:
516         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
517     #
518     if _bgcovariance is None:
519         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
520     else:
521         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
522         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
523     #
524     return BackgroundEnsemble
525
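# Minimal usage sketch (hypothetical xb and B, for illustration only):
#   E = EnsembleOfCenteredPerturbations( xb, B, 50 )   # (xb.size, 50) array, columns drawn around xb with covariance B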
526 # ==============================================================================
527 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
528     "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
529     def __CenteredRandomAnomalies(Zr, N):
530         """
531         Generates a matrix of N random anomalies centered on Zr, following the
532         handwritten notes of MB and consistent with the code of PS, with eps = -1
533         """
534         eps = -1
535         Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
536         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
537         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
538         Q = numpy.dot(Q,R)
539         Zr = numpy.dot(Q,Zr)
540         return Zr.T
541     #
542     _bgcenter = numpy.ravel(_bgcenter)[:,None]
543     if _nbmembers < 1:
544         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
545     if _bgcovariance is None:
546         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
547     else:
548         if _withSVD:
549             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
550             _nbctl = _bgcenter.size
551             if _nbmembers > _nbctl:
552                 _Z = numpy.concatenate((numpy.dot(
553                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
554                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
555             else:
556                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
557             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
558             BackgroundEnsemble = _bgcenter + _Zca
559         else:
560             if max(abs(_bgcovariance.flatten())) > 0:
561                 _nbctl = _bgcenter.size
562                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
563                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
564                 BackgroundEnsemble = _bgcenter + _Zca
565             else:
566                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
567     #
568     return BackgroundEnsemble
569
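# Minimal usage sketch (hypothetical xb and B, for illustration only):
#   E = EnsembleOfBackgroundPerturbations( xb, B, 50 )   # 50 members whose anomalies are centered by construction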
570 # ==============================================================================
571 def EnsembleOfAnomalies( _ensemble, _optmean = None):
572     "Returns the centered anomalies from an ensemble of size StateSize*NbOfMembers"
573     if _optmean is None:
574         Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
575     else:
576         Em = numpy.ravel(_optmean)[:,numpy.newaxis]
577     #
578     return numpy.asarray(_ensemble) - Em
579
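# In matrix form: A = E - mean(E, axis=1), one centered anomaly per column of the ensemble E.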
580 # ==============================================================================
581 def CovarianceInflation(
582         InputCovOrEns,
583         InflationType   = None,
584         InflationFactor = None,
585         BackgroundCov   = None,
586         ):
587     """
588     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
589
590     Synthèse : Hunt 2007, section 2.3.5
591     """
592     if InflationFactor is None:
593         return InputCovOrEns
594     else:
595         InflationFactor = float(InflationFactor)
596     #
597     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
598         if InflationFactor < 1.:
599             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
600         if InflationFactor < 1.+mpr:
601             return InputCovOrEns
602         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
603     #
604     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
605         if InflationFactor < 1.:
606             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
607         if InflationFactor < 1.+mpr:
608             return InputCovOrEns
609         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
610         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
611             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
612     #
613     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
614         if InflationFactor < 0.:
615             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
616         if InflationFactor < mpr:
617             return InputCovOrEns
618         __n, __m = numpy.asarray(InputCovOrEns).shape
619         if __n != __m:
620             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
621         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
622     #
623     elif InflationType == "HybridOnBackgroundCovariance":
624         if InflationFactor < 0.:
625             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
626         if InflationFactor < mpr:
627             return InputCovOrEns
628         __n, __m = numpy.asarray(InputCovOrEns).shape
629         if __n != __m:
630             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
631         if BackgroundCov is None:
632             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
633         if InputCovOrEns.shape != BackgroundCov.shape:
634             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
635         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
636     #
637     elif InflationType == "Relaxation":
638         raise NotImplementedError("InflationType Relaxation")
639     #
640     else:
641         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
642     #
643     return OutputCovOrEns
644
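# Minimal usage sketch (hypothetical ensemble EXa, for illustration only):
#   EXa = CovarianceInflation( EXa, "MultiplicativeOnAnalysisAnomalies", 1.02 )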
645 # ==============================================================================
646 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
647     """
648     Multi-step and multi-method 3DVAR
649     """
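    # "oneCycle" is expected to have the same signature as std3dvar or van3dvar
    # below, i.e. oneCycle(selfA, Xb, Y, U, HO, EM, CM, R, B, Q); it is called
    # once per time step on the forecast state.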
650     #
651     # Initialisation
652     # --------------
653     Xn = numpy.ravel(Xb).reshape((-1,1))
654     #
655     if selfA._parameters["EstimationOf"] == "State":
656         M = EM["Direct"].appliedTo
657         #
658         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
659             selfA.StoredVariables["Analysis"].store( Xn )
660             if selfA._toStore("APosterioriCovariance"):
661                 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
662                 else:                         Pn = B
663                 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
664             if selfA._toStore("ForecastState"):
665                 selfA.StoredVariables["ForecastState"].store( Xn )
666     #
667     if hasattr(Y,"stepnumber"):
668         duration = Y.stepnumber()
669     else:
670         duration = 2
671     #
672     # Multi-step
673     # ---------
674     for step in range(duration-1):
675         if hasattr(Y,"store"):
676             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
677         else:
678             Ynpu = numpy.ravel( Y ).reshape((-1,1))
679         #
680         if selfA._parameters["EstimationOf"] == "State": # Forecast
681             Xn = selfA.StoredVariables["Analysis"][-1]
682             Xn_predicted = M( Xn )
683             if selfA._toStore("ForecastState"):
684                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
685         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
686             # --- > By principle, M = Id, Q = 0
687             Xn_predicted = Xn
688         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
689         #
690         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
691     #
692     return 0
693
694 # ==============================================================================
695 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
696     """
697     3DVAR
698     """
699     #
700     # Initialisations
701     # ---------------
702     #
703     # Opérateurs
704     Hm = HO["Direct"].appliedTo
705     Ha = HO["Adjoint"].appliedInXTo
706     #
707     # Possible use of a precomputed H(Xb) vector
708     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
709         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
710     else:
711         HXb = Hm( Xb )
712     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
713     if Y.size != HXb.size:
714         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
715     if max(Y.shape) != max(HXb.shape):
716         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
717     #
718     if selfA._toStore("JacobianMatrixAtBackground"):
719         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
720         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
721         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
722     #
723     # Precomputation of the inverses of B and R
724     BI = B.getI()
725     RI = R.getI()
726     #
727     # Starting point of the optimization
728     Xini = selfA._parameters["InitializationPoint"]
729     #
730     # Definition of the cost function
731     # ------------------------------
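    # The cost function below is the standard 3DVAR functional
    #   J(x) = 1/2 (x-Xb)^T B^{-1} (x-Xb) + 1/2 (Y-H(x))^T R^{-1} (Y-H(x)),
    # split into its background part Jb and its observation part Jo.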
732     def CostFunction(x):
733         _X  = numpy.asmatrix(numpy.ravel( x )).T
734         if selfA._parameters["StoreInternalVariables"] or \
735             selfA._toStore("CurrentState") or \
736             selfA._toStore("CurrentOptimum"):
737             selfA.StoredVariables["CurrentState"].store( _X )
738         _HX = Hm( _X )
739         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
740         _Innovation = Y - _HX
741         if selfA._toStore("SimulatedObservationAtCurrentState") or \
742             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
743             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
744         if selfA._toStore("InnovationAtCurrentState"):
745             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
746         #
747         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
748         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
749         J   = Jb + Jo
750         #
751         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
752         selfA.StoredVariables["CostFunctionJb"].store( Jb )
753         selfA.StoredVariables["CostFunctionJo"].store( Jo )
754         selfA.StoredVariables["CostFunctionJ" ].store( J )
755         if selfA._toStore("IndexOfOptimum") or \
756             selfA._toStore("CurrentOptimum") or \
757             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
758             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
759             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
760             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
761             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
762         if selfA._toStore("IndexOfOptimum"):
763             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
764         if selfA._toStore("CurrentOptimum"):
765             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
766         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
767             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
768         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
769             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
770         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
771             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
772         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
773             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
774         return J
775     #
776     def GradientOfCostFunction(x):
777         _X      = numpy.asmatrix(numpy.ravel( x )).T
778         _HX     = Hm( _X )
779         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
780         GradJb  = BI * (_X - Xb)
781         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
782         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
783         return GradJ
784     #
785     # Minimization of the functional
786     # --------------------------------
787     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
788     #
789     if selfA._parameters["Minimizer"] == "LBFGSB":
790         if "0.19" <= scipy.version.version <= "1.1.0":
791             import lbfgsbhlt as optimiseur
792         else:
793             import scipy.optimize as optimiseur
794         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
795             func        = CostFunction,
796             x0          = Xini,
797             fprime      = GradientOfCostFunction,
798             args        = (),
799             bounds      = selfA._parameters["Bounds"],
800             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
801             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
802             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
803             iprint      = selfA._parameters["optiprint"],
804             )
805         nfeval = Informations['funcalls']
806         rc     = Informations['warnflag']
807     elif selfA._parameters["Minimizer"] == "TNC":
808         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
809             func        = CostFunction,
810             x0          = Xini,
811             fprime      = GradientOfCostFunction,
812             args        = (),
813             bounds      = selfA._parameters["Bounds"],
814             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
815             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
816             ftol        = selfA._parameters["CostDecrementTolerance"],
817             messages    = selfA._parameters["optmessages"],
818             )
819     elif selfA._parameters["Minimizer"] == "CG":
820         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
821             f           = CostFunction,
822             x0          = Xini,
823             fprime      = GradientOfCostFunction,
824             args        = (),
825             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
826             gtol        = selfA._parameters["GradientNormTolerance"],
827             disp        = selfA._parameters["optdisp"],
828             full_output = True,
829             )
830     elif selfA._parameters["Minimizer"] == "NCG":
831         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
832             f           = CostFunction,
833             x0          = Xini,
834             fprime      = GradientOfCostFunction,
835             args        = (),
836             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
837             avextol     = selfA._parameters["CostDecrementTolerance"],
838             disp        = selfA._parameters["optdisp"],
839             full_output = True,
840             )
841     elif selfA._parameters["Minimizer"] == "BFGS":
842         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
843             f           = CostFunction,
844             x0          = Xini,
845             fprime      = GradientOfCostFunction,
846             args        = (),
847             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
848             gtol        = selfA._parameters["GradientNormTolerance"],
849             disp        = selfA._parameters["optdisp"],
850             full_output = True,
851             )
852     else:
853         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
854     #
855     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
856     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
857     #
858     # Correction to work around a TNC bug in the returned Minimum
859     # ----------------------------------------------------------------
860     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
861         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
862     #
863     # Retrieval of the analysis
864     # ----------------------
865     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
866     #
867     selfA.StoredVariables["Analysis"].store( Xa )
868     #
869     if selfA._toStore("OMA") or \
870         selfA._toStore("SigmaObs2") or \
871         selfA._toStore("SimulationQuantiles") or \
872         selfA._toStore("SimulatedObservationAtOptimum"):
873         if selfA._toStore("SimulatedObservationAtCurrentState"):
874             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
875         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
876             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
877         else:
878             HXa = Hm( Xa )
879     #
880     # Computation of the analysis covariance
881     # ---------------------------------
882     if selfA._toStore("APosterioriCovariance") or \
883         selfA._toStore("SimulationQuantiles") or \
884         selfA._toStore("JacobianMatrixAtOptimum") or \
885         selfA._toStore("KalmanGainAtOptimum"):
886         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
887         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
888     if selfA._toStore("APosterioriCovariance") or \
889         selfA._toStore("SimulationQuantiles") or \
890         selfA._toStore("KalmanGainAtOptimum"):
891         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
892         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
893     if selfA._toStore("APosterioriCovariance") or \
894         selfA._toStore("SimulationQuantiles"):
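        # The a posteriori covariance is A = (B^{-1} + H^T R^{-1} H)^{-1}: the Hessian of J
        # is built column by column by applying B^{-1} + H^T R^{-1} H to each canonical
        # basis vector _ee, and A is obtained by inverting the resulting matrix.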
895         HessienneI = []
896         nb = Xa.size
897         for i in range(nb):
898             _ee    = numpy.matrix(numpy.zeros(nb)).T
899             _ee[i] = 1.
900             _HtEE  = numpy.dot(HtM,_ee)
901             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
902             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
903         HessienneI = numpy.matrix( HessienneI )
904         A = HessienneI.I
905         if min(A.shape) != max(A.shape):
906             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
907         if (numpy.diag(A) < 0).any():
908             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
909         if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
910             try:
911                 L = numpy.linalg.cholesky( A )
912             except:
913                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
914     if selfA._toStore("APosterioriCovariance"):
915         selfA.StoredVariables["APosterioriCovariance"].store( A )
916     if selfA._toStore("JacobianMatrixAtOptimum"):
917         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
918     if selfA._toStore("KalmanGainAtOptimum"):
919         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
920         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
921         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
922     #
923     # Additional computations and/or storage
924     # ---------------------------------------
925     if selfA._toStore("Innovation") or \
926         selfA._toStore("SigmaObs2") or \
927         selfA._toStore("MahalanobisConsistency") or \
928         selfA._toStore("OMB"):
929         d  = Y - HXb
930     if selfA._toStore("Innovation"):
931         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
932     if selfA._toStore("BMA"):
933         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
934     if selfA._toStore("OMA"):
935         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
936     if selfA._toStore("OMB"):
937         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
938     if selfA._toStore("SigmaObs2"):
939         TraceR = R.trace(Y.size)
940         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
941     if selfA._toStore("MahalanobisConsistency"):
942         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
943     if selfA._toStore("SimulationQuantiles"):
944         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
945         HXa  = numpy.matrix(numpy.ravel( HXa )).T
946         YfQ  = None
947         for i in range(nech):
948             if selfA._parameters["SimulationForQuantiles"] == "Linear":
949                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
950                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
951                 Yr = HXa + dYr
952             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
953                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
954                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
955             if YfQ is None:
956                 YfQ = Yr
957             else:
958                 YfQ = numpy.hstack((YfQ,Yr))
959         YfQ.sort(axis=-1)
960         YQ = None
961         for quantile in selfA._parameters["Quantiles"]:
962             if not (0. <= float(quantile) <= 1.): continue
963             indice = int(nech * float(quantile) - 1./nech)
964             if YQ is None: YQ = YfQ[:,indice]
965             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
966         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
967     if selfA._toStore("SimulatedObservationAtBackground"):
968         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
969     if selfA._toStore("SimulatedObservationAtOptimum"):
970         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
971     #
972     return 0
973
974 # ==============================================================================
975 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
976     """
977     3DVAR variational analysis with no inversion of B
978     """
979     #
980     # Initialisations
981     # ---------------
982     #
983     # Operators
984     Hm = HO["Direct"].appliedTo
985     Ha = HO["Adjoint"].appliedInXTo
986     #
987     # Precomputation of the transpose of B and the inverse of R
988     BT = B.getT()
989     RI = R.getI()
990     #
991     # Starting point of the optimization
992     Xini = numpy.zeros(Xb.shape)
993     #
994     # Definition of the cost function
995     # ------------------------------
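    # The optimization is done on the control variable v, with x = Xb + B v, so that
    #   J(v) = 1/2 v^T B v + 1/2 (Y-H(Xb+B v))^T R^{-1} (Y-H(Xb+B v)),
    # and only the transpose of B (never its inverse) is needed in Jb and its gradient.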
996     def CostFunction(v):
997         _V = numpy.asmatrix(numpy.ravel( v )).T
998         _X = Xb + B * _V
999         if selfA._parameters["StoreInternalVariables"] or \
1000             selfA._toStore("CurrentState") or \
1001             selfA._toStore("CurrentOptimum"):
1002             selfA.StoredVariables["CurrentState"].store( _X )
1003         _HX = Hm( _X )
1004         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1005         _Innovation = Y - _HX
1006         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1007             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1008             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
1009         if selfA._toStore("InnovationAtCurrentState"):
1010             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1011         #
1012         Jb  = float( 0.5 * _V.T * BT * _V )
1013         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1014         J   = Jb + Jo
1015         #
1016         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1017         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1018         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1019         selfA.StoredVariables["CostFunctionJ" ].store( J )
1020         if selfA._toStore("IndexOfOptimum") or \
1021             selfA._toStore("CurrentOptimum") or \
1022             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1023             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1024             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1025             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1026             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1027         if selfA._toStore("IndexOfOptimum"):
1028             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1029         if selfA._toStore("CurrentOptimum"):
1030             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1031         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1032             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1033         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1034             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1035         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1036             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1037         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1038             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1039         return J
1040     #
1041     def GradientOfCostFunction(v):
1042         _V = numpy.asmatrix(numpy.ravel( v )).T
1043         _X = Xb + B * _V
1044         _HX     = Hm( _X )
1045         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
1046         GradJb  = BT * _V
1047         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
1048         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1049         return GradJ
1050     #
1051     # Minimization of the functional
1052     # --------------------------------
1053     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1054     #
1055     if selfA._parameters["Minimizer"] == "LBFGSB":
1056         if "0.19" <= scipy.version.version <= "1.1.0":
1057             import lbfgsbhlt as optimiseur
1058         else:
1059             import scipy.optimize as optimiseur
1060         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1061             func        = CostFunction,
1062             x0          = Xini,
1063             fprime      = GradientOfCostFunction,
1064             args        = (),
1065             bounds      = selfA._parameters["Bounds"],
1066             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1067             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1068             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1069             iprint      = selfA._parameters["optiprint"],
1070             )
1071         nfeval = Informations['funcalls']
1072         rc     = Informations['warnflag']
1073     elif selfA._parameters["Minimizer"] == "TNC":
1074         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1075             func        = CostFunction,
1076             x0          = Xini,
1077             fprime      = GradientOfCostFunction,
1078             args        = (),
1079             bounds      = selfA._parameters["Bounds"],
1080             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1081             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1082             ftol        = selfA._parameters["CostDecrementTolerance"],
1083             messages    = selfA._parameters["optmessages"],
1084             )
1085     elif selfA._parameters["Minimizer"] == "CG":
1086         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1087             f           = CostFunction,
1088             x0          = Xini,
1089             fprime      = GradientOfCostFunction,
1090             args        = (),
1091             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1092             gtol        = selfA._parameters["GradientNormTolerance"],
1093             disp        = selfA._parameters["optdisp"],
1094             full_output = True,
1095             )
1096     elif selfA._parameters["Minimizer"] == "NCG":
1097         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1098             f           = CostFunction,
1099             x0          = Xini,
1100             fprime      = GradientOfCostFunction,
1101             args        = (),
1102             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1103             avextol     = selfA._parameters["CostDecrementTolerance"],
1104             disp        = selfA._parameters["optdisp"],
1105             full_output = True,
1106             )
1107     elif selfA._parameters["Minimizer"] == "BFGS":
1108         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1109             f           = CostFunction,
1110             x0          = Xini,
1111             fprime      = GradientOfCostFunction,
1112             args        = (),
1113             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1114             gtol        = selfA._parameters["GradientNormTolerance"],
1115             disp        = selfA._parameters["optdisp"],
1116             full_output = True,
1117             )
1118     else:
1119         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1120     #
1121     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1122     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1123     #
1124     # Workaround for a TNC bug on the returned Minimum
1125     # -------------------------------------------------
1126     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1127         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1128         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1129     else:
1130         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
1131     #
1132     # Computation of the analysis
1133     # ---------------------------
1134     Xa = Minimum
1135     #
1136     selfA.StoredVariables["Analysis"].store( Xa )
1137     #
1138     if selfA._toStore("OMA") or \
1139         selfA._toStore("SigmaObs2") or \
1140         selfA._toStore("SimulationQuantiles") or \
1141         selfA._toStore("SimulatedObservationAtOptimum"):
1142         if selfA._toStore("SimulatedObservationAtCurrentState"):
1143             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1144         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1145             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1146         else:
1147             HXa = Hm( Xa )
1148     #
1149     # Computation of the analysis error covariance
1150     # ---------------------------------------------
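    # Note on the computation below: the loop assembles, column by column, the
    # Hessian of the cost function in its Gauss-Newton approximation,
    #     Hess = B^{-1} + H^T R^{-1} H,
    # and the a posteriori covariance is obtained as its inverse, A = Hess^{-1}.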
1151     if selfA._toStore("APosterioriCovariance") or \
1152         selfA._toStore("SimulationQuantiles") or \
1153         selfA._toStore("JacobianMatrixAtOptimum") or \
1154         selfA._toStore("KalmanGainAtOptimum"):
1155         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1156         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1157     if selfA._toStore("APosterioriCovariance") or \
1158         selfA._toStore("SimulationQuantiles") or \
1159         selfA._toStore("KalmanGainAtOptimum"):
1160         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1161         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1162     if selfA._toStore("APosterioriCovariance") or \
1163         selfA._toStore("SimulationQuantiles"):
1164         BI = B.getI()
1165         HessienneI = []
1166         nb = Xa.size
1167         for i in range(nb):
1168             _ee    = numpy.matrix(numpy.zeros(nb)).T
1169             _ee[i] = 1.
1170             _HtEE  = numpy.dot(HtM,_ee)
1171             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1172             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1173         HessienneI = numpy.matrix( HessienneI )
1174         A = HessienneI.I
1175         if min(A.shape) != max(A.shape):
1176             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1177         if (numpy.diag(A) < 0).any():
1178             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1179         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1180             try:
1181                 L = numpy.linalg.cholesky( A )
1182             except numpy.linalg.LinAlgError:
1183                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1184     if selfA._toStore("APosterioriCovariance"):
1185         selfA.StoredVariables["APosterioriCovariance"].store( A )
1186     if selfA._toStore("JacobianMatrixAtOptimum"):
1187         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1188     if selfA._toStore("KalmanGainAtOptimum"):
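        # Two algebraically equivalent expressions of the gain K = A H^T R^{-1}
        # are used below: the first one inverts a matrix of the size of Y, the
        # second one a matrix of the size of Xb, and the cheaper form is chosen
        # according to the relative sizes of Y and Xb.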
1189         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1190         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1191         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1192     #
1193     # Additional calculations and/or storage
1194     # ---------------------------------------
1195     if selfA._toStore("Innovation") or \
1196         selfA._toStore("SigmaObs2") or \
1197         selfA._toStore("MahalanobisConsistency") or \
1198         selfA._toStore("OMB"):
1199         d  = Y - HXb
1200     if selfA._toStore("Innovation"):
1201         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1202     if selfA._toStore("BMA"):
1203         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1204     if selfA._toStore("OMA"):
1205         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1206     if selfA._toStore("OMB"):
1207         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1208     if selfA._toStore("SigmaObs2"):
1209         TraceR = R.trace(Y.size)
1210         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1211     if selfA._toStore("MahalanobisConsistency"):
1212         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
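    # The simulation quantiles below are estimated by Monte Carlo sampling of the
    # analysis error: "Linear" propagates increments dX ~ N(0,A) through the
    # tangent operator around Xa, while "NonLinear" applies the full operator Hm
    # to states drawn from N(Xa,A); the simulated observations are then sorted
    # and the requested empirical quantiles are extracted.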
1213     if selfA._toStore("SimulationQuantiles"):
1214         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1215         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1216         YfQ  = None
1217         for i in range(nech):
1218             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1219                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1220                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1221                 Yr = HXa + dYr
1222             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1223                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1224                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1225             if YfQ is None:
1226                 YfQ = Yr
1227             else:
1228                 YfQ = numpy.hstack((YfQ,Yr))
1229         YfQ.sort(axis=-1)
1230         YQ = None
1231         for quantile in selfA._parameters["Quantiles"]:
1232             if not (0. <= float(quantile) <= 1.): continue
1233             indice = int(nech * float(quantile) - 1./nech)
1234             if YQ is None: YQ = YfQ[:,indice]
1235             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1236         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1237     if selfA._toStore("SimulatedObservationAtBackground"):
1238         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1239     if selfA._toStore("SimulatedObservationAtOptimum"):
1240         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1241     #
1242     return 0
1243
1244 # ==============================================================================
1245 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1246     """
1247     Incremental 3DVAR
1248     """
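    # Sketch of the algorithm as implemented below: each outer iteration
    # linearizes the observation operator around the current state Xr (tangent
    # Ht), and the inner loop minimizes the quadratic cost in the increment dx,
    #     J(dx) = 1/2 dx^T B^{-1} dx + 1/2 (d - Ht dx)^T R^{-1} (d - Ht dx),
    # with d = Y - H(Xb); the outer loop stops on the cost decrement or on the
    # maximum number of steps.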
1249     #
1250     # Initializations
1251     # ---------------
1252     #
1253     # Non-linear operator for the outer loop
1254     Hm = HO["Direct"].appliedTo
1255     #
1256     # Precompute the inverses of B and R
1257     BI = B.getI()
1258     RI = R.getI()
1259     #
1260     # Starting point of the optimization
1261     Xini = selfA._parameters["InitializationPoint"]
1262     #
1263     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1264     Innovation = Y - HXb
1265     #
1266     # Outer Loop
1267     # ----------
1268     iOuter = 0
1269     J      = 1./mpr
1270     DeltaJ = 1./mpr
1271     Xr     = Xini.reshape((-1,1))
1272     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1273         #
1274         # Inner Loop
1275         # ----------
1276         Ht = HO["Tangent"].asMatrix(Xr)
1277         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1278         #
1279         # Definition of the cost function
1280         # -------------------------------
1281         def CostFunction(dx):
1282             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1283             if selfA._parameters["StoreInternalVariables"] or \
1284                 selfA._toStore("CurrentState") or \
1285                 selfA._toStore("CurrentOptimum"):
1286                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1287             _HdX = Ht * _dX
1288             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1289             _dInnovation = Innovation - _HdX
1290             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1291                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1292                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1293             if selfA._toStore("InnovationAtCurrentState"):
1294                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1295             #
1296             Jb  = float( 0.5 * _dX.T * BI * _dX )
1297             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1298             J   = Jb + Jo
1299             #
1300             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1301             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1302             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1303             selfA.StoredVariables["CostFunctionJ" ].store( J )
1304             if selfA._toStore("IndexOfOptimum") or \
1305                 selfA._toStore("CurrentOptimum") or \
1306                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1307                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1308                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1309                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1310                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1311             if selfA._toStore("IndexOfOptimum"):
1312                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1313             if selfA._toStore("CurrentOptimum"):
1314                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1315             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1316                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1317             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1318                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1319             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1320                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1321             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1322                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1323             return J
1324         #
1325         def GradientOfCostFunction(dx):
1326             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1327             _HdX         = Ht * _dX
1328             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1329             _dInnovation = Innovation - _HdX
1330             GradJb       = BI * _dX
1331             GradJo       = - Ht.T @ (RI * _dInnovation)
1332             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1333             return GradJ
1334         #
1335         # Minimization of the cost function
1336         # ----------------------------------
1337         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1338         #
1339         if selfA._parameters["Minimizer"] == "LBFGSB":
1341             if "0.19" <= scipy.version.version <= "1.1.0":
1342                 import lbfgsbhlt as optimiseur
1343             else:
1344                 import scipy.optimize as optimiseur
1345             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1346                 func        = CostFunction,
1347                 x0          = numpy.zeros(Xini.size),
1348                 fprime      = GradientOfCostFunction,
1349                 args        = (),
1350                 bounds      = selfA._parameters["Bounds"],
1351                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1352                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1353                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1354                 iprint      = selfA._parameters["optiprint"],
1355                 )
1356             nfeval = Informations['funcalls']
1357             rc     = Informations['warnflag']
1358         elif selfA._parameters["Minimizer"] == "TNC":
1359             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1360                 func        = CostFunction,
1361                 x0          = numpy.zeros(Xini.size),
1362                 fprime      = GradientOfCostFunction,
1363                 args        = (),
1364                 bounds      = selfA._parameters["Bounds"],
1365                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1366                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1367                 ftol        = selfA._parameters["CostDecrementTolerance"],
1368                 messages    = selfA._parameters["optmessages"],
1369                 )
1370         elif selfA._parameters["Minimizer"] == "CG":
1371             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1372                 f           = CostFunction,
1373                 x0          = numpy.zeros(Xini.size),
1374                 fprime      = GradientOfCostFunction,
1375                 args        = (),
1376                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1377                 gtol        = selfA._parameters["GradientNormTolerance"],
1378                 disp        = selfA._parameters["optdisp"],
1379                 full_output = True,
1380                 )
1381         elif selfA._parameters["Minimizer"] == "NCG":
1382             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1383                 f           = CostFunction,
1384                 x0          = numpy.zeros(Xini.size),
1385                 fprime      = GradientOfCostFunction,
1386                 args        = (),
1387                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1388                 avextol     = selfA._parameters["CostDecrementTolerance"],
1389                 disp        = selfA._parameters["optdisp"],
1390                 full_output = True,
1391                 )
1392         elif selfA._parameters["Minimizer"] == "BFGS":
1393             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1394                 f           = CostFunction,
1395                 x0          = numpy.zeros(Xini.size),
1396                 fprime      = GradientOfCostFunction,
1397                 args        = (),
1398                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1399                 gtol        = selfA._parameters["GradientNormTolerance"],
1400                 disp        = selfA._parameters["optdisp"],
1401                 full_output = True,
1402                 )
1403         else:
1404             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1405         #
1406         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1407         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1408         #
1409         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1410             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1411             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1412         else:
1413             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1414         #
1415         Xr     = Minimum
1416         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
             J      = selfA.StoredVariables["CostFunctionJ" ][-1] # Update the reference cost so that DeltaJ measures the decrement between two successive outer loops
1417         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1418     #
1419     # Computation of the analysis
1420     # ---------------------------
1421     Xa = Xr
1422     #
1423     selfA.StoredVariables["Analysis"].store( Xa )
1424     #
1425     if selfA._toStore("OMA") or \
1426         selfA._toStore("SigmaObs2") or \
1427         selfA._toStore("SimulationQuantiles") or \
1428         selfA._toStore("SimulatedObservationAtOptimum"):
1429         if selfA._toStore("SimulatedObservationAtCurrentState"):
1430             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1431         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1432             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1433         else:
1434             HXa = Hm( Xa )
1435     #
1436     # Computation of the analysis error covariance
1437     # ---------------------------------------------
1438     if selfA._toStore("APosterioriCovariance") or \
1439         selfA._toStore("SimulationQuantiles") or \
1440         selfA._toStore("JacobianMatrixAtOptimum") or \
1441         selfA._toStore("KalmanGainAtOptimum"):
1442         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1443         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1444     if selfA._toStore("APosterioriCovariance") or \
1445         selfA._toStore("SimulationQuantiles") or \
1446         selfA._toStore("KalmanGainAtOptimum"):
1447         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1448         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1449     if selfA._toStore("APosterioriCovariance") or \
1450         selfA._toStore("SimulationQuantiles"):
1451         HessienneI = []
1452         nb = Xa.size
1453         for i in range(nb):
1454             _ee    = numpy.matrix(numpy.zeros(nb)).T
1455             _ee[i] = 1.
1456             _HtEE  = numpy.dot(HtM,_ee)
1457             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1458             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1459         HessienneI = numpy.matrix( HessienneI )
1460         A = HessienneI.I
1461         if min(A.shape) != max(A.shape):
1462             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1463         if (numpy.diag(A) < 0).any():
1464             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1465         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1466             try:
1467                 L = numpy.linalg.cholesky( A )
1468             except numpy.linalg.LinAlgError:
1469                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1470     if selfA._toStore("APosterioriCovariance"):
1471         selfA.StoredVariables["APosterioriCovariance"].store( A )
1472     if selfA._toStore("JacobianMatrixAtOptimum"):
1473         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1474     if selfA._toStore("KalmanGainAtOptimum"):
1475         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1476         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1477         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1478     #
1479     # Additional calculations and/or storage
1480     # ---------------------------------------
1481     if selfA._toStore("Innovation") or \
1482         selfA._toStore("SigmaObs2") or \
1483         selfA._toStore("MahalanobisConsistency") or \
1484         selfA._toStore("OMB"):
1485         d  = Y - HXb
1486     if selfA._toStore("Innovation"):
1487         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1488     if selfA._toStore("BMA"):
1489         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1490     if selfA._toStore("OMA"):
1491         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1492     if selfA._toStore("OMB"):
1493         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1494     if selfA._toStore("SigmaObs2"):
1495         TraceR = R.trace(Y.size)
1496         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1497     if selfA._toStore("MahalanobisConsistency"):
1498         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1499     if selfA._toStore("SimulationQuantiles"):
1500         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1501         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1502         YfQ  = None
1503         for i in range(nech):
1504             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1505                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1506                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1507                 Yr = HXa + dYr
1508             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1509                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1510                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1511             if YfQ is None:
1512                 YfQ = Yr
1513             else:
1514                 YfQ = numpy.hstack((YfQ,Yr))
1515         YfQ.sort(axis=-1)
1516         YQ = None
1517         for quantile in selfA._parameters["Quantiles"]:
1518             if not (0. <= float(quantile) <= 1.): continue
1519             indice = int(nech * float(quantile) - 1./nech)
1520             if YQ is None: YQ = YfQ[:,indice]
1521             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1522         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1523     if selfA._toStore("SimulatedObservationAtBackground"):
1524         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1525     if selfA._toStore("SimulatedObservationAtOptimum"):
1526         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1527     #
1528     return 0
1529
1530 # ==============================================================================
1531 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1532     """
1533     3DVAR PSAS
1534     """
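    # Sketch of the formulation as implemented below: PSAS works in observation
    # space, minimizing over the dual variable w the cost
    #     J(w) = 1/2 w^T (H B H^T + R) w - w^T d,     with d = Y - H(Xb),
    # the analysis being recovered in state space as Xa = Xb + B H^T w.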
1535     #
1536     # Initializations
1537     # ---------------
1538     #
1539     # Operators
1540     Hm = HO["Direct"].appliedTo
1541     #
1542     # Possible use of a precomputed H(Xb) vector
1543     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1544         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1545     else:
1546         HXb = Hm( Xb )
1547     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1548     if Y.size != HXb.size:
1549         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
1550     if max(Y.shape) != max(HXb.shape):
1551         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
1552     #
1553     if selfA._toStore("JacobianMatrixAtBackground"):
1554         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1555         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1556         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1557     #
1558     Ht = HO["Tangent"].asMatrix(Xb)
1559     BHT = B * Ht.T
1560     HBHTpR = R + Ht * BHT
1561     Innovation = Y - HXb
1562     #
1563     # Starting point of the optimization
1564     Xini = numpy.zeros(Xb.shape)
1565     #
1566     # Definition of the cost function
1567     # -------------------------------
1568     def CostFunction(w):
1569         _W = numpy.asmatrix(numpy.ravel( w )).T
1570         if selfA._parameters["StoreInternalVariables"] or \
1571             selfA._toStore("CurrentState") or \
1572             selfA._toStore("CurrentOptimum"):
1573             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1574         if selfA._toStore("SimulatedObservationAtCurrentState") or \
1575             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1576             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1577         if selfA._toStore("InnovationAtCurrentState"):
1578             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1579         #
1580         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
1581         Jo  = float( - _W.T * Innovation )
1582         J   = Jb + Jo
1583         #
1584         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1585         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1586         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1587         selfA.StoredVariables["CostFunctionJ" ].store( J )
1588         if selfA._toStore("IndexOfOptimum") or \
1589             selfA._toStore("CurrentOptimum") or \
1590             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1591             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1592             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1593             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1594             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1595         if selfA._toStore("IndexOfOptimum"):
1596             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1597         if selfA._toStore("CurrentOptimum"):
1598             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1599         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1600             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1601         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1602             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1603         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1604             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1605         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1606             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1607         return J
1608     #
1609     def GradientOfCostFunction(w):
1610         _W = numpy.asmatrix(numpy.ravel( w )).T
1611         GradJb  = HBHTpR * _W
1612         GradJo  = - Innovation
1613         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1614         return GradJ
1615     #
1616     # Minimization of the cost function
1617     # ----------------------------------
1618     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1619     #
1620     if selfA._parameters["Minimizer"] == "LBFGSB":
1621         if "0.19" <= scipy.version.version <= "1.1.0":
1622             import lbfgsbhlt as optimiseur
1623         else:
1624             import scipy.optimize as optimiseur
1625         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1626             func        = CostFunction,
1627             x0          = Xini,
1628             fprime      = GradientOfCostFunction,
1629             args        = (),
1630             bounds      = selfA._parameters["Bounds"],
1631             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1632             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1633             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1634             iprint      = selfA._parameters["optiprint"],
1635             )
1636         nfeval = Informations['funcalls']
1637         rc     = Informations['warnflag']
1638     elif selfA._parameters["Minimizer"] == "TNC":
1639         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1640             func        = CostFunction,
1641             x0          = Xini,
1642             fprime      = GradientOfCostFunction,
1643             args        = (),
1644             bounds      = selfA._parameters["Bounds"],
1645             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1646             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1647             ftol        = selfA._parameters["CostDecrementTolerance"],
1648             messages    = selfA._parameters["optmessages"],
1649             )
1650     elif selfA._parameters["Minimizer"] == "CG":
1651         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1652             f           = CostFunction,
1653             x0          = Xini,
1654             fprime      = GradientOfCostFunction,
1655             args        = (),
1656             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1657             gtol        = selfA._parameters["GradientNormTolerance"],
1658             disp        = selfA._parameters["optdisp"],
1659             full_output = True,
1660             )
1661     elif selfA._parameters["Minimizer"] == "NCG":
1662         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1663             f           = CostFunction,
1664             x0          = Xini,
1665             fprime      = GradientOfCostFunction,
1666             args        = (),
1667             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1668             avextol     = selfA._parameters["CostDecrementTolerance"],
1669             disp        = selfA._parameters["optdisp"],
1670             full_output = True,
1671             )
1672     elif selfA._parameters["Minimizer"] == "BFGS":
1673         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1674             f           = CostFunction,
1675             x0          = Xini,
1676             fprime      = GradientOfCostFunction,
1677             args        = (),
1678             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1679             gtol        = selfA._parameters["GradientNormTolerance"],
1680             disp        = selfA._parameters["optdisp"],
1681             full_output = True,
1682             )
1683     else:
1684         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1685     #
1686     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1687     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1688     #
1689     # Workaround for a TNC bug on the returned Minimum
1690     # -------------------------------------------------
1691     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1692         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1693         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1694     else:
1695         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1696     #
1697     # Computation of the analysis
1698     # ---------------------------
1699     Xa = Minimum
1700     #
1701     selfA.StoredVariables["Analysis"].store( Xa )
1702     #
1703     if selfA._toStore("OMA") or \
1704         selfA._toStore("SigmaObs2") or \
1705         selfA._toStore("SimulationQuantiles") or \
1706         selfA._toStore("SimulatedObservationAtOptimum"):
1707         if selfA._toStore("SimulatedObservationAtCurrentState"):
1708             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1709         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1710             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1711         else:
1712             HXa = Hm( Xa )
1713     #
1714     # Computation of the analysis error covariance
1715     # ---------------------------------------------
1716     if selfA._toStore("APosterioriCovariance") or \
1717         selfA._toStore("SimulationQuantiles") or \
1718         selfA._toStore("JacobianMatrixAtOptimum") or \
1719         selfA._toStore("KalmanGainAtOptimum"):
1720         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1721         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1722     if selfA._toStore("APosterioriCovariance") or \
1723         selfA._toStore("SimulationQuantiles") or \
1724         selfA._toStore("KalmanGainAtOptimum"):
1725         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1726         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1727     if selfA._toStore("APosterioriCovariance") or \
1728         selfA._toStore("SimulationQuantiles"):
1729         BI = B.getI()
1730         RI = R.getI()
1731         HessienneI = []
1732         nb = Xa.size
1733         for i in range(nb):
1734             _ee    = numpy.matrix(numpy.zeros(nb)).T
1735             _ee[i] = 1.
1736             _HtEE  = numpy.dot(HtM,_ee)
1737             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
1738             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1739         HessienneI = numpy.matrix( HessienneI )
1740         A = HessienneI.I
1741         if min(A.shape) != max(A.shape):
1742             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1743         if (numpy.diag(A) < 0).any():
1744             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1745         if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1746             try:
1747                 L = numpy.linalg.cholesky( A )
1748             except numpy.linalg.LinAlgError:
1749                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1750     if selfA._toStore("APosterioriCovariance"):
1751         selfA.StoredVariables["APosterioriCovariance"].store( A )
1752     if selfA._toStore("JacobianMatrixAtOptimum"):
1753         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1754     if selfA._toStore("KalmanGainAtOptimum"):
1755         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1756         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1757         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1758     #
1759     # Additional calculations and/or storage
1760     # ---------------------------------------
1761     if selfA._toStore("Innovation") or \
1762         selfA._toStore("SigmaObs2") or \
1763         selfA._toStore("MahalanobisConsistency") or \
1764         selfA._toStore("OMB"):
1765         d  = Y - HXb
1766     if selfA._toStore("Innovation"):
1767         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1768     if selfA._toStore("BMA"):
1769         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1770     if selfA._toStore("OMA"):
1771         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1772     if selfA._toStore("OMB"):
1773         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1774     if selfA._toStore("SigmaObs2"):
1775         TraceR = R.trace(Y.size)
1776         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1777     if selfA._toStore("MahalanobisConsistency"):
1778         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1779     if selfA._toStore("SimulationQuantiles"):
1780         nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1781         HXa  = numpy.matrix(numpy.ravel( HXa )).T
1782         YfQ  = None
1783         for i in range(nech):
1784             if selfA._parameters["SimulationForQuantiles"] == "Linear":
1785                 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1786                 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1787                 Yr = HXa + dYr
1788             elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1789                 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1790                 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1791             if YfQ is None:
1792                 YfQ = Yr
1793             else:
1794                 YfQ = numpy.hstack((YfQ,Yr))
1795         YfQ.sort(axis=-1)
1796         YQ = None
1797         for quantile in selfA._parameters["Quantiles"]:
1798             if not (0. <= float(quantile) <= 1.): continue
1799             indice = int(nech * float(quantile) - 1./nech)
1800             if YQ is None: YQ = YfQ[:,indice]
1801             else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
1802         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1803     if selfA._toStore("SimulatedObservationAtBackground"):
1804         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1805     if selfA._toStore("SimulatedObservationAtOptimum"):
1806         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1807     #
1808     return 0
1809
1810 # ==============================================================================
1811 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1812     """
1813     4DVAR
1814     """
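    # Sketch of the formulation as implemented below (strong-constraint 4DVAR):
    # the control variable is the initial state x0 over the assimilation window,
    # and the minimized cost is
    #     J(x0) = 1/2 (x0 - Xb)^T B^{-1} (x0 - Xb)
    #           + 1/2 sum_k (y_k - H(x_k))^T R^{-1} (y_k - H(x_k)),
    # with x_{k+1} = M(x_k, u_k) (+ Cm u_k); the gradient is obtained by a
    # backward sweep of the adjoint operators over the stored trajectory (see
    # GradientOfCostFunction below).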
1815     #
1816     # Initializations
1817     # ---------------
1818     #
1819     # Operators
1820     Hm = HO["Direct"].appliedControledFormTo
1821     Mm = EM["Direct"].appliedControledFormTo
1822     #
1823     if CM is not None and "Tangent" in CM and U is not None:
1824         Cm = CM["Tangent"].asMatrix(Xb)
1825     else:
1826         Cm = None
1827     #
1828     def Un(_step):
1829         if U is not None:
1830             if hasattr(U,"store") and 1<=_step<len(U) :
1831                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
1832             elif hasattr(U,"store") and len(U)==1:
1833                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1834             else:
1835                 _Un = numpy.asmatrix(numpy.ravel( U )).T
1836         else:
1837             _Un = None
1838         return _Un
1839     def CmUn(_xn,_un):
1840         if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it would be counted twice!
1841             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
1842             _CmUn = _Cm * _un
1843         else:
1844             _CmUn = 0.
1845         return _CmUn
1846     #
1847     # Note: the observations are used starting from time step number 1,
1848     # and they are stored in Yo indexed accordingly. Step 0 is therefore
1849     # not used, since the first assimilation step starts with the
1850     # observation of step 1.
1851     #
1852     # Number of time steps identical to the number of observation steps
1853     if hasattr(Y,"stepnumber"):
1854         duration = Y.stepnumber()
1855     else:
1856         duration = 2
1857     #
1858     # Precompute the inverses of B and R
1859     BI = B.getI()
1860     RI = R.getI()
1861     #
1862     # Starting point of the optimization
1863     Xini = selfA._parameters["InitializationPoint"]
1864     #
1865     # Definition of the cost function
1866     # -------------------------------
1867     selfA.DirectCalculation = [None,] # Step 0 is not observed
1868     selfA.DirectInnovation  = [None,] # Step 0 is not observed
1869     def CostFunction(x):
1870         _X  = numpy.asmatrix(numpy.ravel( x )).T
1871         if selfA._parameters["StoreInternalVariables"] or \
1872             selfA._toStore("CurrentState") or \
1873             selfA._toStore("CurrentOptimum"):
1874             selfA.StoredVariables["CurrentState"].store( _X )
1875         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
1876         selfA.DirectCalculation = [None,]
1877         selfA.DirectInnovation  = [None,]
1878         Jo  = 0.
1879         _Xn = _X
1880         for step in range(0,duration-1):
1881             if hasattr(Y,"store"):
1882                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
1883             else:
1884                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
1885             _Un = Un(step)
1886             #
1887             # Evolution step
1888             if selfA._parameters["EstimationOf"] == "State":
1889                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
1890             elif selfA._parameters["EstimationOf"] == "Parameters":
1891                 pass
1892             #
1893             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
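                # Componentwise projection of the propagated state onto the bounds:
                # the hstack/max (resp. hstack/min) construct clips each component
                # of _Xn to its lower (resp. upper) bound.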
1894                 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
1895                 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
1896             #
1897             # Difference with the observations
1898             if selfA._parameters["EstimationOf"] == "State":
1899                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
1900             elif selfA._parameters["EstimationOf"] == "Parameters":
1901                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
1902             #
1903             # Storage of the state
1904             selfA.DirectCalculation.append( _Xn )
1905             selfA.DirectInnovation.append( _YmHMX )
1906             #
1907             # Contribution to the observation part of the cost function
1908             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
1909         J = Jb + Jo
1910         #
1911         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1912         selfA.StoredVariables["CostFunctionJb"].store( Jb )
1913         selfA.StoredVariables["CostFunctionJo"].store( Jo )
1914         selfA.StoredVariables["CostFunctionJ" ].store( J )
1915         if selfA._toStore("IndexOfOptimum") or \
1916             selfA._toStore("CurrentOptimum") or \
1917             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1918             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1919             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1920             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1921         if selfA._toStore("IndexOfOptimum"):
1922             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1923         if selfA._toStore("CurrentOptimum"):
1924             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1925         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1926             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1927         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1928             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1929         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1930             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1931         return J
1932     #
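    # The gradient below is computed by a backward (adjoint) sweep over the
    # trajectory stored during CostFunction: iterating from the last observation
    # step back to the first, the recursion
    #     GradJo <- Ma ( Ha R^{-1} (y_k - H(x_k)) + GradJo )
    # accumulates the observation term, and the full gradient is
    #     GradJ = B^{-1} (x0 - Xb) - GradJo.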
1933     def GradientOfCostFunction(x):
1934         _X      = numpy.asmatrix(numpy.ravel( x )).T
1935         GradJb  = BI * (_X - Xb)
1936         GradJo  = 0.
1937         for step in range(duration-1,0,-1):
1938             # Retrieve the last stored state of the evolution
1939             _Xn = selfA.DirectCalculation.pop()
1940             # Retrieve the last stored innovation
1941             _YmHMX = selfA.DirectInnovation.pop()
1942             # Computation of the adjoints
1943             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1944             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
1945             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1946             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
1947             # Gradient computation by the adjoint state
1948             GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
1949             GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
1950         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
1951         return GradJ
1952     #
1953     # Minimization of the cost function
1954     # ----------------------------------
1955     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1956     #
1957     if selfA._parameters["Minimizer"] == "LBFGSB":
1958         if "0.19" <= scipy.version.version <= "1.1.0":
1959             import lbfgsbhlt as optimiseur
1960         else:
1961             import scipy.optimize as optimiseur
1962         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1963             func        = CostFunction,
1964             x0          = Xini,
1965             fprime      = GradientOfCostFunction,
1966             args        = (),
1967             bounds      = selfA._parameters["Bounds"],
1968             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1969             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1970             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1971             iprint      = selfA._parameters["optiprint"],
1972             )
1973         nfeval = Informations['funcalls']
1974         rc     = Informations['warnflag']
1975     elif selfA._parameters["Minimizer"] == "TNC":
1976         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1977             func        = CostFunction,
1978             x0          = Xini,
1979             fprime      = GradientOfCostFunction,
1980             args        = (),
1981             bounds      = selfA._parameters["Bounds"],
1982             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
1983             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1984             ftol        = selfA._parameters["CostDecrementTolerance"],
1985             messages    = selfA._parameters["optmessages"],
1986             )
1987     elif selfA._parameters["Minimizer"] == "CG":
1988         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1989             f           = CostFunction,
1990             x0          = Xini,
1991             fprime      = GradientOfCostFunction,
1992             args        = (),
1993             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
1994             gtol        = selfA._parameters["GradientNormTolerance"],
1995             disp        = selfA._parameters["optdisp"],
1996             full_output = True,
1997             )
1998     elif selfA._parameters["Minimizer"] == "NCG":
1999         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2000             f           = CostFunction,
2001             x0          = Xini,
2002             fprime      = GradientOfCostFunction,
2003             args        = (),
2004             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2005             avextol     = selfA._parameters["CostDecrementTolerance"],
2006             disp        = selfA._parameters["optdisp"],
2007             full_output = True,
2008             )
2009     elif selfA._parameters["Minimizer"] == "BFGS":
2010         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2011             f           = CostFunction,
2012             x0          = Xini,
2013             fprime      = GradientOfCostFunction,
2014             args        = (),
2015             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2016             gtol        = selfA._parameters["GradientNormTolerance"],
2017             disp        = selfA._parameters["optdisp"],
2018             full_output = True,
2019             )
2020     else:
2021         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2022     #
2023     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2024     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2025     #
2026     # Workaround for a TNC bug on the returned Minimum
2027     # -------------------------------------------------
2028     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2029         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2030     #
2031     # Computation of the analysis
2032     # ---------------------------
2033     Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2034     #
2035     selfA.StoredVariables["Analysis"].store( Xa )
2036     #
2037     # Additional calculations and/or storage
2038     # ---------------------------------------
2039     if selfA._toStore("BMA"):
2040         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2041     #
2042     return 0
2043
2044 # ==============================================================================
2045 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2046     """
2047     Stochastic EnKF
2048     """
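    # Sketch of the algorithm as implemented below (EnKF with perturbed
    # observations): in the state estimation case each member is forecast with
    # the evolution model plus a model noise drawn from Q, an ensemble Kalman
    # gain is built from the forecast and observation anomalies, and each member
    # is then updated using its own perturbed observation drawn from R. Two gain
    # formulations are available: the covariance form "KalmanFilterFormula05"
    # and the anomaly form "KalmanFilterFormula16".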
2049     if selfA._parameters["EstimationOf"] == "Parameters":
2050         selfA._parameters["StoreInternalVariables"] = True
2051     #
2052     # Operators
2053     # ---------
2054     H = HO["Direct"].appliedControledFormTo
2055     #
2056     if selfA._parameters["EstimationOf"] == "State":
2057         M = EM["Direct"].appliedControledFormTo
2058     #
2059     if CM is not None and "Tangent" in CM and U is not None:
2060         Cm = CM["Tangent"].asMatrix(Xb)
2061     else:
2062         Cm = None
2063     #
2064     # Number of time steps identical to the number of observation steps
2065     # ------------------------------------------------------------------
2066     if hasattr(Y,"stepnumber"):
2067         duration = Y.stepnumber()
2068         __p = numpy.cumprod(Y.shape())[-1]
2069     else:
2070         duration = 2
2071         __p = numpy.array(Y).size
2072     #
2073     # Precompute the inverses of B and R
2074     # ----------------------------------
2075     if selfA._parameters["StoreInternalVariables"] \
2076         or selfA._toStore("CostFunctionJ") \
2077         or selfA._toStore("CostFunctionJb") \
2078         or selfA._toStore("CostFunctionJo") \
2079         or selfA._toStore("CurrentOptimum") \
2080         or selfA._toStore("APosterioriCovariance"):
2081         BI = B.getI()
2082         RI = R.getI()
2083     #
2084     # Initialization
2085     # --------------
2086     __n = Xb.size
2087     __m = selfA._parameters["NumberOfMembers"]
2088     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2089     else:                         Pn = B
2090     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2091     else:                         Rn = R
2092     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2093     else:                         Qn = Q
2094     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2095     #
2096     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2097         selfA.StoredVariables["Analysis"].store( Xb )
2098         if selfA._toStore("APosterioriCovariance"):
2099             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2100             covarianceXa = Pn
2101     #
2102     previousJMinimum = numpy.finfo(float).max
2103     #
2104     for step in range(duration-1):
2105         if hasattr(Y,"store"):
2106             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2107         else:
2108             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2109         #
2110         if U is not None:
2111             if hasattr(U,"store") and len(U)>1:
2112                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2113             elif hasattr(U,"store") and len(U)==1:
2114                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2115             else:
2116                 Un = numpy.asmatrix(numpy.ravel( U )).T
2117         else:
2118             Un = None
2119         #
2120         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2121             Xn = CovarianceInflation( Xn,
2122                 selfA._parameters["InflationType"],
2123                 selfA._parameters["InflationFactor"],
2124                 )
2125         #
2126         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2127             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2128                 argsAsSerie = True,
2129                 returnSerieAsArrayMatrix = True )
2130             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2131             Xn_predicted = EMX + qi
2132             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2133                 argsAsSerie = True,
2134                 returnSerieAsArrayMatrix = True )
2135             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it would be counted twice!
2136                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2137                 Xn_predicted = Xn_predicted + Cm * Un
2138         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2139             # --- > By principle, M = Id, Q = 0
2140             Xn_predicted = Xn
2141             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2142                 argsAsSerie = True,
2143                 returnSerieAsArrayMatrix = True )
2144         #
2145         # Mean of forecast and observation of forecast
2146         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2147         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2148         #
2149         #--------------------------
2150         if VariantM == "KalmanFilterFormula05":
2151             PfHT, HPfHT = 0., 0.
2152             for i in range(__m):
2153                 Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
2154                 Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
2155                 PfHT  += Exfi * Eyfi.T
2156                 HPfHT += Eyfi * Eyfi.T
2157             PfHT  = (1./(__m-1)) * PfHT
2158             HPfHT = (1./(__m-1)) * HPfHT
2159             Kn     = PfHT * ( R + HPfHT ).I
2160             del PfHT, HPfHT
2161             #
2162             for i in range(__m):
2163                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2164                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2165         #--------------------------
2166         elif VariantM == "KalmanFilterFormula16":
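            # Same stochastic analysis, written with an explicit ensemble of
            # perturbed observations EpY: the gain K = EaX EaY^T (EaY EaY^T)^(-1)
            # is built from anomalies in which the observation perturbations are
            # included, so that EaY EaY^T estimates H Pf H^T + R, and each member
            # is updated towards its own perturbed observation.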
2167             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2168             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2169             #
2170             EaX   = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2171             EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
2172             #
2173             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2174             #
2175             for i in range(__m):
2176                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2177         #--------------------------
2178         else:
2179             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2180         #
2181         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2182             Xn = CovarianceInflation( Xn,
2183                 selfA._parameters["InflationType"],
2184                 selfA._parameters["InflationFactor"],
2185                 )
2186         #
2187         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2188         #--------------------------
2189         #
2190         if selfA._parameters["StoreInternalVariables"] \
2191             or selfA._toStore("CostFunctionJ") \
2192             or selfA._toStore("CostFunctionJb") \
2193             or selfA._toStore("CostFunctionJo") \
2194             or selfA._toStore("APosterioriCovariance") \
2195             or selfA._toStore("InnovationAtCurrentAnalysis") \
2196             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2197             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2198             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2199             _Innovation = Ynpu - _HXa
2200         #
2201         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2202         # ---> with the analysis
2203         selfA.StoredVariables["Analysis"].store( Xa )
2204         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2205             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2206         if selfA._toStore("InnovationAtCurrentAnalysis"):
2207             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2208         # ---> with the current state
2209         if selfA._parameters["StoreInternalVariables"] \
2210             or selfA._toStore("CurrentState"):
2211             selfA.StoredVariables["CurrentState"].store( Xn )
2212         if selfA._toStore("ForecastState"):
2213             selfA.StoredVariables["ForecastState"].store( EMX )
2214         if selfA._toStore("BMA"):
2215             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2216         if selfA._toStore("InnovationAtCurrentState"):
2217             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2218         if selfA._toStore("SimulatedObservationAtCurrentState") \
2219             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2220             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2221         # ---> others
2222         if selfA._parameters["StoreInternalVariables"] \
2223             or selfA._toStore("CostFunctionJ") \
2224             or selfA._toStore("CostFunctionJb") \
2225             or selfA._toStore("CostFunctionJo") \
2226             or selfA._toStore("CurrentOptimum") \
2227             or selfA._toStore("APosterioriCovariance"):
2228             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2229             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2230             J   = Jb + Jo
2231             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2232             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2233             selfA.StoredVariables["CostFunctionJ" ].store( J )
2234             #
2235             if selfA._toStore("IndexOfOptimum") \
2236                 or selfA._toStore("CurrentOptimum") \
2237                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2238                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2239                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2240                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2241                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2242             if selfA._toStore("IndexOfOptimum"):
2243                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2244             if selfA._toStore("CurrentOptimum"):
2245                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2246             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2247                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2248             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2249                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2250             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2251                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2252             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2253                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2254         if selfA._toStore("APosterioriCovariance"):
2255             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2256             Pn = Eai @ Eai.T
2257             Pn = 0.5 * (Pn + Pn.T)
2258             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2259         if selfA._parameters["EstimationOf"] == "Parameters" \
2260             and J < previousJMinimum:
2261             previousJMinimum    = J
2262             XaMin               = Xa
2263             if selfA._toStore("APosterioriCovariance"):
2264                 covarianceXaMin = Pn
2265     #
2266     # Additional final storage of the optimum for parameter estimation
2267     # ----------------------------------------------------------------------
2268     if selfA._parameters["EstimationOf"] == "Parameters":
2269         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2270         selfA.StoredVariables["Analysis"].store( XaMin )
2271         if selfA._toStore("APosterioriCovariance"):
2272             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2273         if selfA._toStore("BMA"):
2274             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2275     #
2276     return 0
2277
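# ==============================================================================
# Illustrative sketch, not part of ADAO itself: one analysis step of the
# stochastic (perturbed observations) EnKF above, rewritten with plain numpy
# arrays to make the update explicit. The function name and its arguments are
# illustrative assumptions, and R is assumed to be a full p x p matrix.
def _sketch_stochastic_enkf_analysis( Ef, HEf, y, R ):
    "Ef: n x m forecast ensemble, HEf: p x m observed ensemble, y: p vector, R: p x p matrix"
    import numpy
    __m  = Ef.shape[1]
    __p  = y.size
    Xfm  = Ef.mean(axis=1, keepdims=True)                  # Forecast ensemble mean
    Hfm  = HEf.mean(axis=1, keepdims=True)                 # Mean of the observed ensemble
    EaX  = (Ef  - Xfm) / numpy.sqrt(__m - 1)               # Normalized state anomalies
    EaY  = (HEf - Hfm) / numpy.sqrt(__m - 1)               # Normalized observation anomalies
    K    = EaX @ EaY.T @ numpy.linalg.inv(EaY @ EaY.T + R) # Ensemble Kalman gain
    Eps  = numpy.random.multivariate_normal(numpy.zeros(__p), R, size=__m).T
    return Ef + K @ (y.reshape((__p,1)) + Eps - HEf)       # Each member sees its own perturbed observation
# For example, with a linear observation operator given as a matrix H:
# Ea = _sketch_stochastic_enkf_analysis(Ef, H @ Ef, y, R)
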
2278 # ==============================================================================
2279 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2280     """
2281     Ensemble-Transform EnKF
2282     """
2283     if selfA._parameters["EstimationOf"] == "Parameters":
2284         selfA._parameters["StoreInternalVariables"] = True
2285     #
2286     # Operators
2287     # ----------
2288     H = HO["Direct"].appliedControledFormTo
2289     #
2290     if selfA._parameters["EstimationOf"] == "State":
2291         M = EM["Direct"].appliedControledFormTo
2292     #
2293     if CM is not None and "Tangent" in CM and U is not None:
2294         Cm = CM["Tangent"].asMatrix(Xb)
2295     else:
2296         Cm = None
2297     #
2298     # Number of time steps, identical to the number of observation steps
2299     # -------------------------------------------------------
2300     if hasattr(Y,"stepnumber"):
2301         duration = Y.stepnumber()
2302         __p = numpy.cumprod(Y.shape())[-1]
2303     else:
2304         duration = 2
2305         __p = numpy.array(Y).size
2306     #
2307     # Precompute the inverses of B and R
2308     # ----------------------------------
2309     if selfA._parameters["StoreInternalVariables"] \
2310         or selfA._toStore("CostFunctionJ") \
2311         or selfA._toStore("CostFunctionJb") \
2312         or selfA._toStore("CostFunctionJo") \
2313         or selfA._toStore("CurrentOptimum") \
2314         or selfA._toStore("APosterioriCovariance"):
2315         BI = B.getI()
2316         RI = R.getI()
2317     elif VariantM != "KalmanFilterFormula":
2318         RI = R.getI()
2319     if VariantM == "KalmanFilterFormula":
2320         RIdemi = R.choleskyI()
2321     #
2322     # Initialization
2323     # --------------
2324     __n = Xb.size
2325     __m = selfA._parameters["NumberOfMembers"]
2326     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2327     else:                         Pn = B
2328     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2329     else:                         Rn = R
2330     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2331     else:                         Qn = Q
2332     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2333     #
2334     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2335         selfA.StoredVariables["Analysis"].store( Xb )
2336         if selfA._toStore("APosterioriCovariance"):
2337             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2338             covarianceXa = Pn
2339     #
2340     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Used below to locate the optimum over the current run
2341     #
2342     for step in range(duration-1):
2343         if hasattr(Y,"store"):
2344             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2345         else:
2346             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2347         #
2348         if U is not None:
2349             if hasattr(U,"store") and len(U)>1:
2350                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2351             elif hasattr(U,"store") and len(U)==1:
2352                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2353             else:
2354                 Un = numpy.asmatrix(numpy.ravel( U )).T
2355         else:
2356             Un = None
2357         #
2358         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2359             Xn = CovarianceInflation( Xn,
2360                 selfA._parameters["InflationType"],
2361                 selfA._parameters["InflationFactor"],
2362                 )
2363         #
2364         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2365             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2366                 argsAsSerie = True,
2367                 returnSerieAsArrayMatrix = True )
2368             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2369             Xn_predicted = EMX + qi
2370             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2371                 argsAsSerie = True,
2372                 returnSerieAsArrayMatrix = True )
2373             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2374                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2375                 Xn_predicted = Xn_predicted + Cm * Un
2376         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2377             # --- > By principle, M = Id, Q = 0
2378             Xn_predicted = Xn
2379             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2380                 argsAsSerie = True,
2381                 returnSerieAsArrayMatrix = True )
2382         #
2383         # Mean of forecast and observation of forecast
2384         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2385         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2386         #
2387         # Anomalies
2388         EaX   = EnsembleOfAnomalies( Xn_predicted )
2389         EaHX  = numpy.array(HX_predicted - Hfm)
2390         #
2391         #--------------------------
2392         if VariantM == "KalmanFilterFormula":
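            # Deterministic ETKF analysis in ensemble space: with the scaled
            # observation anomalies S = R^(-1/2) (HX - Hfm) / sqrt(m-1), the mean
            # update weights are w = (I + S^T S)^(-1) S^T R^(-1/2) (y - Hfm) and
            # the analysis anomalies are obtained through the square root
            # transform T^(1/2) = (I + S^T S)^(-1/2).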
2393             mS    = RIdemi * EaHX / numpy.sqrt(__m-1)
2394             delta = RIdemi * ( Ynpu - Hfm )
2395             mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2396             vw    = mT @ mS.transpose() @ delta
2397             #
2398             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2399             mU    = numpy.eye(__m)
2400             #
2401             EaX   = EaX / numpy.sqrt(__m-1)
2402             Xn    = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
2403         #--------------------------
2404         elif VariantM == "Variational":
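            # Variational formulation of the same analysis: the weights w are
            # obtained by minimizing J(w) = Jo(w) + 0.5 (m-1) w^T w with a CG
            # solver, and the analysis anomalies are rebuilt from the inverse of
            # the Gauss-Newton Hessian through EWa = ((m-1) Hta^(-1))^(1/2).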
2405             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2406             def CostFunction(w):
2407                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2408                 _Jo = 0.5 * _A.T @ (RI * _A)
2409                 _Jb = 0.5 * (__m-1) * w.T @ w
2410                 _J  = _Jo + _Jb
2411                 return float(_J)
2412             def GradientOfCostFunction(w):
2413                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2414                 _GradJo = - EaHX.T @ (RI * _A)
2415                 _GradJb = (__m-1) * w.reshape((__m,1))
2416                 _GradJ  = _GradJo + _GradJb
2417                 return numpy.ravel(_GradJ)
2418             vw = scipy.optimize.fmin_cg(
2419                 f           = CostFunction,
2420                 x0          = numpy.zeros(__m),
2421                 fprime      = GradientOfCostFunction,
2422                 args        = (),
2423                 disp        = False,
2424                 )
2425             #
2426             Hto = EaHX.T @ (RI * EaHX)
2427             Htb = (__m-1) * numpy.eye(__m)
2428             Hta = Hto + Htb
2429             #
2430             Pta = numpy.linalg.inv( Hta )
2431             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2432             #
2433             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2434         #--------------------------
2435         elif VariantM == "FiniteSize11": # Boc2011 gauge
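            # This and the two following "FiniteSize" variants replace the
            # quadratic background term in w by a logarithmic gauge term
            # (finite-size or EnKF-N type formulation), intended to provide an
            # implicit, adaptive inflation of the analysis ensemble.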
2436             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2437             def CostFunction(w):
2438                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2439                 _Jo = 0.5 * _A.T @ (RI * _A)
2440                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2441                 _J  = _Jo + _Jb
2442                 return float(_J)
2443             def GradientOfCostFunction(w):
2444                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2445                 _GradJo = - EaHX.T @ (RI * _A)
2446                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2447                 _GradJ  = _GradJo + _GradJb
2448                 return numpy.ravel(_GradJ)
2449             vw = scipy.optimize.fmin_cg(
2450                 f           = CostFunction,
2451                 x0          = numpy.zeros(__m),
2452                 fprime      = GradientOfCostFunction,
2453                 args        = (),
2454                 disp        = False,
2455                 )
2456             #
2457             Hto = EaHX.T @ (RI * EaHX)
2458             Htb = __m * \
2459                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * numpy.outer(vw, vw) ) \
2460                 / (1 + 1/__m + vw.T @ vw)**2
2461             Hta = Hto + Htb
2462             #
2463             Pta = numpy.linalg.inv( Hta )
2464             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2465             #
2466             Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2467         #--------------------------
2468         elif VariantM == "FiniteSize15": # Boc2015 gauge
2469             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2470             def CostFunction(w):
2471                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2472                 _Jo = 0.5 * _A.T @ (RI * _A)
2473                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2474                 _J  = _Jo + _Jb
2475                 return float(_J)
2476             def GradientOfCostFunction(w):
2477                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2478                 _GradJo = - EaHX.T @ (RI * _A)
2479                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2480                 _GradJ  = _GradJo + _GradJb
2481                 return numpy.ravel(_GradJ)
2482             vw = scipy.optimize.fmin_cg(
2483                 f           = CostFunction,
2484                 x0          = numpy.zeros(__m),
2485                 fprime      = GradientOfCostFunction,
2486                 args        = (),
2487                 disp        = False,
2488                 )
2489             #
2490             Hto = EaHX.T @ (RI * EaHX)
2491             Htb = (__m+1) * \
2492                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * numpy.outer(vw, vw) ) \
2493                 / (1 + 1/__m + vw.T @ vw)**2
2494             Hta = Hto + Htb
2495             #
2496             Pta = numpy.linalg.inv( Hta )
2497             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2498             #
2499             Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2500         #--------------------------
2501         elif VariantM == "FiniteSize16": # Boc2016 gauge
2502             HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2503             def CostFunction(w):
2504                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2505                 _Jo = 0.5 * _A.T @ (RI * _A)
2506                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2507                 _J  = _Jo + _Jb
2508                 return float(_J)
2509             def GradientOfCostFunction(w):
2510                 _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2511                 _GradJo = - EaHX.T @ (RI * _A)
2512                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2513                 _GradJ  = _GradJo + _GradJb
2514                 return numpy.ravel(_GradJ)
2515             vw = scipy.optimize.fmin_cg(
2516                 f           = CostFunction,
2517                 x0          = numpy.zeros(__m),
2518                 fprime      = GradientOfCostFunction,
2519                 args        = (),
2520                 disp        = False,
2521                 )
2522             #
2523             Hto = EaHX.T @ (RI * EaHX)
2524             Htb = ((__m+1) / (__m-1)) * \
2525                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * numpy.outer(vw, vw) / (__m-1) ) \
2526                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2527             Hta = Hto + Htb
2528             #
2529             Pta = numpy.linalg.inv( Hta )
2530             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
2531             #
2532             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
2533         #--------------------------
2534         else:
2535             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2536         #
2537         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2538             Xn = CovarianceInflation( Xn,
2539                 selfA._parameters["InflationType"],
2540                 selfA._parameters["InflationFactor"],
2541                 )
2542         #
2543         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2544         #--------------------------
2545         #
2546         if selfA._parameters["StoreInternalVariables"] \
2547             or selfA._toStore("CostFunctionJ") \
2548             or selfA._toStore("CostFunctionJb") \
2549             or selfA._toStore("CostFunctionJo") \
2550             or selfA._toStore("APosterioriCovariance") \
2551             or selfA._toStore("InnovationAtCurrentAnalysis") \
2552             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2553             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2554             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2555             _Innovation = Ynpu - _HXa
2556         #
2557         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2558         # ---> with the analysis
2559         selfA.StoredVariables["Analysis"].store( Xa )
2560         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2561             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2562         if selfA._toStore("InnovationAtCurrentAnalysis"):
2563             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2564         # ---> with the current state
2565         if selfA._parameters["StoreInternalVariables"] \
2566             or selfA._toStore("CurrentState"):
2567             selfA.StoredVariables["CurrentState"].store( Xn )
2568         if selfA._toStore("ForecastState"):
2569             selfA.StoredVariables["ForecastState"].store( EMX )
2570         if selfA._toStore("BMA"):
2571             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2572         if selfA._toStore("InnovationAtCurrentState"):
2573             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
2574         if selfA._toStore("SimulatedObservationAtCurrentState") \
2575             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2576             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2577         # ---> others
2578         if selfA._parameters["StoreInternalVariables"] \
2579             or selfA._toStore("CostFunctionJ") \
2580             or selfA._toStore("CostFunctionJb") \
2581             or selfA._toStore("CostFunctionJo") \
2582             or selfA._toStore("CurrentOptimum") \
2583             or selfA._toStore("APosterioriCovariance"):
2584             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2585             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2586             J   = Jb + Jo
2587             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2588             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2589             selfA.StoredVariables["CostFunctionJ" ].store( J )
2590             #
2591             if selfA._toStore("IndexOfOptimum") \
2592                 or selfA._toStore("CurrentOptimum") \
2593                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2594                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2595                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2596                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2597                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2598             if selfA._toStore("IndexOfOptimum"):
2599                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2600             if selfA._toStore("CurrentOptimum"):
2601                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2602             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2603                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2604             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2605                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2606             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2607                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2608             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2609                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2610         if selfA._toStore("APosterioriCovariance"):
2611             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2612             Pn = Eai @ Eai.T
2613             Pn = 0.5 * (Pn + Pn.T)
2614             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2615         if selfA._parameters["EstimationOf"] == "Parameters" \
2616             and J < previousJMinimum:
2617             previousJMinimum    = J
2618             XaMin               = Xa
2619             if selfA._toStore("APosterioriCovariance"):
2620                 covarianceXaMin = Pn
2621     #
2622     # Additional final storage of the optimum for parameter estimation
2623     # ----------------------------------------------------------------------
2624     if selfA._parameters["EstimationOf"] == "Parameters":
2625         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2626         selfA.StoredVariables["Analysis"].store( XaMin )
2627         if selfA._toStore("APosterioriCovariance"):
2628             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2629         if selfA._toStore("BMA"):
2630             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2631     #
2632     return 0
2633
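# ==============================================================================
# Illustrative sketch, not part of ADAO itself: one deterministic ETKF analysis
# step with plain numpy arrays, mirroring the "KalmanFilterFormula" variant of
# etkf above. The function name and its arguments are illustrative assumptions,
# and R is assumed to be a full p x p covariance matrix.
def _sketch_etkf_analysis( Ef, HEf, y, R ):
    "Ef: n x m forecast ensemble, HEf: p x m observed ensemble, y: p vector, R: p x p matrix"
    import numpy, scipy.linalg
    __m    = Ef.shape[1]
    __p    = y.size
    Xfm    = Ef.mean(axis=1, keepdims=True)                        # Forecast ensemble mean
    Hfm    = HEf.mean(axis=1, keepdims=True)                       # Mean of the observed ensemble
    EaX    = (Ef - Xfm) / numpy.sqrt(__m - 1)                      # Normalized state anomalies
    RIdemi = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv(R)))   # R^(-1/2)
    S      = RIdemi @ (HEf - Hfm) / numpy.sqrt(__m - 1)            # Scaled observation anomalies
    T      = numpy.linalg.inv(numpy.eye(__m) + S.T @ S)            # Transform matrix in ensemble space
    w      = T @ S.T @ (RIdemi @ (y.reshape((__p,1)) - Hfm))       # Weights of the mean update
    Tdemi  = numpy.real(scipy.linalg.sqrtm(T))                     # Square root of the transform
    return Xfm + EaX @ (w + numpy.sqrt(__m - 1) * Tdemi)           # Analysis ensemble
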
2634 # ==============================================================================
2635 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2636     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2637     """
2638     Maximum Likelihood Ensemble Filter
2639     """
2640     if selfA._parameters["EstimationOf"] == "Parameters":
2641         selfA._parameters["StoreInternalVariables"] = True
2642     #
2643     # Operators
2644     # ----------
2645     H = HO["Direct"].appliedControledFormTo
2646     #
2647     if selfA._parameters["EstimationOf"] == "State":
2648         M = EM["Direct"].appliedControledFormTo
2649     #
2650     if CM is not None and "Tangent" in CM and U is not None:
2651         Cm = CM["Tangent"].asMatrix(Xb)
2652     else:
2653         Cm = None
2654     #
2655     # Number of time steps, identical to the number of observation steps
2656     # -------------------------------------------------------
2657     if hasattr(Y,"stepnumber"):
2658         duration = Y.stepnumber()
2659         __p = numpy.cumprod(Y.shape())[-1]
2660     else:
2661         duration = 2
2662         __p = numpy.array(Y).size
2663     #
2664     # Precompute the inverses of B and R
2665     # ----------------------------------
2666     if selfA._parameters["StoreInternalVariables"] \
2667         or selfA._toStore("CostFunctionJ") \
2668         or selfA._toStore("CostFunctionJb") \
2669         or selfA._toStore("CostFunctionJo") \
2670         or selfA._toStore("CurrentOptimum") \
2671         or selfA._toStore("APosterioriCovariance"):
2672         BI = B.getI()
2673     RI = R.getI()
2674     #
2675     # Initialization
2676     # --------------
2677     __n = Xb.size
2678     __m = selfA._parameters["NumberOfMembers"]
2679     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2680     else:                         Pn = B
2681     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2682     else:                         Rn = R
2683     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2684     else:                         Qn = Q
2685     Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2686     #
2687     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2688         selfA.StoredVariables["Analysis"].store( Xb )
2689         if selfA._toStore("APosterioriCovariance"):
2690             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2691             covarianceXa = Pn
2692     #
2693     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Used below to locate the optimum over the current run
2694     #
2695     for step in range(duration-1):
2696         if hasattr(Y,"store"):
2697             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2698         else:
2699             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2700         #
2701         if U is not None:
2702             if hasattr(U,"store") and len(U)>1:
2703                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2704             elif hasattr(U,"store") and len(U)==1:
2705                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2706             else:
2707                 Un = numpy.asmatrix(numpy.ravel( U )).T
2708         else:
2709             Un = None
2710         #
2711         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2712             Xn = CovarianceInflation( Xn,
2713                 selfA._parameters["InflationType"],
2714                 selfA._parameters["InflationFactor"],
2715                 )
2716         #
2717         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2718             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2719                 argsAsSerie = True,
2720                 returnSerieAsArrayMatrix = True )
2721             qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2722             Xn_predicted = EMX + qi
2723             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2724                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2725                 Xn_predicted = Xn_predicted + Cm * Un
2726         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2727             # --- > By principle, M = Id, Q = 0
2728             Xn_predicted = Xn
2729         #
2730         #--------------------------
2731         if VariantM == "MLEF13":
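            # MLEF analysis: Gauss-Newton iterations on the ensemble weights w,
            # re-applying the observation operator to the transformed ensemble E1
            # at each inner iteration; the transform Ta = (I + EaY^T R^(-1) EaY)^(-1/2)
            # finally re-creates the analysis ensemble around the optimal mean.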
2732             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2733             EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2734             Ua  = numpy.eye(__m)
2735             __j = 0
2736             Deltaw = 1
2737             if not BnotT:
2738                 Ta  = numpy.eye(__m)
2739             vw  = numpy.zeros(__m)
2740             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2741                 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2742                 #
2743                 if BnotT:
2744                     E1 = vx1 + _epsilon * EaX
2745                 else:
2746                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2747                 #
2748                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2749                     argsAsSerie = True,
2750                     returnSerieAsArrayMatrix = True )
2751                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2752                 #
2753                 if BnotT:
2754                     EaY = (HE2 - vy2) / _epsilon
2755                 else:
2756                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2757                 #
2758                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2759                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2760                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2761                 #
2762                 vw = vw + Deltaw
2763                 #
2764                 if not BnotT:
2765                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2766                 #
2767                 __j = __j + 1
2768             #
2769             if BnotT:
2770                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2771             #
2772             Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
2773         #--------------------------
2774         else:
2775             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2776         #
2777         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2778             Xn = CovarianceInflation( Xn,
2779                 selfA._parameters["InflationType"],
2780                 selfA._parameters["InflationFactor"],
2781                 )
2782         #
2783         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2784         #--------------------------
2785         #
2786         if selfA._parameters["StoreInternalVariables"] \
2787             or selfA._toStore("CostFunctionJ") \
2788             or selfA._toStore("CostFunctionJb") \
2789             or selfA._toStore("CostFunctionJo") \
2790             or selfA._toStore("APosterioriCovariance") \
2791             or selfA._toStore("InnovationAtCurrentAnalysis") \
2792             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2793             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2794             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2795             _Innovation = Ynpu - _HXa
2796         #
2797         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2798         # ---> with the analysis
2799         selfA.StoredVariables["Analysis"].store( Xa )
2800         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2801             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2802         if selfA._toStore("InnovationAtCurrentAnalysis"):
2803             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2804         # ---> with the current state
2805         if selfA._parameters["StoreInternalVariables"] \
2806             or selfA._toStore("CurrentState"):
2807             selfA.StoredVariables["CurrentState"].store( Xn )
2808         if selfA._toStore("ForecastState"):
2809             selfA.StoredVariables["ForecastState"].store( EMX )
2810         if selfA._toStore("BMA"):
2811             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2812         if selfA._toStore("InnovationAtCurrentState"):
2813             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2814         if selfA._toStore("SimulatedObservationAtCurrentState") \
2815             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2816             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2817         # ---> others
2818         if selfA._parameters["StoreInternalVariables"] \
2819             or selfA._toStore("CostFunctionJ") \
2820             or selfA._toStore("CostFunctionJb") \
2821             or selfA._toStore("CostFunctionJo") \
2822             or selfA._toStore("CurrentOptimum") \
2823             or selfA._toStore("APosterioriCovariance"):
2824             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2825             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2826             J   = Jb + Jo
2827             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2828             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2829             selfA.StoredVariables["CostFunctionJ" ].store( J )
2830             #
2831             if selfA._toStore("IndexOfOptimum") \
2832                 or selfA._toStore("CurrentOptimum") \
2833                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2834                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2835                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2836                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2837                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2838             if selfA._toStore("IndexOfOptimum"):
2839                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2840             if selfA._toStore("CurrentOptimum"):
2841                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2842             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2843                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2844             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2845                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2846             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2847                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2848             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2849                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2850         if selfA._toStore("APosterioriCovariance"):
2851             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2852             Pn = Eai @ Eai.T
2853             Pn = 0.5 * (Pn + Pn.T)
2854             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2855         if selfA._parameters["EstimationOf"] == "Parameters" \
2856             and J < previousJMinimum:
2857             previousJMinimum    = J
2858             XaMin               = Xa
2859             if selfA._toStore("APosterioriCovariance"):
2860                 covarianceXaMin = Pn
2861     #
2862     # Additional final storage of the optimum for parameter estimation
2863     # ----------------------------------------------------------------------
2864     if selfA._parameters["EstimationOf"] == "Parameters":
2865         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2866         selfA.StoredVariables["Analysis"].store( XaMin )
2867         if selfA._toStore("APosterioriCovariance"):
2868             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2869         if selfA._toStore("BMA"):
2870             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2871     #
2872     return 0
2873
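# ==============================================================================
# Illustrative sketch, not part of ADAO itself: a single Gauss-Newton step on
# the ensemble weights, as performed inside the inner loops of the MLEF above
# and of the iterative EnKF below. The function name and its arguments are
# illustrative assumptions, not part of the ADAO API.
def _sketch_weight_space_newton_step( w, EaY, RI, innovation ):
    "w: m weights, EaY: p x m normalized observation anomalies, RI: p x p inverse of R, innovation: p vector"
    import numpy
    __m    = w.size
    _GradJ = w - EaY.T @ (RI @ numpy.ravel(innovation))   # Gradient of J(w) = 0.5 w^T w + Jo(w)
    _HessJ = numpy.eye(__m) + EaY.T @ (RI @ EaY)          # Gauss-Newton approximation of the Hessian
    return w - numpy.linalg.solve(_HessJ, _GradJ)         # Updated weights w + Deltaw
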
2874 # ==============================================================================
2875 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2876     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2877     """
2878     Iterative EnKF
2879     """
2880     if selfA._parameters["EstimationOf"] == "Parameters":
2881         selfA._parameters["StoreInternalVariables"] = True
2882     #
2883     # Operators
2884     # ----------
2885     H = HO["Direct"].appliedControledFormTo
2886     #
2887     if selfA._parameters["EstimationOf"] == "State":
2888         M = EM["Direct"].appliedControledFormTo
2889     #
2890     if CM is not None and "Tangent" in CM and U is not None:
2891         Cm = CM["Tangent"].asMatrix(Xb)
2892     else:
2893         Cm = None
2894     #
2895     # Number of time steps, identical to the number of observation steps
2896     # -------------------------------------------------------
2897     if hasattr(Y,"stepnumber"):
2898         duration = Y.stepnumber()
2899         __p = numpy.cumprod(Y.shape())[-1]
2900     else:
2901         duration = 2
2902         __p = numpy.array(Y).size
2903     #
2904     # Precompute the inverses of B and R
2905     # ----------------------------------
2906     if selfA._parameters["StoreInternalVariables"] \
2907         or selfA._toStore("CostFunctionJ") \
2908         or selfA._toStore("CostFunctionJb") \
2909         or selfA._toStore("CostFunctionJo") \
2910         or selfA._toStore("CurrentOptimum") \
2911         or selfA._toStore("APosterioriCovariance"):
2912         BI = B.getI()
2913     RI = R.getI()
2914     #
2915     # Initialization
2916     # --------------
2917     __n = Xb.size
2918     __m = selfA._parameters["NumberOfMembers"]
2919     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2920     else:                         Pn = B
2921     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2922     else:                         Rn = R
2923     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2924     else:                         Qn = Q
2925     Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2926     #
2927     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2928         selfA.StoredVariables["Analysis"].store( Xb )
2929         if selfA._toStore("APosterioriCovariance"):
2930             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2931             covarianceXa = Pn
2932     #
2933     previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Used below to locate the optimum over the current run
2934     #
2935     for step in range(duration-1):
2936         if hasattr(Y,"store"):
2937             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2938         else:
2939             Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2940         #
2941         if U is not None:
2942             if hasattr(U,"store") and len(U)>1:
2943                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2944             elif hasattr(U,"store") and len(U)==1:
2945                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2946             else:
2947                 Un = numpy.asmatrix(numpy.ravel( U )).T
2948         else:
2949             Un = None
2950         #
2951         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2952             Xn = CovarianceInflation( Xn,
2953                 selfA._parameters["InflationType"],
2954                 selfA._parameters["InflationFactor"],
2955                 )
2956         #
2957         #--------------------------
2958         if VariantM == "IEnKF12":
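            # Iterative EnKF analysis: same Gauss-Newton iterations on the
            # ensemble weights w as in the MLEF above, but the evolution model M
            # is also re-applied to the transformed ensemble at each inner
            # iteration, so the nonlinearity of the forecast step is taken into
            # account in the analysis as well.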
2959             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2960             EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
2961             __j = 0
2962             Deltaw = 1
2963             if not BnotT:
2964                 Ta  = numpy.eye(__m)
2965             vw  = numpy.zeros(__m)
2966             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2967                 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2968                 #
2969                 if BnotT:
2970                     E1 = vx1 + _epsilon * EaX
2971                 else:
2972                     E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2973                 #
2974                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2975                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2976                         argsAsSerie = True,
2977                         returnSerieAsArrayMatrix = True )
2978                 elif selfA._parameters["EstimationOf"] == "Parameters":
2979                     # --- > By principle, M = Id
2980                     E2 = Xn
2981                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2982                 vy1 = H((vx2, Un)).reshape((__p,-1))
2983                 #
2984                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2985                     argsAsSerie = True,
2986                     returnSerieAsArrayMatrix = True )
2987                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2988                 #
2989                 if BnotT:
2990                     EaY = (HE2 - vy2) / _epsilon
2991                 else:
2992                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2993                 #
2994                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2995                 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2996                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2997                 #
2998                 vw = vw + Deltaw
2999                 #
3000                 if not BnotT:
3001                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
3002                 #
3003                 __j = __j + 1
3004             #
3005             A2 = EnsembleOfAnomalies( E2 )
3006             #
3007             if BnotT:
3008                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
3009                 A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
3010             #
3011             Xn = vx2 + A2
3012         #--------------------------
3013         else:
3014             raise ValueError("VariantM has to be chosen in the authorized methods list.")
3015         #
3016         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3017             Xn = CovarianceInflation( Xn,
3018                 selfA._parameters["InflationType"],
3019                 selfA._parameters["InflationFactor"],
3020                 )
3021         #
3022         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
3023         #--------------------------
3024         #
3025         if selfA._parameters["StoreInternalVariables"] \
3026             or selfA._toStore("CostFunctionJ") \
3027             or selfA._toStore("CostFunctionJb") \
3028             or selfA._toStore("CostFunctionJo") \
3029             or selfA._toStore("APosterioriCovariance") \
3030             or selfA._toStore("InnovationAtCurrentAnalysis") \
3031             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3032             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3033             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
3034             _Innovation = Ynpu - _HXa
3035         #
3036         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3037         # ---> with the analysis
3038         selfA.StoredVariables["Analysis"].store( Xa )
3039         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3040             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3041         if selfA._toStore("InnovationAtCurrentAnalysis"):
3042             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3043         # ---> with the current state
3044         if selfA._parameters["StoreInternalVariables"] \
3045             or selfA._toStore("CurrentState"):
3046             selfA.StoredVariables["CurrentState"].store( Xn )
3047         if selfA._toStore("ForecastState"):
3048             selfA.StoredVariables["ForecastState"].store( E2 )
3049         if selfA._toStore("BMA"):
3050             selfA.StoredVariables["BMA"].store( E2 - Xa )
3051         if selfA._toStore("InnovationAtCurrentState"):
3052             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
3053         if selfA._toStore("SimulatedObservationAtCurrentState") \
3054             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3055             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
3056         # ---> others
3057         if selfA._parameters["StoreInternalVariables"] \
3058             or selfA._toStore("CostFunctionJ") \
3059             or selfA._toStore("CostFunctionJb") \
3060             or selfA._toStore("CostFunctionJo") \
3061             or selfA._toStore("CurrentOptimum") \
3062             or selfA._toStore("APosterioriCovariance"):
3063             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3064             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3065             J   = Jb + Jo
3066             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3067             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3068             selfA.StoredVariables["CostFunctionJ" ].store( J )
3069             #
3070             if selfA._toStore("IndexOfOptimum") \
3071                 or selfA._toStore("CurrentOptimum") \
3072                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3073                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3074                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3075                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3076                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3077             if selfA._toStore("IndexOfOptimum"):
3078                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3079             if selfA._toStore("CurrentOptimum"):
3080                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3081             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3082                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3083             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3084                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3085             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3086                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3087             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3088                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3089         if selfA._toStore("APosterioriCovariance"):
3090             Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
3091             Pn = Eai @ Eai.T
3092             Pn = 0.5 * (Pn + Pn.T)
3093             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3094         if selfA._parameters["EstimationOf"] == "Parameters" \
3095             and J < previousJMinimum:
3096             previousJMinimum    = J
3097             XaMin               = Xa
3098             if selfA._toStore("APosterioriCovariance"):
3099                 covarianceXaMin = Pn
3100     #
3101     # Additional final storage of the optimum for parameter estimation
3102     # ----------------------------------------------------------------------
3103     if selfA._parameters["EstimationOf"] == "Parameters":
3104         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3105         selfA.StoredVariables["Analysis"].store( XaMin )
3106         if selfA._toStore("APosterioriCovariance"):
3107             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3108         if selfA._toStore("BMA"):
3109             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3110     #
3111     return 0
3112
3113 # ==============================================================================
3114 if __name__ == "__main__":
3115     print('\n AUTODIAGNOSTIC\n')