Salome HOME
Documentation and performance improvements
[modules/adao.git] / src / daComposant / daCore / NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Définit les objets numériques génériques.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.ravel( X ).reshape((-1,1))
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     Cette classe sert d'interface pour définir les opérateurs approximés. A la
55     création d'un objet, en fournissant une fonction "Function", on obtient un
56     objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57     "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58     multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59     "dX" qui sera multiplié par "increment" (donc en %), et on effectue de DF
60     centrées si le booléen "centeredDF" est vrai.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             avoidingRedundancy    = True,
70             toleranceInRedundancy = 1.e-18,
71             lenghtOfRedundancy    = -1,
72             mpEnabled             = False,
73             mpWorkers             = None,
74             mfEnabled             = False,
75             ):
76         self.__name = str(name)
77         self.__extraArgs = extraArguments
78         if mpEnabled:
79             try:
80                 import multiprocessing
81                 self.__mpEnabled = True
82             except ImportError:
83                 self.__mpEnabled = False
84         else:
85             self.__mpEnabled = False
86         self.__mpWorkers = mpWorkers
87         if self.__mpWorkers is not None and self.__mpWorkers < 1:
88             self.__mpWorkers = None
89         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
90         #
91         if mfEnabled:
92             self.__mfEnabled = True
93         else:
94             self.__mfEnabled = False
95         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
96         #
97         if avoidingRedundancy:
98             self.__avoidRC = True
99             self.__tolerBP = float(toleranceInRedundancy)
100             self.__lenghtRJ = int(lenghtOfRedundancy)
101             self.__listJPCP = [] # Jacobian Previous Calculated Points
102             self.__listJPCI = [] # Jacobian Previous Calculated Increment
103             self.__listJPCR = [] # Jacobian Previous Calculated Results
104             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
106         else:
107             self.__avoidRC = False
108         #
109         if self.__mpEnabled:
110             if isinstance(Function,types.FunctionType):
111                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112                 self.__userFunction__name = Function.__name__
113                 try:
114                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
115                 except:
116                     mod = os.path.abspath(Function.__globals__['__file__'])
117                 if not os.path.isfile(mod):
118                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
119                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120                 self.__userFunction__path = os.path.dirname(mod)
121                 del mod
122                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
124             elif isinstance(Function,types.MethodType):
125                 logging.debug("FDA Calculs en multiprocessing : MethodType")
126                 self.__userFunction__name = Function.__name__
127                 try:
128                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
129                 except:
130                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131                 if not os.path.isfile(mod):
132                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
133                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134                 self.__userFunction__path = os.path.dirname(mod)
135                 del mod
136                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
138             else:
139                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
140         else:
141             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142             self.__userFunction = self.__userOperator.appliedTo
143         #
144         self.__centeredDF = bool(centeredDF)
145         if abs(float(increment)) > 1.e-15:
146             self.__increment  = float(increment)
147         else:
148             self.__increment  = 0.01
149         if dX is None:
150             self.__dX     = None
151         else:
152             self.__dX     = numpy.ravel( dX )
153         logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
154         if self.__avoidRC:
155             logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
156
157     # ---------------------------------------------------------
158     def __doublon__(self, e, l, n, v=None):
159         __ac, __iac = False, -1
160         for i in range(len(l)-1,-1,-1):
161             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162                 __ac, __iac = True, i
163                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
164                 break
165         return __ac, __iac
166
167     # ---------------------------------------------------------
168     def DirectOperator(self, X, **extraArgs ):
169         """
170         Calcul du direct à l'aide de la fonction fournie.
171
172         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
173         ne doivent pas être données ici à la fonction utilisateur.
174         """
175         logging.debug("FDA Calcul DirectOperator (explicite)")
176         if self.__mfEnabled:
177             _HX = self.__userFunction( X, argsAsSerie = True )
178         else:
179             _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
180         #
181         return _HX
182
183     # ---------------------------------------------------------
184     def TangentMatrix(self, X ):
185         """
186         Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
187         c'est-à-dire le gradient de H en X. On utilise des différences finies
188         directionnelles autour du point X. X est un numpy.matrix.
189
190         Différences finies centrées (approximation d'ordre 2):
191         1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
192            dX[i] à la  composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
193            on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
194            H( X_moins_dXi )
195         2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
196            le pas 2*dXi
197         3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
198
199         Différences finies non centrées (approximation d'ordre 1):
200         1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
201            composante X[i] pour composer X_plus_dXi, et on calcule la réponse
202            HX_plus_dXi = H( X_plus_dXi )
203         2/ On calcule la valeur centrale HX = H(X)
204         3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
205            le pas dXi
206         4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
207
208         """
209         logging.debug("FDA Début du calcul de la Jacobienne")
210         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
211         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
212         #
213         if X is None or len(X)==0:
214             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
215         #
216         _X = numpy.ravel( X )
217         #
218         if self.__dX is None:
219             _dX  = self.__increment * _X
220         else:
221             _dX = numpy.ravel( self.__dX )
222         assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
223         assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
224         #
225         if (_dX == 0.).any():
226             moyenne = _dX.mean()
227             if moyenne == 0.:
228                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
229             else:
230                 _dX = numpy.where( _dX == 0., moyenne, _dX )
231         #
232         __alreadyCalculated  = False
233         if self.__avoidRC:
234             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
235             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
236             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
237                 __alreadyCalculated, __i = True, __alreadyCalculatedP
238                 logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
239         #
240         if __alreadyCalculated:
241             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
242             _Jacobienne = self.__listJPCR[__i]
243         else:
244             logging.debug("FDA   Calcul Jacobienne (explicite)")
245             if self.__centeredDF:
246                 #
247                 if self.__mpEnabled and not self.__mfEnabled:
248                     funcrepr = {
249                         "__userFunction__path" : self.__userFunction__path,
250                         "__userFunction__modl" : self.__userFunction__modl,
251                         "__userFunction__name" : self.__userFunction__name,
252                     }
253                     _jobs = []
254                     for i in range( len(_dX) ):
255                         _dXi            = _dX[i]
256                         _X_plus_dXi     = numpy.array( _X, dtype=float )
257                         _X_plus_dXi[i]  = _X[i] + _dXi
258                         _X_moins_dXi    = numpy.array( _X, dtype=float )
259                         _X_moins_dXi[i] = _X[i] - _dXi
260                         #
261                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
262                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
263                     #
264                     import multiprocessing
265                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
266                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
267                     self.__pool.close()
268                     self.__pool.join()
269                     #
270                     _Jacobienne  = []
271                     for i in range( len(_dX) ):
272                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
273                     #
274                 elif self.__mfEnabled:
275                     _xserie = []
276                     for i in range( len(_dX) ):
277                         _dXi            = _dX[i]
278                         _X_plus_dXi     = numpy.array( _X, dtype=float )
279                         _X_plus_dXi[i]  = _X[i] + _dXi
280                         _X_moins_dXi    = numpy.array( _X, dtype=float )
281                         _X_moins_dXi[i] = _X[i] - _dXi
282                         #
283                         _xserie.append( _X_plus_dXi )
284                         _xserie.append( _X_moins_dXi )
285                     #
286                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
287                      #
288                     _Jacobienne  = []
289                     for i in range( len(_dX) ):
290                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
291                     #
292                 else:
293                     _Jacobienne  = []
294                     for i in range( _dX.size ):
295                         _dXi            = _dX[i]
296                         _X_plus_dXi     = numpy.array( _X, dtype=float )
297                         _X_plus_dXi[i]  = _X[i] + _dXi
298                         _X_moins_dXi    = numpy.array( _X, dtype=float )
299                         _X_moins_dXi[i] = _X[i] - _dXi
300                         #
301                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
302                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
303                         #
304                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
305                 #
306             else:
307                 #
308                 if self.__mpEnabled and not self.__mfEnabled:
309                     funcrepr = {
310                         "__userFunction__path" : self.__userFunction__path,
311                         "__userFunction__modl" : self.__userFunction__modl,
312                         "__userFunction__name" : self.__userFunction__name,
313                     }
314                     _jobs = []
315                     _jobs.append( (_X, self.__extraArgs, funcrepr) )
316                     for i in range( len(_dX) ):
317                         _X_plus_dXi    = numpy.array( _X, dtype=float )
318                         _X_plus_dXi[i] = _X[i] + _dX[i]
319                         #
320                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
321                     #
322                     import multiprocessing
323                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
324                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
325                     self.__pool.close()
326                     self.__pool.join()
327                     #
328                     _HX = _HX_plus_dX.pop(0)
329                     #
330                     _Jacobienne = []
331                     for i in range( len(_dX) ):
332                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
333                     #
334                 elif self.__mfEnabled:
335                     _xserie = []
336                     _xserie.append( _X )
337                     for i in range( len(_dX) ):
338                         _X_plus_dXi    = numpy.array( _X, dtype=float )
339                         _X_plus_dXi[i] = _X[i] + _dX[i]
340                         #
341                         _xserie.append( _X_plus_dXi )
342                     #
343                     _HX_plus_dX = self.DirectOperator( _xserie )
344                     #
345                     _HX = _HX_plus_dX.pop(0)
346                     #
347                     _Jacobienne = []
348                     for i in range( len(_dX) ):
349                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
350                    #
351                 else:
352                     _Jacobienne  = []
353                     _HX = self.DirectOperator( _X )
354                     for i in range( _dX.size ):
355                         _dXi            = _dX[i]
356                         _X_plus_dXi     = numpy.array( _X, dtype=float )
357                         _X_plus_dXi[i]  = _X[i] + _dXi
358                         #
359                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
360                         #
361                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
362                 #
363             #
364             _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
365             if self.__avoidRC:
366                 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
367                 while len(self.__listJPCP) > self.__lenghtRJ:
368                     self.__listJPCP.pop(0)
369                     self.__listJPCI.pop(0)
370                     self.__listJPCR.pop(0)
371                     self.__listJPPN.pop(0)
372                     self.__listJPIN.pop(0)
373                 self.__listJPCP.append( copy.copy(_X) )
374                 self.__listJPCI.append( copy.copy(_dX) )
375                 self.__listJPCR.append( copy.copy(_Jacobienne) )
376                 self.__listJPPN.append( numpy.linalg.norm(_X) )
377                 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
378         #
379         logging.debug("FDA Fin du calcul de la Jacobienne")
380         #
381         return _Jacobienne
382
383     # ---------------------------------------------------------
384     def TangentOperator(self, paire, **extraArgs ):
385         """
386         Calcul du tangent à l'aide de la Jacobienne.
387
388         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
389         ne doivent pas être données ici à la fonction utilisateur.
390         """
391         if self.__mfEnabled:
392             assert len(paire) == 1, "Incorrect lenght of arguments"
393             _paire = paire[0]
394             assert len(_paire) == 2, "Incorrect number of arguments"
395         else:
396             assert len(paire) == 2, "Incorrect number of arguments"
397             _paire = paire
398         X, dX = _paire
399         _Jacobienne = self.TangentMatrix( X )
400         if dX is None or len(dX) == 0:
401             #
402             # Calcul de la forme matricielle si le second argument est None
403             # -------------------------------------------------------------
404             if self.__mfEnabled: return [_Jacobienne,]
405             else:                return _Jacobienne
406         else:
407             #
408             # Calcul de la valeur linéarisée de H en X appliqué à dX
409             # ------------------------------------------------------
410             _dX = numpy.ravel( dX )
411             _HtX = numpy.dot(_Jacobienne, _dX)
412             if self.__mfEnabled: return [_HtX,]
413             else:                return _HtX
414
415     # ---------------------------------------------------------
416     def AdjointOperator(self, paire, **extraArgs ):
417         """
418         Calcul de l'adjoint à l'aide de la Jacobienne.
419
420         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
421         ne doivent pas être données ici à la fonction utilisateur.
422         """
423         if self.__mfEnabled:
424             assert len(paire) == 1, "Incorrect lenght of arguments"
425             _paire = paire[0]
426             assert len(_paire) == 2, "Incorrect number of arguments"
427         else:
428             assert len(paire) == 2, "Incorrect number of arguments"
429             _paire = paire
430         X, Y = _paire
431         _JacobienneT = self.TangentMatrix( X ).T
432         if Y is None or len(Y) == 0:
433             #
434             # Calcul de la forme matricielle si le second argument est None
435             # -------------------------------------------------------------
436             if self.__mfEnabled: return [_JacobienneT,]
437             else:                return _JacobienneT
438         else:
439             #
440             # Calcul de la valeur de l'adjoint en X appliqué à Y
441             # --------------------------------------------------
442             _Y = numpy.ravel( Y )
443             _HaY = numpy.dot(_JacobienneT, _Y)
444             if self.__mfEnabled: return [_HaY,]
445             else:                return _HaY
446
447 # ==============================================================================
448 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
449     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
450     #
451     _bgcenter = numpy.ravel(_bgcenter)[:,None]
452     if _nbmembers < 1:
453         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
454     #
455     if _bgcovariance is None:
456         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
457     else:
458         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
459         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
460     #
461     return BackgroundEnsemble
462
463 # ==============================================================================
464 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
465     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
466     def __CenteredRandomAnomalies(Zr, N):
467         """
468         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
469         notes manuscrites de MB et conforme au code de PS avec eps = -1
470         """
471         eps = -1
472         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
473         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
474         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
475         Q = numpy.dot(Q,R)
476         Zr = numpy.dot(Q,Zr)
477         return Zr.T
478     #
479     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
480     if _nbmembers < 1:
481         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
482     if _bgcovariance is None:
483         BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
484     else:
485         if _withSVD:
486             U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
487             _nbctl = _bgcenter.size
488             if _nbmembers > _nbctl:
489                 _Z = numpy.concatenate((numpy.dot(
490                     numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
491                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
492             else:
493                 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
494             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
495             BackgroundEnsemble = _bgcenter + _Zca
496         else:
497             if max(abs(_bgcovariance.flatten())) > 0:
498                 _nbctl = _bgcenter.size
499                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
500                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
501                 BackgroundEnsemble = _bgcenter + _Zca
502             else:
503                 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
504     #
505     return BackgroundEnsemble
506
507 # ==============================================================================
508 def EnsembleMean( __Ensemble ):
509     "Renvoie la moyenne empirique d'un ensemble"
510     return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
511
512 # ==============================================================================
513 def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
514     "Renvoie les anomalies centrées à partir d'un ensemble"
515     if __OptMean is None:
516         __Em = EnsembleMean( __Ensemble )
517     else:
518         __Em = numpy.ravel( __OptMean ).reshape((-1,1))
519     #
520     return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
521
522 # ==============================================================================
523 def EnsembleErrorCovariance( __Ensemble, __quick = False ):
524     "Renvoie l'estimation empirique de la covariance d'ensemble"
525     if __quick:
526         # Covariance rapide mais rarement définie positive
527         __Covariance = numpy.cov( __Ensemble )
528     else:
529         # Résultat souvent identique à numpy.cov, mais plus robuste
530         __n, __m = numpy.asarray( __Ensemble ).shape
531         __Anomalies = EnsembleOfAnomalies( __Ensemble )
532         # Estimation empirique
533         __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
534         # Assure la symétrie
535         __Covariance = ( __Covariance + __Covariance.T ) * 0.5
536         # Assure la positivité
537         __epsilon    = mpr*numpy.trace( __Covariance )
538         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
539     #
540     return __Covariance
541
542 # ==============================================================================
543 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
544     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
545     if hasattr(__Covariance,"assparsematrix"):
546         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
547             # Traitement d'une covariance nulle ou presque
548             return __Ensemble
549         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
550             # Traitement d'une covariance nulle ou presque
551             return __Ensemble
552     else:
553         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
554             # Traitement d'une covariance nulle ou presque
555             return __Ensemble
556         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
557             # Traitement d'une covariance nulle ou presque
558             return __Ensemble
559     #
560     __n, __m = __Ensemble.shape
561     if __Seed is not None: numpy.random.seed(__Seed)
562     #
563     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
564         # Traitement d'une covariance multiple de l'identité
565         __zero = 0.
566         __std  = numpy.sqrt(__Covariance.assparsematrix())
567         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
568     #
569     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
570         # Traitement d'une covariance diagonale avec variances non identiques
571         __zero = numpy.zeros(__n)
572         __std  = numpy.sqrt(__Covariance.assparsematrix())
573         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
574     #
575     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
576         # Traitement d'une covariance pleine
577         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
578     #
579     elif isinstance(__Covariance, numpy.ndarray):
580         # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
581         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
582     #
583     else:
584         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
585     #
586     return __Ensemble
587
588 # ==============================================================================
589 def CovarianceInflation(
590         InputCovOrEns,
591         InflationType   = None,
592         InflationFactor = None,
593         BackgroundCov   = None,
594         ):
595     """
596     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
597
598     Synthèse : Hunt 2007, section 2.3.5
599     """
600     if InflationFactor is None:
601         return InputCovOrEns
602     else:
603         InflationFactor = float(InflationFactor)
604     #
605     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
606         if InflationFactor < 1.:
607             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
608         if InflationFactor < 1.+mpr:
609             return InputCovOrEns
610         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
611     #
612     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
613         if InflationFactor < 1.:
614             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
615         if InflationFactor < 1.+mpr:
616             return InputCovOrEns
617         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
618         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
619             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
620     #
621     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
622         if InflationFactor < 0.:
623             raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
624         if InflationFactor < mpr:
625             return InputCovOrEns
626         __n, __m = numpy.asarray(InputCovOrEns).shape
627         if __n != __m:
628             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
629         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
630     #
631     elif InflationType == "HybridOnBackgroundCovariance":
632         if InflationFactor < 0.:
633             raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
634         if InflationFactor < mpr:
635             return InputCovOrEns
636         __n, __m = numpy.asarray(InputCovOrEns).shape
637         if __n != __m:
638             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
639         if BackgroundCov is None:
640             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
641         if InputCovOrEns.shape != BackgroundCov.shape:
642             raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
643         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
644     #
645     elif InflationType == "Relaxation":
646         raise NotImplementedError("InflationType Relaxation")
647     #
648     else:
649         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
650     #
651     return OutputCovOrEns
652
653 # ==============================================================================
654 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
655     "Estimation des quantiles a posteriori (selfA est modifié)"
656     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
657     #
658     # Traitement des bornes
659     if "StateBoundsForQuantiles" in selfA._parameters:
660         LBounds = selfA._parameters["StateBoundsForQuantiles"] # Prioritaire
661     elif "Bounds" in selfA._parameters:
662         LBounds = selfA._parameters["Bounds"]  # Défaut raisonnable
663     else:
664         LBounds = None
665     if LBounds is not None:
666         LBounds = ForceNumericBounds( LBounds )
667     _Xa = numpy.ravel(Xa)
668     #
669     # Échantillonnage des états
670     YfQ  = None
671     EXr  = None
672     for i in range(nbsamples):
673         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
674             dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
675             if LBounds is not None: # "EstimateProjection" par défaut
676                 dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
677                 dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
678             dYr = HtM @ dXr
679             Yr = HXa.reshape((-1,1)) + dYr
680             if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
681         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
682             Xr = numpy.random.multivariate_normal(_Xa,A)
683             if LBounds is not None: # "EstimateProjection" par défaut
684                 Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
685                 Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
686             Yr = Hm( Xr )
687         else:
688             raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
689         #
690         if YfQ is None:
691             YfQ = Yr.reshape((-1,1))
692             if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
693         else:
694             YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
695             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
696     #
697     # Extraction des quantiles
698     YfQ.sort(axis=-1)
699     YQ = None
700     for quantile in selfA._parameters["Quantiles"]:
701         if not (0. <= float(quantile) <= 1.): continue
702         indice = int(nbsamples * float(quantile) - 1./nbsamples)
703         if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
704         else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
705     if YQ is not None: # Liste non vide de quantiles
706         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
707     if selfA._toStore("SampledStateForQuantiles"):
708         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
709     #
710     return 0
711
712 # ==============================================================================
713 def ForceNumericBounds( __Bounds ):
714     "Force les bornes à être des valeurs numériques, sauf si globalement None"
715     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
716     if __Bounds is None: return None
717     # Converti toutes les bornes individuelles None à +/- l'infini
718     __Bounds = numpy.asarray( __Bounds, dtype=float )
719     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
720         raise ValueError("Incorrectly shaped bounds data")
721     __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
722     __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
723     return __Bounds
724
725 # ==============================================================================
726 def RecentredBounds( __Bounds, __Center):
727     "Recentre les bornes autour de 0, sauf si globalement None"
728     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
729     if __Bounds is None: return None
730     # Recentre les valeurs numériques de bornes
731     return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).transpose((-1,1))
732
733 # ==============================================================================
734 def ApplyBounds( __Vector, __Bounds, __newClip = True):
735     "Applique des bornes numériques à un point"
736     # Conserve une valeur par défaut s'il n'y a pas de bornes
737     if __Bounds is None: return __Vector
738     #
739     if not isinstance(__Vector, numpy.ndarray): # Is an array
740         raise ValueError("Incorrect array definition of vector data")
741     if not isinstance(__Bounds, numpy.ndarray): # Is an array
742         raise ValueError("Incorrect array definition of bounds data")
743     if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector lenght
744         raise ValueError("Incorrect bounds number to be applied for this vector")
745     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
746         raise ValueError("Incorrectly shaped bounds data")
747     #
748     if __newClip:
749         __Vector = __Vector.clip(
750             __Bounds[:,0].reshape(__Vector.shape),
751             __Bounds[:,1].reshape(__Vector.shape),
752             )
753     else:
754         __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
755         __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
756         __Vector = numpy.asarray(__Vector)
757     #
758     return __Vector
759
760 # ==============================================================================
761 def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
762     """
763     Contrained Extended Kalman Filter
764     """
765     if selfA._parameters["EstimationOf"] == "Parameters":
766         selfA._parameters["StoreInternalVariables"] = True
767     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
768     #
769     # Opérateurs
770     H = HO["Direct"].appliedControledFormTo
771     #
772     if selfA._parameters["EstimationOf"] == "State":
773         M = EM["Direct"].appliedControledFormTo
774     #
775     if CM is not None and "Tangent" in CM and U is not None:
776         Cm = CM["Tangent"].asMatrix(Xb)
777     else:
778         Cm = None
779     #
780     # Durée d'observation et tailles
781     if hasattr(Y,"stepnumber"):
782         duration = Y.stepnumber()
783         __p = numpy.cumprod(Y.shape())[-1]
784     else:
785         duration = 2
786         __p = numpy.array(Y).size
787     #
788     # Précalcul des inversions de B et R
789     if selfA._parameters["StoreInternalVariables"] \
790         or selfA._toStore("CostFunctionJ") \
791         or selfA._toStore("CostFunctionJb") \
792         or selfA._toStore("CostFunctionJo") \
793         or selfA._toStore("CurrentOptimum") \
794         or selfA._toStore("APosterioriCovariance"):
795         BI = B.getI()
796         RI = R.getI()
797     #
798     __n = Xb.size
799     #
800     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
801         Xn = Xb
802         Pn = B
803         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
804         selfA.StoredVariables["Analysis"].store( Xb )
805         if selfA._toStore("APosterioriCovariance"):
806             if hasattr(B,"asfullmatrix"):
807                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
808             else:
809                 selfA.StoredVariables["APosterioriCovariance"].store( B )
810         selfA._setInternalState("seed", numpy.random.get_state())
811     elif selfA._parameters["nextStep"]:
812         Xn = selfA._getInternalState("Xn")
813         Pn = selfA._getInternalState("Pn")
814     #
815     if selfA._parameters["EstimationOf"] == "Parameters":
816         XaMin            = Xn
817         previousJMinimum = numpy.finfo(float).max
818     #
819     for step in range(duration-1):
820         if hasattr(Y,"store"):
821             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
822         else:
823             Ynpu = numpy.ravel( Y ).reshape((__p,1))
824         #
825         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
826         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
827         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
828         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
829         #
830         if selfA._parameters["EstimationOf"] == "State":
831             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
832             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
833             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
834             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
835         #
836         if U is not None:
837             if hasattr(U,"store") and len(U)>1:
838                 Un = numpy.ravel( U[step] ).reshape((-1,1))
839             elif hasattr(U,"store") and len(U)==1:
840                 Un = numpy.ravel( U[0] ).reshape((-1,1))
841             else:
842                 Un = numpy.ravel( U ).reshape((-1,1))
843         else:
844             Un = None
845         #
846         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
847             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
848         #
849         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
850             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
851             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
852                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
853                 Xn_predicted = Xn_predicted + Cm * Un
854             Pn_predicted = Q + Mt * (Pn * Ma)
855         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
856             # --- > Par principe, M = Id, Q = 0
857             Xn_predicted = Xn
858             Pn_predicted = Pn
859         #
860         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
861             Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
862         #
863         if selfA._parameters["EstimationOf"] == "State":
864             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
865             _Innovation  = Ynpu - HX_predicted
866         elif selfA._parameters["EstimationOf"] == "Parameters":
867             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
868             _Innovation  = Ynpu - HX_predicted
869             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
870                 _Innovation = _Innovation - Cm * Un
871         #
872         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
873         Xn = Xn_predicted + Kn * _Innovation
874         Pn = Pn_predicted - Kn * Ht * Pn_predicted
875         #
876         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
877             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
878         #
879         Xa = Xn # Pointeurs
880         #--------------------------
881         selfA._setInternalState("Xn", Xn)
882         selfA._setInternalState("Pn", Pn)
883         #--------------------------
884         #
885         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
886         # ---> avec analysis
887         selfA.StoredVariables["Analysis"].store( Xa )
888         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
889             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
890         if selfA._toStore("InnovationAtCurrentAnalysis"):
891             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
892         # ---> avec current state
893         if selfA._parameters["StoreInternalVariables"] \
894             or selfA._toStore("CurrentState"):
895             selfA.StoredVariables["CurrentState"].store( Xn )
896         if selfA._toStore("ForecastState"):
897             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
898         if selfA._toStore("ForecastCovariance"):
899             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
900         if selfA._toStore("BMA"):
901             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
902         if selfA._toStore("InnovationAtCurrentState"):
903             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
904         if selfA._toStore("SimulatedObservationAtCurrentState") \
905             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
906             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
907         # ---> autres
908         if selfA._parameters["StoreInternalVariables"] \
909             or selfA._toStore("CostFunctionJ") \
910             or selfA._toStore("CostFunctionJb") \
911             or selfA._toStore("CostFunctionJo") \
912             or selfA._toStore("CurrentOptimum") \
913             or selfA._toStore("APosterioriCovariance"):
914             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
915             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
916             J   = Jb + Jo
917             selfA.StoredVariables["CostFunctionJb"].store( Jb )
918             selfA.StoredVariables["CostFunctionJo"].store( Jo )
919             selfA.StoredVariables["CostFunctionJ" ].store( J )
920             #
921             if selfA._toStore("IndexOfOptimum") \
922                 or selfA._toStore("CurrentOptimum") \
923                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
924                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
925                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
926                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
927                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
928             if selfA._toStore("IndexOfOptimum"):
929                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
930             if selfA._toStore("CurrentOptimum"):
931                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
932             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
933                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
934             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
935                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
936             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
937                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
938             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
939                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
940         if selfA._toStore("APosterioriCovariance"):
941             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
942         if selfA._parameters["EstimationOf"] == "Parameters" \
943             and J < previousJMinimum:
944             previousJMinimum    = J
945             XaMin               = Xa
946             if selfA._toStore("APosterioriCovariance"):
947                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
948     #
949     # Stockage final supplémentaire de l'optimum en estimation de paramètres
950     # ----------------------------------------------------------------------
951     if selfA._parameters["EstimationOf"] == "Parameters":
952         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
953         selfA.StoredVariables["Analysis"].store( XaMin )
954         if selfA._toStore("APosterioriCovariance"):
955             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
956         if selfA._toStore("BMA"):
957             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
958     #
959     return 0
960
961 # ==============================================================================
962 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
963     """
964     EnKS
965     """
966     #
967     # Opérateurs
968     H = HO["Direct"].appliedControledFormTo
969     #
970     if selfA._parameters["EstimationOf"] == "State":
971         M = EM["Direct"].appliedControledFormTo
972     #
973     if CM is not None and "Tangent" in CM and U is not None:
974         Cm = CM["Tangent"].asMatrix(Xb)
975     else:
976         Cm = None
977     #
978     # Précalcul des inversions de B et R
979     RIdemi = R.sqrtmI()
980     #
981     # Durée d'observation et tailles
982     LagL = selfA._parameters["SmootherLagL"]
983     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
984         raise ValueError("Fixed-lag smoother requires a series of observation")
985     if Y.stepnumber() < LagL:
986         raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
987     duration = Y.stepnumber()
988     __p = numpy.cumprod(Y.shape())[-1]
989     __n = Xb.size
990     __m = selfA._parameters["NumberOfMembers"]
991     #
992     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
993         selfA.StoredVariables["Analysis"].store( Xb )
994         if selfA._toStore("APosterioriCovariance"):
995             if hasattr(B,"asfullmatrix"):
996                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
997             else:
998                 selfA.StoredVariables["APosterioriCovariance"].store( B )
999     #
1000     # Calcul direct initial (on privilégie la mémorisation au recalcul)
1001     __seed = numpy.random.get_state()
1002     selfB = copy.deepcopy(selfA)
1003     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
1004     if VariantM == "EnKS16-KalmanFilterFormula":
1005         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
1006     else:
1007         raise ValueError("VariantM has to be chosen in the authorized methods list.")
1008     if LagL > 0:
1009         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
1010     else:
1011         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
1012     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
1013     #
1014     for step in range(LagL,duration-1):
1015         #
1016         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
1017         sEL.append(None)
1018         #
1019         if hasattr(Y,"store"):
1020             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1021         else:
1022             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1023         #
1024         if U is not None:
1025             if hasattr(U,"store") and len(U)>1:
1026                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1027             elif hasattr(U,"store") and len(U)==1:
1028                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1029             else:
1030                 Un = numpy.asmatrix(numpy.ravel( U )).T
1031         else:
1032             Un = None
1033         #
1034         #--------------------------
1035         if VariantM == "EnKS16-KalmanFilterFormula":
1036             if selfA._parameters["EstimationOf"] == "State": # Forecast
1037                 EL = M( [(EL[:,i], Un) for i in range(__m)],
1038                     argsAsSerie = True,
1039                     returnSerieAsArrayMatrix = True )
1040                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
1041                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1042                     argsAsSerie = True,
1043                     returnSerieAsArrayMatrix = True )
1044                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1045                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1046                     EZ = EZ + Cm * Un
1047             elif selfA._parameters["EstimationOf"] == "Parameters":
1048                 # --- > Par principe, M = Id, Q = 0
1049                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1050                     argsAsSerie = True,
1051                     returnSerieAsArrayMatrix = True )
1052             #
1053             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1054             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1055             #
1056             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
1057             mS    = mS.reshape((-1,__m)) # Pour dimension 1
1058             delta = RIdemi @ ( Ynpu - vZm )
1059             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1060             vw    = mT @ mS.T @ delta
1061             #
1062             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1063             mU    = numpy.identity(__m)
1064             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
1065             #
1066             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
1067             EL    = vEm + EX @ wTU
1068             #
1069             sEL[LagL] = EL
1070             for irl in range(LagL): # Lissage des L précédentes analysis
1071                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1072                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
1073                 sEL[irl] = vEm + EX @ wTU
1074             #
1075             # Conservation de l'analyse retrospective d'ordre 0 avant rotation
1076             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1077             if selfA._toStore("APosterioriCovariance"):
1078                 EXn = sEL[0]
1079             #
1080             for irl in range(LagL):
1081                 sEL[irl] = sEL[irl+1]
1082             sEL[LagL] = None
1083         #--------------------------
1084         else:
1085             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1086         #
1087         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1088         # ---> avec analysis
1089         selfA.StoredVariables["Analysis"].store( Xa )
1090         if selfA._toStore("APosterioriCovariance"):
1091             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
1092     #
1093     # Stockage des dernières analyses incomplètement remises à jour
1094     for irl in range(LagL):
1095         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1096         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1097         selfA.StoredVariables["Analysis"].store( Xa )
1098     #
1099     return 0
1100
1101 # ==============================================================================
1102 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
1103     """
1104     Ensemble-Transform EnKF
1105     """
1106     if selfA._parameters["EstimationOf"] == "Parameters":
1107         selfA._parameters["StoreInternalVariables"] = True
1108     #
1109     # Opérateurs
1110     H = HO["Direct"].appliedControledFormTo
1111     #
1112     if selfA._parameters["EstimationOf"] == "State":
1113         M = EM["Direct"].appliedControledFormTo
1114     #
1115     if CM is not None and "Tangent" in CM and U is not None:
1116         Cm = CM["Tangent"].asMatrix(Xb)
1117     else:
1118         Cm = None
1119     #
1120     # Durée d'observation et tailles
1121     if hasattr(Y,"stepnumber"):
1122         duration = Y.stepnumber()
1123         __p = numpy.cumprod(Y.shape())[-1]
1124     else:
1125         duration = 2
1126         __p = numpy.array(Y).size
1127     #
1128     # Précalcul des inversions de B et R
1129     if selfA._parameters["StoreInternalVariables"] \
1130         or selfA._toStore("CostFunctionJ") \
1131         or selfA._toStore("CostFunctionJb") \
1132         or selfA._toStore("CostFunctionJo") \
1133         or selfA._toStore("CurrentOptimum") \
1134         or selfA._toStore("APosterioriCovariance"):
1135         BI = B.getI()
1136         RI = R.getI()
1137     elif VariantM != "KalmanFilterFormula":
1138         RI = R.getI()
1139     if VariantM == "KalmanFilterFormula":
1140         RIdemi = R.sqrtmI()
1141     #
1142     __n = Xb.size
1143     __m = selfA._parameters["NumberOfMembers"]
1144     #
1145     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1146         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1147         selfA.StoredVariables["Analysis"].store( Xb )
1148         if selfA._toStore("APosterioriCovariance"):
1149             if hasattr(B,"asfullmatrix"):
1150                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1151             else:
1152                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1153         selfA._setInternalState("seed", numpy.random.get_state())
1154     elif selfA._parameters["nextStep"]:
1155         Xn = selfA._getInternalState("Xn")
1156     #
1157     previousJMinimum = numpy.finfo(float).max
1158     #
1159     for step in range(duration-1):
1160         numpy.random.set_state(selfA._getInternalState("seed"))
1161         if hasattr(Y,"store"):
1162             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1163         else:
1164             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1165         #
1166         if U is not None:
1167             if hasattr(U,"store") and len(U)>1:
1168                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1169             elif hasattr(U,"store") and len(U)==1:
1170                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1171             else:
1172                 Un = numpy.asmatrix(numpy.ravel( U )).T
1173         else:
1174             Un = None
1175         #
1176         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1177             Xn = CovarianceInflation( Xn,
1178                 selfA._parameters["InflationType"],
1179                 selfA._parameters["InflationFactor"],
1180                 )
1181         #
1182         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1183             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1184                 argsAsSerie = True,
1185                 returnSerieAsArrayMatrix = True )
1186             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1187             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1188                 argsAsSerie = True,
1189                 returnSerieAsArrayMatrix = True )
1190             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1191                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1192                 Xn_predicted = Xn_predicted + Cm * Un
1193         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1194             # --- > Par principe, M = Id, Q = 0
1195             Xn_predicted = EMX = Xn
1196             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1197                 argsAsSerie = True,
1198                 returnSerieAsArrayMatrix = True )
1199         #
1200         # Mean of forecast and observation of forecast
1201         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1202         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1203         #
1204         # Anomalies
1205         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
1206         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
1207         #
1208         #--------------------------
1209         if VariantM == "KalmanFilterFormula":
1210             mS    = RIdemi * EaHX / math.sqrt(__m-1)
1211             mS    = mS.reshape((-1,__m)) # Pour dimension 1
1212             delta = RIdemi * ( Ynpu - Hfm )
1213             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1214             vw    = mT @ mS.T @ delta
1215             #
1216             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1217             mU    = numpy.identity(__m)
1218             #
1219             EaX   = EaX / math.sqrt(__m-1)
1220             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
1221         #--------------------------
1222         elif VariantM == "Variational":
1223             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1224             def CostFunction(w):
1225                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1226                 _Jo = 0.5 * _A.T @ (RI * _A)
1227                 _Jb = 0.5 * (__m-1) * w.T @ w
1228                 _J  = _Jo + _Jb
1229                 return float(_J)
1230             def GradientOfCostFunction(w):
1231                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1232                 _GardJo = - EaHX.T @ (RI * _A)
1233                 _GradJb = (__m-1) * w.reshape((__m,1))
1234                 _GradJ  = _GardJo + _GradJb
1235                 return numpy.ravel(_GradJ)
1236             vw = scipy.optimize.fmin_cg(
1237                 f           = CostFunction,
1238                 x0          = numpy.zeros(__m),
1239                 fprime      = GradientOfCostFunction,
1240                 args        = (),
1241                 disp        = False,
1242                 )
1243             #
1244             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1245             Htb = (__m-1) * numpy.identity(__m)
1246             Hta = Hto + Htb
1247             #
1248             Pta = numpy.linalg.inv( Hta )
1249             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1250             #
1251             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1252         #--------------------------
1253         elif VariantM == "FiniteSize11": # Jauge Boc2011
1254             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1255             def CostFunction(w):
1256                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1257                 _Jo = 0.5 * _A.T @ (RI * _A)
1258                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1259                 _J  = _Jo + _Jb
1260                 return float(_J)
1261             def GradientOfCostFunction(w):
1262                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1263                 _GardJo = - EaHX.T @ (RI * _A)
1264                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1265                 _GradJ  = _GardJo + _GradJb
1266                 return numpy.ravel(_GradJ)
1267             vw = scipy.optimize.fmin_cg(
1268                 f           = CostFunction,
1269                 x0          = numpy.zeros(__m),
1270                 fprime      = GradientOfCostFunction,
1271                 args        = (),
1272                 disp        = False,
1273                 )
1274             #
1275             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1276             Htb = __m * \
1277                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1278                 / (1 + 1/__m + vw.T @ vw)**2
1279             Hta = Hto + Htb
1280             #
1281             Pta = numpy.linalg.inv( Hta )
1282             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1283             #
1284             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1285         #--------------------------
1286         elif VariantM == "FiniteSize15": # Jauge Boc2015
1287             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1288             def CostFunction(w):
1289                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1290                 _Jo = 0.5 * _A.T * RI * _A
1291                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1292                 _J  = _Jo + _Jb
1293                 return float(_J)
1294             def GradientOfCostFunction(w):
1295                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1296                 _GardJo = - EaHX.T @ (RI * _A)
1297                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1298                 _GradJ  = _GardJo + _GradJb
1299                 return numpy.ravel(_GradJ)
1300             vw = scipy.optimize.fmin_cg(
1301                 f           = CostFunction,
1302                 x0          = numpy.zeros(__m),
1303                 fprime      = GradientOfCostFunction,
1304                 args        = (),
1305                 disp        = False,
1306                 )
1307             #
1308             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1309             Htb = (__m+1) * \
1310                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1311                 / (1 + 1/__m + vw.T @ vw)**2
1312             Hta = Hto + Htb
1313             #
1314             Pta = numpy.linalg.inv( Hta )
1315             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1316             #
1317             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1318         #--------------------------
1319         elif VariantM == "FiniteSize16": # Jauge Boc2016
1320             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1321             def CostFunction(w):
1322                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1323                 _Jo = 0.5 * _A.T @ (RI * _A)
1324                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1325                 _J  = _Jo + _Jb
1326                 return float(_J)
1327             def GradientOfCostFunction(w):
1328                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1329                 _GardJo = - EaHX.T @ (RI * _A)
1330                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1331                 _GradJ  = _GardJo + _GradJb
1332                 return numpy.ravel(_GradJ)
1333             vw = scipy.optimize.fmin_cg(
1334                 f           = CostFunction,
1335                 x0          = numpy.zeros(__m),
1336                 fprime      = GradientOfCostFunction,
1337                 args        = (),
1338                 disp        = False,
1339                 )
1340             #
1341             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1342             Htb = ((__m+1) / (__m-1)) * \
1343                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1344                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1345             Hta = Hto + Htb
1346             #
1347             Pta = numpy.linalg.inv( Hta )
1348             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1349             #
1350             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1351         #--------------------------
1352         else:
1353             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1354         #
1355         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1356             Xn = CovarianceInflation( Xn,
1357                 selfA._parameters["InflationType"],
1358                 selfA._parameters["InflationFactor"],
1359                 )
1360         #
1361         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1362         #--------------------------
1363         selfA._setInternalState("Xn", Xn)
1364         selfA._setInternalState("seed", numpy.random.get_state())
1365         #--------------------------
1366         #
1367         if selfA._parameters["StoreInternalVariables"] \
1368             or selfA._toStore("CostFunctionJ") \
1369             or selfA._toStore("CostFunctionJb") \
1370             or selfA._toStore("CostFunctionJo") \
1371             or selfA._toStore("APosterioriCovariance") \
1372             or selfA._toStore("InnovationAtCurrentAnalysis") \
1373             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1374             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1375             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1376             _Innovation = Ynpu - _HXa
1377         #
1378         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1379         # ---> avec analysis
1380         selfA.StoredVariables["Analysis"].store( Xa )
1381         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1382             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1383         if selfA._toStore("InnovationAtCurrentAnalysis"):
1384             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1385         # ---> avec current state
1386         if selfA._parameters["StoreInternalVariables"] \
1387             or selfA._toStore("CurrentState"):
1388             selfA.StoredVariables["CurrentState"].store( Xn )
1389         if selfA._toStore("ForecastState"):
1390             selfA.StoredVariables["ForecastState"].store( EMX )
1391         if selfA._toStore("ForecastCovariance"):
1392             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1393         if selfA._toStore("BMA"):
1394             selfA.StoredVariables["BMA"].store( EMX - Xa )
1395         if selfA._toStore("InnovationAtCurrentState"):
1396             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1397         if selfA._toStore("SimulatedObservationAtCurrentState") \
1398             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1399             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1400         # ---> autres
1401         if selfA._parameters["StoreInternalVariables"] \
1402             or selfA._toStore("CostFunctionJ") \
1403             or selfA._toStore("CostFunctionJb") \
1404             or selfA._toStore("CostFunctionJo") \
1405             or selfA._toStore("CurrentOptimum") \
1406             or selfA._toStore("APosterioriCovariance"):
1407             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1408             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1409             J   = Jb + Jo
1410             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1411             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1412             selfA.StoredVariables["CostFunctionJ" ].store( J )
1413             #
1414             if selfA._toStore("IndexOfOptimum") \
1415                 or selfA._toStore("CurrentOptimum") \
1416                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1417                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1418                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1419                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1420                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1421             if selfA._toStore("IndexOfOptimum"):
1422                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1423             if selfA._toStore("CurrentOptimum"):
1424                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1425             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1426                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1427             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1428                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1429             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1430                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1431             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1432                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1433         if selfA._toStore("APosterioriCovariance"):
1434             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1435         if selfA._parameters["EstimationOf"] == "Parameters" \
1436             and J < previousJMinimum:
1437             previousJMinimum    = J
1438             XaMin               = Xa
1439             if selfA._toStore("APosterioriCovariance"):
1440                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1441         # ---> Pour les smoothers
1442         if selfA._toStore("CurrentEnsembleState"):
1443             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1444     #
1445     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1446     # ----------------------------------------------------------------------
1447     if selfA._parameters["EstimationOf"] == "Parameters":
1448         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1449         selfA.StoredVariables["Analysis"].store( XaMin )
1450         if selfA._toStore("APosterioriCovariance"):
1451             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1452         if selfA._toStore("BMA"):
1453             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1454     #
1455     return 0
1456
1457 # ==============================================================================
1458 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1459     """
1460     Extended Kalman Filter
1461     """
1462     if selfA._parameters["EstimationOf"] == "Parameters":
1463         selfA._parameters["StoreInternalVariables"] = True
1464     #
1465     # Opérateurs
1466     H = HO["Direct"].appliedControledFormTo
1467     #
1468     if selfA._parameters["EstimationOf"] == "State":
1469         M = EM["Direct"].appliedControledFormTo
1470     #
1471     if CM is not None and "Tangent" in CM and U is not None:
1472         Cm = CM["Tangent"].asMatrix(Xb)
1473     else:
1474         Cm = None
1475     #
1476     # Durée d'observation et tailles
1477     if hasattr(Y,"stepnumber"):
1478         duration = Y.stepnumber()
1479         __p = numpy.cumprod(Y.shape())[-1]
1480     else:
1481         duration = 2
1482         __p = numpy.array(Y).size
1483     #
1484     # Précalcul des inversions de B et R
1485     if selfA._parameters["StoreInternalVariables"] \
1486         or selfA._toStore("CostFunctionJ") \
1487         or selfA._toStore("CostFunctionJb") \
1488         or selfA._toStore("CostFunctionJo") \
1489         or selfA._toStore("CurrentOptimum") \
1490         or selfA._toStore("APosterioriCovariance"):
1491         BI = B.getI()
1492         RI = R.getI()
1493     #
1494     __n = Xb.size
1495     #
1496     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1497         Xn = Xb
1498         Pn = B
1499         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1500         selfA.StoredVariables["Analysis"].store( Xb )
1501         if selfA._toStore("APosterioriCovariance"):
1502             if hasattr(B,"asfullmatrix"):
1503                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1504             else:
1505                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1506         selfA._setInternalState("seed", numpy.random.get_state())
1507     elif selfA._parameters["nextStep"]:
1508         Xn = selfA._getInternalState("Xn")
1509         Pn = selfA._getInternalState("Pn")
1510     #
1511     if selfA._parameters["EstimationOf"] == "Parameters":
1512         XaMin            = Xn
1513         previousJMinimum = numpy.finfo(float).max
1514     #
1515     for step in range(duration-1):
1516         if hasattr(Y,"store"):
1517             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1518         else:
1519             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1520         #
1521         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1522         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1523         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1524         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1525         #
1526         if selfA._parameters["EstimationOf"] == "State":
1527             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1528             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1529             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1530             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1531         #
1532         if U is not None:
1533             if hasattr(U,"store") and len(U)>1:
1534                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1535             elif hasattr(U,"store") and len(U)==1:
1536                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1537             else:
1538                 Un = numpy.ravel( U ).reshape((-1,1))
1539         else:
1540             Un = None
1541         #
1542         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1543             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1544             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1545                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1546                 Xn_predicted = Xn_predicted + Cm * Un
1547             Pn_predicted = Q + Mt * (Pn * Ma)
1548         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1549             # --- > Par principe, M = Id, Q = 0
1550             Xn_predicted = Xn
1551             Pn_predicted = Pn
1552         #
1553         if selfA._parameters["EstimationOf"] == "State":
1554             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1555             _Innovation  = Ynpu - HX_predicted
1556         elif selfA._parameters["EstimationOf"] == "Parameters":
1557             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1558             _Innovation  = Ynpu - HX_predicted
1559             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
1560                 _Innovation = _Innovation - Cm * Un
1561         #
1562         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1563         Xn = Xn_predicted + Kn * _Innovation
1564         Pn = Pn_predicted - Kn * Ht * Pn_predicted
1565         #
1566         Xa = Xn # Pointeurs
1567         #--------------------------
1568         selfA._setInternalState("Xn", Xn)
1569         selfA._setInternalState("Pn", Pn)
1570         #--------------------------
1571         #
1572         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1573         # ---> avec analysis
1574         selfA.StoredVariables["Analysis"].store( Xa )
1575         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1576             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1577         if selfA._toStore("InnovationAtCurrentAnalysis"):
1578             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1579         # ---> avec current state
1580         if selfA._parameters["StoreInternalVariables"] \
1581             or selfA._toStore("CurrentState"):
1582             selfA.StoredVariables["CurrentState"].store( Xn )
1583         if selfA._toStore("ForecastState"):
1584             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1585         if selfA._toStore("ForecastCovariance"):
1586             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1587         if selfA._toStore("BMA"):
1588             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1589         if selfA._toStore("InnovationAtCurrentState"):
1590             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1591         if selfA._toStore("SimulatedObservationAtCurrentState") \
1592             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1593             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1594         # ---> autres
1595         if selfA._parameters["StoreInternalVariables"] \
1596             or selfA._toStore("CostFunctionJ") \
1597             or selfA._toStore("CostFunctionJb") \
1598             or selfA._toStore("CostFunctionJo") \
1599             or selfA._toStore("CurrentOptimum") \
1600             or selfA._toStore("APosterioriCovariance"):
1601             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1602             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1603             J   = Jb + Jo
1604             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1605             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1606             selfA.StoredVariables["CostFunctionJ" ].store( J )
1607             #
1608             if selfA._toStore("IndexOfOptimum") \
1609                 or selfA._toStore("CurrentOptimum") \
1610                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1611                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1612                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1613                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1614                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1615             if selfA._toStore("IndexOfOptimum"):
1616                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1617             if selfA._toStore("CurrentOptimum"):
1618                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1619             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1620                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1621             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1622                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1623             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1624                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1625             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1626                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1627         if selfA._toStore("APosterioriCovariance"):
1628             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1629         if selfA._parameters["EstimationOf"] == "Parameters" \
1630             and J < previousJMinimum:
1631             previousJMinimum    = J
1632             XaMin               = Xa
1633             if selfA._toStore("APosterioriCovariance"):
1634                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1635     #
1636     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1637     # ----------------------------------------------------------------------
1638     if selfA._parameters["EstimationOf"] == "Parameters":
1639         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1640         selfA.StoredVariables["Analysis"].store( XaMin )
1641         if selfA._toStore("APosterioriCovariance"):
1642             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1643         if selfA._toStore("BMA"):
1644             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1645     #
1646     return 0
1647
1648 # ==============================================================================
1649 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1650     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1651     """
1652     Iterative EnKF
1653     """
1654     if selfA._parameters["EstimationOf"] == "Parameters":
1655         selfA._parameters["StoreInternalVariables"] = True
1656     #
1657     # Opérateurs
1658     H = HO["Direct"].appliedControledFormTo
1659     #
1660     if selfA._parameters["EstimationOf"] == "State":
1661         M = EM["Direct"].appliedControledFormTo
1662     #
1663     if CM is not None and "Tangent" in CM and U is not None:
1664         Cm = CM["Tangent"].asMatrix(Xb)
1665     else:
1666         Cm = None
1667     #
1668     # Durée d'observation et tailles
1669     if hasattr(Y,"stepnumber"):
1670         duration = Y.stepnumber()
1671         __p = numpy.cumprod(Y.shape())[-1]
1672     else:
1673         duration = 2
1674         __p = numpy.array(Y).size
1675     #
1676     # Précalcul des inversions de B et R
1677     if selfA._parameters["StoreInternalVariables"] \
1678         or selfA._toStore("CostFunctionJ") \
1679         or selfA._toStore("CostFunctionJb") \
1680         or selfA._toStore("CostFunctionJo") \
1681         or selfA._toStore("CurrentOptimum") \
1682         or selfA._toStore("APosterioriCovariance"):
1683         BI = B.getI()
1684     RI = R.getI()
1685     #
1686     __n = Xb.size
1687     __m = selfA._parameters["NumberOfMembers"]
1688     #
1689     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1690         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1691         else:                         Pn = B
1692         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1693         selfA.StoredVariables["Analysis"].store( Xb )
1694         if selfA._toStore("APosterioriCovariance"):
1695             if hasattr(B,"asfullmatrix"):
1696                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1697             else:
1698                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1699         selfA._setInternalState("seed", numpy.random.get_state())
1700     elif selfA._parameters["nextStep"]:
1701         Xn = selfA._getInternalState("Xn")
1702     #
1703     previousJMinimum = numpy.finfo(float).max
1704     #
1705     for step in range(duration-1):
1706         numpy.random.set_state(selfA._getInternalState("seed"))
1707         if hasattr(Y,"store"):
1708             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1709         else:
1710             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1711         #
1712         if U is not None:
1713             if hasattr(U,"store") and len(U)>1:
1714                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1715             elif hasattr(U,"store") and len(U)==1:
1716                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1717             else:
1718                 Un = numpy.asmatrix(numpy.ravel( U )).T
1719         else:
1720             Un = None
1721         #
1722         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1723             Xn = CovarianceInflation( Xn,
1724                 selfA._parameters["InflationType"],
1725                 selfA._parameters["InflationFactor"],
1726                 )
1727         #
1728         #--------------------------
1729         if VariantM == "IEnKF12":
1730             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1731             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1732             __j = 0
1733             Deltaw = 1
1734             if not BnotT:
1735                 Ta  = numpy.identity(__m)
1736             vw  = numpy.zeros(__m)
1737             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1738                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1739                 #
1740                 if BnotT:
1741                     E1 = vx1 + _epsilon * EaX
1742                 else:
1743                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1744                 #
1745                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1746                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1747                         argsAsSerie = True,
1748                         returnSerieAsArrayMatrix = True )
1749                 elif selfA._parameters["EstimationOf"] == "Parameters":
1750                     # --- > Par principe, M = Id
1751                     E2 = Xn
1752                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1753                 vy1 = H((vx2, Un)).reshape((__p,1))
1754                 #
1755                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1756                     argsAsSerie = True,
1757                     returnSerieAsArrayMatrix = True )
1758                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1759                 #
1760                 if BnotT:
1761                     EaY = (HE2 - vy2) / _epsilon
1762                 else:
1763                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1764                 #
1765                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1766                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1767                 Deltaw = - numpy.linalg.solve(mH,GradJ)
1768                 #
1769                 vw = vw + Deltaw
1770                 #
1771                 if not BnotT:
1772                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1773                 #
1774                 __j = __j + 1
1775             #
1776             A2 = EnsembleOfAnomalies( E2 )
1777             #
1778             if BnotT:
1779                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1780                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1781             #
1782             Xn = vx2 + A2
1783         #--------------------------
1784         else:
1785             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1786         #
1787         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1788             Xn = CovarianceInflation( Xn,
1789                 selfA._parameters["InflationType"],
1790                 selfA._parameters["InflationFactor"],
1791                 )
1792         #
1793         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1794         #--------------------------
1795         selfA._setInternalState("Xn", Xn)
1796         selfA._setInternalState("seed", numpy.random.get_state())
1797         #--------------------------
1798         #
1799         if selfA._parameters["StoreInternalVariables"] \
1800             or selfA._toStore("CostFunctionJ") \
1801             or selfA._toStore("CostFunctionJb") \
1802             or selfA._toStore("CostFunctionJo") \
1803             or selfA._toStore("APosterioriCovariance") \
1804             or selfA._toStore("InnovationAtCurrentAnalysis") \
1805             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1806             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1807             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1808             _Innovation = Ynpu - _HXa
1809         #
1810         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1811         # ---> avec analysis
1812         selfA.StoredVariables["Analysis"].store( Xa )
1813         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1814             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1815         if selfA._toStore("InnovationAtCurrentAnalysis"):
1816             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1817         # ---> avec current state
1818         if selfA._parameters["StoreInternalVariables"] \
1819             or selfA._toStore("CurrentState"):
1820             selfA.StoredVariables["CurrentState"].store( Xn )
1821         if selfA._toStore("ForecastState"):
1822             selfA.StoredVariables["ForecastState"].store( E2 )
1823         if selfA._toStore("ForecastCovariance"):
1824             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
1825         if selfA._toStore("BMA"):
1826             selfA.StoredVariables["BMA"].store( E2 - Xa )
1827         if selfA._toStore("InnovationAtCurrentState"):
1828             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1829         if selfA._toStore("SimulatedObservationAtCurrentState") \
1830             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1831             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1832         # ---> autres
1833         if selfA._parameters["StoreInternalVariables"] \
1834             or selfA._toStore("CostFunctionJ") \
1835             or selfA._toStore("CostFunctionJb") \
1836             or selfA._toStore("CostFunctionJo") \
1837             or selfA._toStore("CurrentOptimum") \
1838             or selfA._toStore("APosterioriCovariance"):
1839             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1840             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
1841             J   = Jb + Jo
1842             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1843             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1844             selfA.StoredVariables["CostFunctionJ" ].store( J )
1845             #
1846             if selfA._toStore("IndexOfOptimum") \
1847                 or selfA._toStore("CurrentOptimum") \
1848                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1849                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1850                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1851                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1852                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1853             if selfA._toStore("IndexOfOptimum"):
1854                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1855             if selfA._toStore("CurrentOptimum"):
1856                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1857             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1858                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1859             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1860                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1861             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1862                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1863             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1864                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1865         if selfA._toStore("APosterioriCovariance"):
1866             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1867         if selfA._parameters["EstimationOf"] == "Parameters" \
1868             and J < previousJMinimum:
1869             previousJMinimum    = J
1870             XaMin               = Xa
1871             if selfA._toStore("APosterioriCovariance"):
1872                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1873         # ---> Pour les smoothers
1874         if selfA._toStore("CurrentEnsembleState"):
1875             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1876     #
1877     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1878     # ----------------------------------------------------------------------
1879     if selfA._parameters["EstimationOf"] == "Parameters":
1880         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1881         selfA.StoredVariables["Analysis"].store( XaMin )
1882         if selfA._toStore("APosterioriCovariance"):
1883             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1884         if selfA._toStore("BMA"):
1885             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1886     #
1887     return 0
1888
1889 # ==============================================================================
1890 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1891     """
1892     3DVAR incrémental
1893     """
1894     #
1895     # Initialisations
1896     # ---------------
1897     #
1898     # Opérateur non-linéaire pour la boucle externe
1899     Hm = HO["Direct"].appliedTo
1900     #
1901     # Précalcul des inversions de B et R
1902     BI = B.getI()
1903     RI = R.getI()
1904     #
1905     # Point de démarrage de l'optimisation
1906     Xini = selfA._parameters["InitializationPoint"]
1907     #
1908     HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1909     Innovation = Y - HXb
1910     #
1911     # Outer Loop
1912     # ----------
1913     iOuter = 0
1914     J      = 1./mpr
1915     DeltaJ = 1./mpr
1916     Xr     = Xini.reshape((-1,1))
1917     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1918         #
1919         # Inner Loop
1920         # ----------
1921         Ht = HO["Tangent"].asMatrix(Xr)
1922         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1923         #
1924         # Définition de la fonction-coût
1925         # ------------------------------
1926         def CostFunction(dx):
1927             _dX  = numpy.asmatrix(numpy.ravel( dx )).T
1928             if selfA._parameters["StoreInternalVariables"] or \
1929                 selfA._toStore("CurrentState") or \
1930                 selfA._toStore("CurrentOptimum"):
1931                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1932             _HdX = Ht * _dX
1933             _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1934             _dInnovation = Innovation - _HdX
1935             if selfA._toStore("SimulatedObservationAtCurrentState") or \
1936                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1937                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1938             if selfA._toStore("InnovationAtCurrentState"):
1939                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1940             #
1941             Jb  = float( 0.5 * _dX.T * BI * _dX )
1942             Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1943             J   = Jb + Jo
1944             #
1945             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1946             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1947             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1948             selfA.StoredVariables["CostFunctionJ" ].store( J )
1949             if selfA._toStore("IndexOfOptimum") or \
1950                 selfA._toStore("CurrentOptimum") or \
1951                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1952                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1953                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1954                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1955                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1956             if selfA._toStore("IndexOfOptimum"):
1957                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1958             if selfA._toStore("CurrentOptimum"):
1959                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1960             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1961                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1962             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1963                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1964             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1965                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1966             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1967                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1968             return J
1969         #
1970         def GradientOfCostFunction(dx):
1971             _dX          = numpy.asmatrix(numpy.ravel( dx )).T
1972             _HdX         = Ht * _dX
1973             _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
1974             _dInnovation = Innovation - _HdX
1975             GradJb       = BI * _dX
1976             GradJo       = - Ht.T @ (RI * _dInnovation)
1977             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1978             return GradJ
1979         #
1980         # Minimisation de la fonctionnelle
1981         # --------------------------------
1982         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1983         #
1984         if selfA._parameters["Minimizer"] == "LBFGSB":
1985             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1986             if "0.19" <= scipy.version.version <= "1.1.0":
1987                 import lbfgsbhlt as optimiseur
1988             else:
1989                 import scipy.optimize as optimiseur
1990             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1991                 func        = CostFunction,
1992                 x0          = numpy.zeros(Xini.size),
1993                 fprime      = GradientOfCostFunction,
1994                 args        = (),
1995                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
1996                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
1997                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
1998                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
1999                 iprint      = selfA._parameters["optiprint"],
2000                 )
2001             nfeval = Informations['funcalls']
2002             rc     = Informations['warnflag']
2003         elif selfA._parameters["Minimizer"] == "TNC":
2004             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2005                 func        = CostFunction,
2006                 x0          = numpy.zeros(Xini.size),
2007                 fprime      = GradientOfCostFunction,
2008                 args        = (),
2009                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2010                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2011                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2012                 ftol        = selfA._parameters["CostDecrementTolerance"],
2013                 messages    = selfA._parameters["optmessages"],
2014                 )
2015         elif selfA._parameters["Minimizer"] == "CG":
2016             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2017                 f           = CostFunction,
2018                 x0          = numpy.zeros(Xini.size),
2019                 fprime      = GradientOfCostFunction,
2020                 args        = (),
2021                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2022                 gtol        = selfA._parameters["GradientNormTolerance"],
2023                 disp        = selfA._parameters["optdisp"],
2024                 full_output = True,
2025                 )
2026         elif selfA._parameters["Minimizer"] == "NCG":
2027             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2028                 f           = CostFunction,
2029                 x0          = numpy.zeros(Xini.size),
2030                 fprime      = GradientOfCostFunction,
2031                 args        = (),
2032                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2033                 avextol     = selfA._parameters["CostDecrementTolerance"],
2034                 disp        = selfA._parameters["optdisp"],
2035                 full_output = True,
2036                 )
2037         elif selfA._parameters["Minimizer"] == "BFGS":
2038             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2039                 f           = CostFunction,
2040                 x0          = numpy.zeros(Xini.size),
2041                 fprime      = GradientOfCostFunction,
2042                 args        = (),
2043                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2044                 gtol        = selfA._parameters["GradientNormTolerance"],
2045                 disp        = selfA._parameters["optdisp"],
2046                 full_output = True,
2047                 )
2048         else:
2049             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2050         #
2051         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2052         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2053         #
2054         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2055             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2056             Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2057         else:
2058             Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
2059         #
2060         Xr     = Minimum
2061         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2062         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2063     #
2064     # Obtention de l'analyse
2065     # ----------------------
2066     Xa = Xr
2067     #
2068     selfA.StoredVariables["Analysis"].store( Xa )
2069     #
2070     if selfA._toStore("OMA") or \
2071         selfA._toStore("SigmaObs2") or \
2072         selfA._toStore("SimulationQuantiles") or \
2073         selfA._toStore("SimulatedObservationAtOptimum"):
2074         if selfA._toStore("SimulatedObservationAtCurrentState"):
2075             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2076         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2077             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2078         else:
2079             HXa = Hm( Xa )
2080     #
2081     # Calcul de la covariance d'analyse
2082     # ---------------------------------
2083     if selfA._toStore("APosterioriCovariance") or \
2084         selfA._toStore("SimulationQuantiles") or \
2085         selfA._toStore("JacobianMatrixAtOptimum") or \
2086         selfA._toStore("KalmanGainAtOptimum"):
2087         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2088         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2089     if selfA._toStore("APosterioriCovariance") or \
2090         selfA._toStore("SimulationQuantiles") or \
2091         selfA._toStore("KalmanGainAtOptimum"):
2092         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2093         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2094     if selfA._toStore("APosterioriCovariance") or \
2095         selfA._toStore("SimulationQuantiles"):
2096         HessienneI = []
2097         nb = Xa.size
2098         for i in range(nb):
2099             _ee    = numpy.matrix(numpy.zeros(nb)).T
2100             _ee[i] = 1.
2101             _HtEE  = numpy.dot(HtM,_ee)
2102             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2103             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2104         HessienneI = numpy.matrix( HessienneI )
2105         A = HessienneI.I
2106         if min(A.shape) != max(A.shape):
2107             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2108         if (numpy.diag(A) < 0).any():
2109             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2110         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
2111             try:
2112                 L = numpy.linalg.cholesky( A )
2113             except:
2114                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2115     if selfA._toStore("APosterioriCovariance"):
2116         selfA.StoredVariables["APosterioriCovariance"].store( A )
2117     if selfA._toStore("JacobianMatrixAtOptimum"):
2118         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2119     if selfA._toStore("KalmanGainAtOptimum"):
2120         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2121         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2122         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2123     #
2124     # Calculs et/ou stockages supplémentaires
2125     # ---------------------------------------
2126     if selfA._toStore("Innovation") or \
2127         selfA._toStore("SigmaObs2") or \
2128         selfA._toStore("MahalanobisConsistency") or \
2129         selfA._toStore("OMB"):
2130         d  = Y - HXb
2131     if selfA._toStore("Innovation"):
2132         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2133     if selfA._toStore("BMA"):
2134         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2135     if selfA._toStore("OMA"):
2136         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2137     if selfA._toStore("OMB"):
2138         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2139     if selfA._toStore("SigmaObs2"):
2140         TraceR = R.trace(Y.size)
2141         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2142     if selfA._toStore("MahalanobisConsistency"):
2143         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2144     if selfA._toStore("SimulationQuantiles"):
2145         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2146     if selfA._toStore("SimulatedObservationAtBackground"):
2147         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2148     if selfA._toStore("SimulatedObservationAtOptimum"):
2149         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2150     #
2151     return 0
2152
2153 # ==============================================================================
2154 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2155     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2156     """
2157     Maximum Likelihood Ensemble Filter
2158     """
2159     if selfA._parameters["EstimationOf"] == "Parameters":
2160         selfA._parameters["StoreInternalVariables"] = True
2161     #
2162     # Opérateurs
2163     H = HO["Direct"].appliedControledFormTo
2164     #
2165     if selfA._parameters["EstimationOf"] == "State":
2166         M = EM["Direct"].appliedControledFormTo
2167     #
2168     if CM is not None and "Tangent" in CM and U is not None:
2169         Cm = CM["Tangent"].asMatrix(Xb)
2170     else:
2171         Cm = None
2172     #
2173     # Durée d'observation et tailles
2174     if hasattr(Y,"stepnumber"):
2175         duration = Y.stepnumber()
2176         __p = numpy.cumprod(Y.shape())[-1]
2177     else:
2178         duration = 2
2179         __p = numpy.array(Y).size
2180     #
2181     # Précalcul des inversions de B et R
2182     if selfA._parameters["StoreInternalVariables"] \
2183         or selfA._toStore("CostFunctionJ") \
2184         or selfA._toStore("CostFunctionJb") \
2185         or selfA._toStore("CostFunctionJo") \
2186         or selfA._toStore("CurrentOptimum") \
2187         or selfA._toStore("APosterioriCovariance"):
2188         BI = B.getI()
2189     RI = R.getI()
2190     #
2191     __n = Xb.size
2192     __m = selfA._parameters["NumberOfMembers"]
2193     #
2194     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2195         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2196         selfA.StoredVariables["Analysis"].store( Xb )
2197         if selfA._toStore("APosterioriCovariance"):
2198             if hasattr(B,"asfullmatrix"):
2199                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2200             else:
2201                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2202         selfA._setInternalState("seed", numpy.random.get_state())
2203     elif selfA._parameters["nextStep"]:
2204         Xn = selfA._getInternalState("Xn")
2205     #
2206     previousJMinimum = numpy.finfo(float).max
2207     #
2208     for step in range(duration-1):
2209         numpy.random.set_state(selfA._getInternalState("seed"))
2210         if hasattr(Y,"store"):
2211             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2212         else:
2213             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2214         #
2215         if U is not None:
2216             if hasattr(U,"store") and len(U)>1:
2217                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2218             elif hasattr(U,"store") and len(U)==1:
2219                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2220             else:
2221                 Un = numpy.asmatrix(numpy.ravel( U )).T
2222         else:
2223             Un = None
2224         #
2225         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2226             Xn = CovarianceInflation( Xn,
2227                 selfA._parameters["InflationType"],
2228                 selfA._parameters["InflationFactor"],
2229                 )
2230         #
2231         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2232             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2233                 argsAsSerie = True,
2234                 returnSerieAsArrayMatrix = True )
2235             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2236             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2237                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2238                 Xn_predicted = Xn_predicted + Cm * Un
2239         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2240             # --- > Par principe, M = Id, Q = 0
2241             Xn_predicted = EMX = Xn
2242         #
2243         #--------------------------
2244         if VariantM == "MLEF13":
2245             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2246             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2247             Ua  = numpy.identity(__m)
2248             __j = 0
2249             Deltaw = 1
2250             if not BnotT:
2251                 Ta  = numpy.identity(__m)
2252             vw  = numpy.zeros(__m)
2253             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2254                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2255                 #
2256                 if BnotT:
2257                     E1 = vx1 + _epsilon * EaX
2258                 else:
2259                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2260                 #
2261                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2262                     argsAsSerie = True,
2263                     returnSerieAsArrayMatrix = True )
2264                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2265                 #
2266                 if BnotT:
2267                     EaY = (HE2 - vy2) / _epsilon
2268                 else:
2269                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2270                 #
2271                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2272                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2273                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2274                 #
2275                 vw = vw + Deltaw
2276                 #
2277                 if not BnotT:
2278                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2279                 #
2280                 __j = __j + 1
2281             #
2282             if BnotT:
2283                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2284             #
2285             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2286         #--------------------------
2287         else:
2288             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2289         #
2290         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2291             Xn = CovarianceInflation( Xn,
2292                 selfA._parameters["InflationType"],
2293                 selfA._parameters["InflationFactor"],
2294                 )
2295         #
2296         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2297         #--------------------------
2298         selfA._setInternalState("Xn", Xn)
2299         selfA._setInternalState("seed", numpy.random.get_state())
2300         #--------------------------
2301         #
2302         if selfA._parameters["StoreInternalVariables"] \
2303             or selfA._toStore("CostFunctionJ") \
2304             or selfA._toStore("CostFunctionJb") \
2305             or selfA._toStore("CostFunctionJo") \
2306             or selfA._toStore("APosterioriCovariance") \
2307             or selfA._toStore("InnovationAtCurrentAnalysis") \
2308             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2309             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2310             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2311             _Innovation = Ynpu - _HXa
2312         #
2313         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2314         # ---> avec analysis
2315         selfA.StoredVariables["Analysis"].store( Xa )
2316         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2317             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2318         if selfA._toStore("InnovationAtCurrentAnalysis"):
2319             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2320         # ---> avec current state
2321         if selfA._parameters["StoreInternalVariables"] \
2322             or selfA._toStore("CurrentState"):
2323             selfA.StoredVariables["CurrentState"].store( Xn )
2324         if selfA._toStore("ForecastState"):
2325             selfA.StoredVariables["ForecastState"].store( EMX )
2326         if selfA._toStore("ForecastCovariance"):
2327             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2328         if selfA._toStore("BMA"):
2329             selfA.StoredVariables["BMA"].store( EMX - Xa )
2330         if selfA._toStore("InnovationAtCurrentState"):
2331             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2332         if selfA._toStore("SimulatedObservationAtCurrentState") \
2333             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2334             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2335         # ---> autres
2336         if selfA._parameters["StoreInternalVariables"] \
2337             or selfA._toStore("CostFunctionJ") \
2338             or selfA._toStore("CostFunctionJb") \
2339             or selfA._toStore("CostFunctionJo") \
2340             or selfA._toStore("CurrentOptimum") \
2341             or selfA._toStore("APosterioriCovariance"):
2342             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2343             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2344             J   = Jb + Jo
2345             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2346             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2347             selfA.StoredVariables["CostFunctionJ" ].store( J )
2348             #
2349             if selfA._toStore("IndexOfOptimum") \
2350                 or selfA._toStore("CurrentOptimum") \
2351                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2352                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2353                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2354                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2355                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2356             if selfA._toStore("IndexOfOptimum"):
2357                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2358             if selfA._toStore("CurrentOptimum"):
2359                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2360             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2361                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2362             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2363                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2364             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2365                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2366             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2367                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2368         if selfA._toStore("APosterioriCovariance"):
2369             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2370         if selfA._parameters["EstimationOf"] == "Parameters" \
2371             and J < previousJMinimum:
2372             previousJMinimum    = J
2373             XaMin               = Xa
2374             if selfA._toStore("APosterioriCovariance"):
2375                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2376         # ---> Pour les smoothers
2377         if selfA._toStore("CurrentEnsembleState"):
2378             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2379     #
2380     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2381     # ----------------------------------------------------------------------
2382     if selfA._parameters["EstimationOf"] == "Parameters":
2383         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2384         selfA.StoredVariables["Analysis"].store( XaMin )
2385         if selfA._toStore("APosterioriCovariance"):
2386             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2387         if selfA._toStore("BMA"):
2388             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2389     #
2390     return 0
2391
2392 # ==============================================================================
2393 def mmqr(
2394         func     = None,
2395         x0       = None,
2396         fprime   = None,
2397         bounds   = None,
2398         quantile = 0.5,
2399         maxfun   = 15000,
2400         toler    = 1.e-06,
2401         y        = None,
2402         ):
2403     """
2404     Implémentation informatique de l'algorithme MMQR, basée sur la publication :
2405     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2406     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2407     """
2408     #
2409     # Recuperation des donnees et informations initiales
2410     # --------------------------------------------------
2411     variables = numpy.ravel( x0 )
2412     mesures   = numpy.ravel( y )
2413     increment = sys.float_info[0]
2414     p         = variables.size
2415     n         = mesures.size
2416     quantile  = float(quantile)
2417     #
2418     # Calcul des parametres du MM
2419     # ---------------------------
2420     tn      = float(toler) / n
2421     e0      = -tn / math.log(tn)
2422     epsilon = (e0-tn)/(1+math.log(e0))
2423     #
2424     # Calculs d'initialisation
2425     # ------------------------
2426     residus  = mesures - numpy.ravel( func( variables ) )
2427     poids    = 1./(epsilon+numpy.abs(residus))
2428     veps     = 1. - 2. * quantile - residus * poids
2429     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2430     iteration = 0
2431     #
2432     # Recherche iterative
2433     # -------------------
2434     while (increment > toler) and (iteration < maxfun) :
2435         iteration += 1
2436         #
2437         Derivees  = numpy.array(fprime(variables))
2438         Derivees  = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
2439         DeriveesT = Derivees.transpose()
2440         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2441         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
2442         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2443         #
2444         variables = variables + step
2445         if bounds is not None:
2446             # Attention : boucle infinie à éviter si un intervalle est trop petit
2447             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2448                 step      = step/2.
2449                 variables = variables - step
2450         residus   = mesures - numpy.ravel( func(variables) )
2451         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2452         #
2453         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2454             step      = step/2.
2455             variables = variables - step
2456             residus   = mesures - numpy.ravel( func(variables) )
2457             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2458         #
2459         increment     = lastsurrogate-surrogate
2460         poids         = 1./(epsilon+numpy.abs(residus))
2461         veps          = 1. - 2. * quantile - residus * poids
2462         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2463     #
2464     # Mesure d'écart
2465     # --------------
2466     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2467     #
2468     return variables, Ecart, [n,p,iteration,increment,0]
2469
2470 # ==============================================================================
2471 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2472     """
2473     3DVAR multi-pas et multi-méthodes
2474     """
2475     #
2476     # Initialisation
2477     if selfA._parameters["EstimationOf"] == "State":
2478         M = EM["Direct"].appliedTo
2479         #
2480         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2481             Xn = numpy.ravel(Xb).reshape((-1,1))
2482             selfA.StoredVariables["Analysis"].store( Xn )
2483             if selfA._toStore("APosterioriCovariance"):
2484                 if hasattr(B,"asfullmatrix"):
2485                     selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2486                 else:
2487                     selfA.StoredVariables["APosterioriCovariance"].store( B )
2488             if selfA._toStore("ForecastState"):
2489                 selfA.StoredVariables["ForecastState"].store( Xn )
2490         elif selfA._parameters["nextStep"]:
2491             Xn = selfA._getInternalState("Xn")
2492     else:
2493         Xn = numpy.ravel(Xb).reshape((-1,1))
2494     #
2495     if hasattr(Y,"stepnumber"):
2496         duration = Y.stepnumber()
2497     else:
2498         duration = 2
2499     #
2500     # Multi-pas
2501     for step in range(duration-1):
2502         if hasattr(Y,"store"):
2503             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2504         else:
2505             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2506         #
2507         if selfA._parameters["EstimationOf"] == "State": # Forecast
2508             Xn_predicted = M( Xn )
2509             if selfA._toStore("ForecastState"):
2510                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2511         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2512             # --- > Par principe, M = Id, Q = 0
2513             Xn_predicted = Xn
2514         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2515         #
2516         oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2517         #
2518         Xn = selfA.StoredVariables["Analysis"][-1]
2519         #--------------------------
2520         selfA._setInternalState("Xn", Xn)
2521     #
2522     return 0
2523
2524 # ==============================================================================
2525 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2526     """
2527     3DVAR PSAS
2528     """
2529     #
2530     # Initialisations
2531     # ---------------
2532     #
2533     # Opérateurs
2534     Hm = HO["Direct"].appliedTo
2535     #
2536     # Utilisation éventuelle d'un vecteur H(Xb) précalculé
2537     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2538         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2539     else:
2540         HXb = Hm( Xb )
2541     HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2542     if Y.size != HXb.size:
2543         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2544     if max(Y.shape) != max(HXb.shape):
2545         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2546     #
2547     if selfA._toStore("JacobianMatrixAtBackground"):
2548         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2549         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2550         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2551     #
2552     Ht = HO["Tangent"].asMatrix(Xb)
2553     BHT = B * Ht.T
2554     HBHTpR = R + Ht * BHT
2555     Innovation = Y - HXb
2556     #
2557     # Point de démarrage de l'optimisation
2558     Xini = numpy.zeros(Xb.shape)
2559     #
2560     # Définition de la fonction-coût
2561     # ------------------------------
2562     def CostFunction(w):
2563         _W = numpy.asmatrix(numpy.ravel( w )).T
2564         if selfA._parameters["StoreInternalVariables"] or \
2565             selfA._toStore("CurrentState") or \
2566             selfA._toStore("CurrentOptimum"):
2567             selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2568         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2569             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2570             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2571         if selfA._toStore("InnovationAtCurrentState"):
2572             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2573         #
2574         Jb  = float( 0.5 * _W.T * HBHTpR * _W )
2575         Jo  = float( - _W.T * Innovation )
2576         J   = Jb + Jo
2577         #
2578         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2579         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2580         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2581         selfA.StoredVariables["CostFunctionJ" ].store( J )
2582         if selfA._toStore("IndexOfOptimum") or \
2583             selfA._toStore("CurrentOptimum") or \
2584             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2585             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2586             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2587             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2588             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2589         if selfA._toStore("IndexOfOptimum"):
2590             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2591         if selfA._toStore("CurrentOptimum"):
2592             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2593         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2594             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2595         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2596             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2597         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2598             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2599         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2600             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2601         return J
2602     #
2603     def GradientOfCostFunction(w):
2604         _W = numpy.asmatrix(numpy.ravel( w )).T
2605         GradJb  = HBHTpR * _W
2606         GradJo  = - Innovation
2607         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2608         return GradJ
2609     #
2610     # Minimisation de la fonctionnelle
2611     # --------------------------------
2612     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2613     #
2614     if selfA._parameters["Minimizer"] == "LBFGSB":
2615         if "0.19" <= scipy.version.version <= "1.1.0":
2616             import lbfgsbhlt as optimiseur
2617         else:
2618             import scipy.optimize as optimiseur
2619         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2620             func        = CostFunction,
2621             x0          = Xini,
2622             fprime      = GradientOfCostFunction,
2623             args        = (),
2624             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2625             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2626             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2627             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2628             iprint      = selfA._parameters["optiprint"],
2629             )
2630         nfeval = Informations['funcalls']
2631         rc     = Informations['warnflag']
2632     elif selfA._parameters["Minimizer"] == "TNC":
2633         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2634             func        = CostFunction,
2635             x0          = Xini,
2636             fprime      = GradientOfCostFunction,
2637             args        = (),
2638             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2639             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2640             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2641             ftol        = selfA._parameters["CostDecrementTolerance"],
2642             messages    = selfA._parameters["optmessages"],
2643             )
2644     elif selfA._parameters["Minimizer"] == "CG":
2645         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2646             f           = CostFunction,
2647             x0          = Xini,
2648             fprime      = GradientOfCostFunction,
2649             args        = (),
2650             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2651             gtol        = selfA._parameters["GradientNormTolerance"],
2652             disp        = selfA._parameters["optdisp"],
2653             full_output = True,
2654             )
2655     elif selfA._parameters["Minimizer"] == "NCG":
2656         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2657             f           = CostFunction,
2658             x0          = Xini,
2659             fprime      = GradientOfCostFunction,
2660             args        = (),
2661             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2662             avextol     = selfA._parameters["CostDecrementTolerance"],
2663             disp        = selfA._parameters["optdisp"],
2664             full_output = True,
2665             )
2666     elif selfA._parameters["Minimizer"] == "BFGS":
2667         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2668             f           = CostFunction,
2669             x0          = Xini,
2670             fprime      = GradientOfCostFunction,
2671             args        = (),
2672             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2673             gtol        = selfA._parameters["GradientNormTolerance"],
2674             disp        = selfA._parameters["optdisp"],
2675             full_output = True,
2676             )
2677     else:
2678         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2679     #
2680     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2681     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2682     #
2683     # Correction pour pallier a un bug de TNC sur le retour du Minimum
2684     # ----------------------------------------------------------------
2685     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2686         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2687         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2688     else:
2689         Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2690     #
2691     # Obtention de l'analyse
2692     # ----------------------
2693     Xa = Minimum
2694     #
2695     selfA.StoredVariables["Analysis"].store( Xa )
2696     #
2697     if selfA._toStore("OMA") or \
2698         selfA._toStore("SigmaObs2") or \
2699         selfA._toStore("SimulationQuantiles") or \
2700         selfA._toStore("SimulatedObservationAtOptimum"):
2701         if selfA._toStore("SimulatedObservationAtCurrentState"):
2702             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2703         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2704             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2705         else:
2706             HXa = Hm( Xa )
2707     #
2708     # Calcul de la covariance d'analyse
2709     # ---------------------------------
2710     if selfA._toStore("APosterioriCovariance") or \
2711         selfA._toStore("SimulationQuantiles") or \
2712         selfA._toStore("JacobianMatrixAtOptimum") or \
2713         selfA._toStore("KalmanGainAtOptimum"):
2714         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2715         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2716     if selfA._toStore("APosterioriCovariance") or \
2717         selfA._toStore("SimulationQuantiles") or \
2718         selfA._toStore("KalmanGainAtOptimum"):
2719         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2720         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2721     if selfA._toStore("APosterioriCovariance") or \
2722         selfA._toStore("SimulationQuantiles"):
2723         BI = B.getI()
2724         RI = R.getI()
2725         HessienneI = []
2726         nb = Xa.size
2727         for i in range(nb):
2728             _ee    = numpy.matrix(numpy.zeros(nb)).T
2729             _ee[i] = 1.
2730             _HtEE  = numpy.dot(HtM,_ee)
2731             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
2732             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2733         HessienneI = numpy.matrix( HessienneI )
2734         A = HessienneI.I
2735         if min(A.shape) != max(A.shape):
2736             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2737         if (numpy.diag(A) < 0).any():
2738             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2739         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
2740             try:
2741                 L = numpy.linalg.cholesky( A )
2742             except:
2743                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2744     if selfA._toStore("APosterioriCovariance"):
2745         selfA.StoredVariables["APosterioriCovariance"].store( A )
2746     if selfA._toStore("JacobianMatrixAtOptimum"):
2747         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2748     if selfA._toStore("KalmanGainAtOptimum"):
2749         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2750         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2751         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2752     #
2753     # Calculs et/ou stockages supplémentaires
2754     # ---------------------------------------
2755     if selfA._toStore("Innovation") or \
2756         selfA._toStore("SigmaObs2") or \
2757         selfA._toStore("MahalanobisConsistency") or \
2758         selfA._toStore("OMB"):
2759         d  = Y - HXb
2760     if selfA._toStore("Innovation"):
2761         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2762     if selfA._toStore("BMA"):
2763         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2764     if selfA._toStore("OMA"):
2765         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2766     if selfA._toStore("OMB"):
2767         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2768     if selfA._toStore("SigmaObs2"):
2769         TraceR = R.trace(Y.size)
2770         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2771     if selfA._toStore("MahalanobisConsistency"):
2772         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2773     if selfA._toStore("SimulationQuantiles"):
2774         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2775     if selfA._toStore("SimulatedObservationAtBackground"):
2776         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2777     if selfA._toStore("SimulatedObservationAtOptimum"):
2778         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2779     #
2780     return 0
2781
2782 # ==============================================================================
2783 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
2784     """
2785     Stochastic EnKF
2786     """
2787     if selfA._parameters["EstimationOf"] == "Parameters":
2788         selfA._parameters["StoreInternalVariables"] = True
2789     #
2790     # Opérateurs
2791     H = HO["Direct"].appliedControledFormTo
2792     #
2793     if selfA._parameters["EstimationOf"] == "State":
2794         M = EM["Direct"].appliedControledFormTo
2795     #
2796     if CM is not None and "Tangent" in CM and U is not None:
2797         Cm = CM["Tangent"].asMatrix(Xb)
2798     else:
2799         Cm = None
2800     #
2801     # Durée d'observation et tailles
2802     if hasattr(Y,"stepnumber"):
2803         duration = Y.stepnumber()
2804         __p = numpy.cumprod(Y.shape())[-1]
2805     else:
2806         duration = 2
2807         __p = numpy.array(Y).size
2808     #
2809     # Précalcul des inversions de B et R
2810     if selfA._parameters["StoreInternalVariables"] \
2811         or selfA._toStore("CostFunctionJ") \
2812         or selfA._toStore("CostFunctionJb") \
2813         or selfA._toStore("CostFunctionJo") \
2814         or selfA._toStore("CurrentOptimum") \
2815         or selfA._toStore("APosterioriCovariance"):
2816         BI = B.getI()
2817         RI = R.getI()
2818     #
2819     __n = Xb.size
2820     __m = selfA._parameters["NumberOfMembers"]
2821     #
2822     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2823     else:                         Rn = R
2824     #
2825     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2826         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2827         else:                         Pn = B
2828         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2829         selfA.StoredVariables["Analysis"].store( Xb )
2830         if selfA._toStore("APosterioriCovariance"):
2831             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2832         selfA._setInternalState("seed", numpy.random.get_state())
2833     elif selfA._parameters["nextStep"]:
2834         Xn = selfA._getInternalState("Xn")
2835     #
2836     previousJMinimum = numpy.finfo(float).max
2837     #
2838     for step in range(duration-1):
2839         numpy.random.set_state(selfA._getInternalState("seed"))
2840         if hasattr(Y,"store"):
2841             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2842         else:
2843             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2844         #
2845         if U is not None:
2846             if hasattr(U,"store") and len(U)>1:
2847                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2848             elif hasattr(U,"store") and len(U)==1:
2849                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2850             else:
2851                 Un = numpy.asmatrix(numpy.ravel( U )).T
2852         else:
2853             Un = None
2854         #
2855         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2856             Xn = CovarianceInflation( Xn,
2857                 selfA._parameters["InflationType"],
2858                 selfA._parameters["InflationFactor"],
2859                 )
2860         #
2861         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2862             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2863                 argsAsSerie = True,
2864                 returnSerieAsArrayMatrix = True )
2865             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2866             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2867                 argsAsSerie = True,
2868                 returnSerieAsArrayMatrix = True )
2869             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2870                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2871                 Xn_predicted = Xn_predicted + Cm * Un
2872         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2873             # --- > Par principe, M = Id, Q = 0
2874             Xn_predicted = EMX = Xn
2875             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2876                 argsAsSerie = True,
2877                 returnSerieAsArrayMatrix = True )
2878         #
2879         # Mean of forecast and observation of forecast
2880         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2881         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2882         #
2883         #--------------------------
2884         if VariantM == "KalmanFilterFormula05":
2885             PfHT, HPfHT = 0., 0.
2886             for i in range(__m):
2887                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2888                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2889                 PfHT  += Exfi * Eyfi.T
2890                 HPfHT += Eyfi * Eyfi.T
2891             PfHT  = (1./(__m-1)) * PfHT
2892             HPfHT = (1./(__m-1)) * HPfHT
2893             Kn     = PfHT * ( R + HPfHT ).I
2894             del PfHT, HPfHT
2895             #
2896             for i in range(__m):
2897                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2898                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2899         #--------------------------
2900         elif VariantM == "KalmanFilterFormula16":
2901             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2902             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2903             #
2904             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2905             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2906             #
2907             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2908             #
2909             for i in range(__m):
2910                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2911         #--------------------------
2912         else:
2913             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2914         #
2915         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2916             Xn = CovarianceInflation( Xn,
2917                 selfA._parameters["InflationType"],
2918                 selfA._parameters["InflationFactor"],
2919                 )
2920         #
2921         Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2922         #--------------------------
2923         selfA._setInternalState("Xn", Xn)
2924         selfA._setInternalState("seed", numpy.random.get_state())
2925         #--------------------------
2926         #
2927         if selfA._parameters["StoreInternalVariables"] \
2928             or selfA._toStore("CostFunctionJ") \
2929             or selfA._toStore("CostFunctionJb") \
2930             or selfA._toStore("CostFunctionJo") \
2931             or selfA._toStore("APosterioriCovariance") \
2932             or selfA._toStore("InnovationAtCurrentAnalysis") \
2933             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2934             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2935             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2936             _Innovation = Ynpu - _HXa
2937         #
2938         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2939         # ---> avec analysis
2940         selfA.StoredVariables["Analysis"].store( Xa )
2941         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2942             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2943         if selfA._toStore("InnovationAtCurrentAnalysis"):
2944             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2945         # ---> avec current state
2946         if selfA._parameters["StoreInternalVariables"] \
2947             or selfA._toStore("CurrentState"):
2948             selfA.StoredVariables["CurrentState"].store( Xn )
2949         if selfA._toStore("ForecastState"):
2950             selfA.StoredVariables["ForecastState"].store( EMX )
2951         if selfA._toStore("ForecastCovariance"):
2952             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2953         if selfA._toStore("BMA"):
2954             selfA.StoredVariables["BMA"].store( EMX - Xa )
2955         if selfA._toStore("InnovationAtCurrentState"):
2956             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2957         if selfA._toStore("SimulatedObservationAtCurrentState") \
2958             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2959             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2960         # ---> autres
2961         if selfA._parameters["StoreInternalVariables"] \
2962             or selfA._toStore("CostFunctionJ") \
2963             or selfA._toStore("CostFunctionJb") \
2964             or selfA._toStore("CostFunctionJo") \
2965             or selfA._toStore("CurrentOptimum") \
2966             or selfA._toStore("APosterioriCovariance"):
2967             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2968             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
2969             J   = Jb + Jo
2970             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2971             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2972             selfA.StoredVariables["CostFunctionJ" ].store( J )
2973             #
2974             if selfA._toStore("IndexOfOptimum") \
2975                 or selfA._toStore("CurrentOptimum") \
2976                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2977                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2978                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2979                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2980                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2981             if selfA._toStore("IndexOfOptimum"):
2982                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2983             if selfA._toStore("CurrentOptimum"):
2984                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2985             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2986                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2987             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2988                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2989             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2990                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2991             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2992                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2993         if selfA._toStore("APosterioriCovariance"):
2994             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2995         if selfA._parameters["EstimationOf"] == "Parameters" \
2996             and J < previousJMinimum:
2997             previousJMinimum    = J
2998             XaMin               = Xa
2999             if selfA._toStore("APosterioriCovariance"):
3000                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3001         # ---> Pour les smoothers
3002         if selfA._toStore("CurrentEnsembleState"):
3003             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3004     #
3005     # Stockage final supplémentaire de l'optimum en estimation de paramètres
3006     # ----------------------------------------------------------------------
3007     if selfA._parameters["EstimationOf"] == "Parameters":
3008         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3009         selfA.StoredVariables["Analysis"].store( XaMin )
3010         if selfA._toStore("APosterioriCovariance"):
3011             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3012         if selfA._toStore("BMA"):
3013             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3014     #
3015     return 0
3016
3017 # ==============================================================================
3018 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3019     """
3020     3DVAR
3021     """
3022     #
3023     # Initialisations
3024     # ---------------
3025     #
3026     # Opérateurs
3027     Hm = HO["Direct"].appliedTo
3028     Ha = HO["Adjoint"].appliedInXTo
3029     #
3030     # Utilisation éventuelle d'un vecteur H(Xb) précalculé
3031     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3032         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3033     else:
3034         HXb = Hm( Xb )
3035     HXb = HXb.reshape((-1,1))
3036     if Y.size != HXb.size:
3037         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
3038     if max(Y.shape) != max(HXb.shape):
3039         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
3040     #
3041     if selfA._toStore("JacobianMatrixAtBackground"):
3042         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3043         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3044         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3045     #
3046     # Précalcul des inversions de B et R
3047     BI = B.getI()
3048     RI = R.getI()
3049     #
3050     # Point de démarrage de l'optimisation
3051     Xini = selfA._parameters["InitializationPoint"]
3052     #
3053     # Définition de la fonction-coût
3054     # ------------------------------
3055     def CostFunction(x):
3056         _X  = numpy.ravel( x ).reshape((-1,1))
3057         if selfA._parameters["StoreInternalVariables"] or \
3058             selfA._toStore("CurrentState") or \
3059             selfA._toStore("CurrentOptimum"):
3060             selfA.StoredVariables["CurrentState"].store( _X )
3061         _HX = Hm( _X ).reshape((-1,1))
3062         _Innovation = Y - _HX
3063         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3064             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3065             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3066         if selfA._toStore("InnovationAtCurrentState"):
3067             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3068         #
3069         Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3070         Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3071         J   = Jb + Jo
3072         #
3073         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3074         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3075         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3076         selfA.StoredVariables["CostFunctionJ" ].store( J )
3077         if selfA._toStore("IndexOfOptimum") or \
3078             selfA._toStore("CurrentOptimum") or \
3079             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3080             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3081             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3082             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3083             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3084         if selfA._toStore("IndexOfOptimum"):
3085             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3086         if selfA._toStore("CurrentOptimum"):
3087             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3088         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3089             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3090         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3091             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3092         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3093             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3094         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3095             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3096         return J
3097     #
3098     def GradientOfCostFunction(x):
3099         _X      = numpy.ravel( x ).reshape((-1,1))
3100         _HX     = Hm( _X )
3101         _HX     = numpy.ravel( _HX ).reshape((-1,1))
3102         GradJb  = BI * (_X - Xb)
3103         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3104         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3105         return GradJ
3106     #
3107     # Minimisation de la fonctionnelle
3108     # --------------------------------
3109     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3110     #
3111     if selfA._parameters["Minimizer"] == "LBFGSB":
3112         if "0.19" <= scipy.version.version <= "1.1.0":
3113             import lbfgsbhlt as optimiseur
3114         else:
3115             import scipy.optimize as optimiseur
3116         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3117             func        = CostFunction,
3118             x0          = Xini,
3119             fprime      = GradientOfCostFunction,
3120             args        = (),
3121             bounds      = selfA._parameters["Bounds"],
3122             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3123             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3124             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3125             iprint      = selfA._parameters["optiprint"],
3126             )
3127         nfeval = Informations['funcalls']
3128         rc     = Informations['warnflag']
3129     elif selfA._parameters["Minimizer"] == "TNC":
3130         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3131             func        = CostFunction,
3132             x0          = Xini,
3133             fprime      = GradientOfCostFunction,
3134             args        = (),
3135             bounds      = selfA._parameters["Bounds"],
3136             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3137             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3138             ftol        = selfA._parameters["CostDecrementTolerance"],
3139             messages    = selfA._parameters["optmessages"],
3140             )
3141     elif selfA._parameters["Minimizer"] == "CG":
3142         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3143             f           = CostFunction,
3144             x0          = Xini,
3145             fprime      = GradientOfCostFunction,
3146             args        = (),
3147             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3148             gtol        = selfA._parameters["GradientNormTolerance"],
3149             disp        = selfA._parameters["optdisp"],
3150             full_output = True,
3151             )
3152     elif selfA._parameters["Minimizer"] == "NCG":
3153         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3154             f           = CostFunction,
3155             x0          = Xini,
3156             fprime      = GradientOfCostFunction,
3157             args        = (),
3158             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3159             avextol     = selfA._parameters["CostDecrementTolerance"],
3160             disp        = selfA._parameters["optdisp"],
3161             full_output = True,
3162             )
3163     elif selfA._parameters["Minimizer"] == "BFGS":
3164         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3165             f           = CostFunction,
3166             x0          = Xini,
3167             fprime      = GradientOfCostFunction,
3168             args        = (),
3169             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3170             gtol        = selfA._parameters["GradientNormTolerance"],
3171             disp        = selfA._parameters["optdisp"],
3172             full_output = True,
3173             )
3174     else:
3175         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3176     #
3177     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3178     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3179     #
3180     # Correction pour pallier a un bug de TNC sur le retour du Minimum
3181     # ----------------------------------------------------------------
3182     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3183         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3184     #
3185     Xa = Minimum
3186     #--------------------------
3187     #
3188     selfA.StoredVariables["Analysis"].store( Xa )
3189     #
3190     if selfA._toStore("OMA") or \
3191         selfA._toStore("SigmaObs2") or \
3192         selfA._toStore("SimulationQuantiles") or \
3193         selfA._toStore("SimulatedObservationAtOptimum"):
3194         if selfA._toStore("SimulatedObservationAtCurrentState"):
3195             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3196         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3197             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3198         else:
3199             HXa = Hm( Xa )
3200     #
3201     if selfA._toStore("APosterioriCovariance") or \
3202         selfA._toStore("SimulationQuantiles") or \
3203         selfA._toStore("JacobianMatrixAtOptimum") or \
3204         selfA._toStore("KalmanGainAtOptimum"):
3205         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3206         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3207     if selfA._toStore("APosterioriCovariance") or \
3208         selfA._toStore("SimulationQuantiles") or \
3209         selfA._toStore("KalmanGainAtOptimum"):
3210         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3211         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3212     if selfA._toStore("APosterioriCovariance") or \
3213         selfA._toStore("SimulationQuantiles"):
3214         HessienneI = []
3215         nb = Xa.size
3216         for i in range(nb):
3217             _ee    = numpy.zeros(nb)
3218             _ee[i] = 1.
3219             _HtEE  = numpy.dot(HtM,_ee)
3220             HessienneI.append( numpy.ravel( BI * _ee.reshape((-1,1)) + HaM * (RI * _HtEE.reshape((-1,1))) ) )
3221         A = numpy.linalg.inv(numpy.array( HessienneI ))
3222         if min(A.shape) != max(A.shape):
3223             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3224         if (numpy.diag(A) < 0).any():
3225             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3226         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
3227             try:
3228                 L = numpy.linalg.cholesky( A )
3229             except:
3230                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3231     if selfA._toStore("APosterioriCovariance"):
3232         selfA.StoredVariables["APosterioriCovariance"].store( A )
3233     if selfA._toStore("JacobianMatrixAtOptimum"):
3234         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3235     if selfA._toStore("KalmanGainAtOptimum"):
3236         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3237         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3238         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3239     #
3240     if selfA._toStore("Innovation") or \
3241         selfA._toStore("SigmaObs2") or \
3242         selfA._toStore("MahalanobisConsistency") or \
3243         selfA._toStore("OMB"):
3244         d  = Y - HXb
3245     if selfA._toStore("Innovation"):
3246         selfA.StoredVariables["Innovation"].store( d )
3247     if selfA._toStore("BMA"):
3248         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3249     if selfA._toStore("OMA"):
3250         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3251     if selfA._toStore("OMB"):
3252         selfA.StoredVariables["OMB"].store( d )
3253     if selfA._toStore("SigmaObs2"):
3254         TraceR = R.trace(Y.size)
3255         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3256     if selfA._toStore("MahalanobisConsistency"):
3257         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3258     if selfA._toStore("SimulationQuantiles"):
3259         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3260     if selfA._toStore("SimulatedObservationAtBackground"):
3261         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3262     if selfA._toStore("SimulatedObservationAtOptimum"):
3263         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3264     #
3265     return 0
3266
3267 # ==============================================================================
3268 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3269     """
3270     4DVAR
3271     """
3272     #
3273     # Initialisations
3274     # ---------------
3275     #
3276     # Opérateurs
3277     Hm = HO["Direct"].appliedControledFormTo
3278     Mm = EM["Direct"].appliedControledFormTo
3279     #
3280     if CM is not None and "Tangent" in CM and U is not None:
3281         Cm = CM["Tangent"].asMatrix(Xb)
3282     else:
3283         Cm = None
3284     #
3285     def Un(_step):
3286         if U is not None:
3287             if hasattr(U,"store") and 1<=_step<len(U) :
3288                 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
3289             elif hasattr(U,"store") and len(U)==1:
3290                 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3291             else:
3292                 _Un = numpy.asmatrix(numpy.ravel( U )).T
3293         else:
3294             _Un = None
3295         return _Un
3296     def CmUn(_xn,_un):
3297         if Cm is not None and _un is not None: # Attention : si Cm est aussi dans M, doublon !
3298             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3299             _CmUn = _Cm * _un
3300         else:
3301             _CmUn = 0.
3302         return _CmUn
3303     #
3304     # Remarque : les observations sont exploitées à partir du pas de temps
3305     # numéro 1, et sont utilisées dans Yo comme rangées selon ces indices.
3306     # Donc le pas 0 n'est pas utilisé puisque la première étape commence
3307     # avec l'observation du pas 1.
3308     #
3309     # Nombre de pas identique au nombre de pas d'observations
3310     if hasattr(Y,"stepnumber"):
3311         duration = Y.stepnumber()
3312     else:
3313         duration = 2
3314     #
3315     # Précalcul des inversions de B et R
3316     BI = B.getI()
3317     RI = R.getI()
3318     #
3319     # Point de démarrage de l'optimisation
3320     Xini = selfA._parameters["InitializationPoint"]
3321     #
3322     # Définition de la fonction-coût
3323     # ------------------------------
3324     selfA.DirectCalculation = [None,] # Le pas 0 n'est pas observé
3325     selfA.DirectInnovation  = [None,] # Le pas 0 n'est pas observé
3326     def CostFunction(x):
3327         _X  = numpy.asmatrix(numpy.ravel( x )).T
3328         if selfA._parameters["StoreInternalVariables"] or \
3329             selfA._toStore("CurrentState") or \
3330             selfA._toStore("CurrentOptimum"):
3331             selfA.StoredVariables["CurrentState"].store( _X )
3332         Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3333         selfA.DirectCalculation = [None,]
3334         selfA.DirectInnovation  = [None,]
3335         Jo  = 0.
3336         _Xn = _X
3337         for step in range(0,duration-1):
3338             if hasattr(Y,"store"):
3339                 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
3340             else:
3341                 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
3342             _Un = Un(step)
3343             #
3344             # Etape d'évolution
3345             if selfA._parameters["EstimationOf"] == "State":
3346                 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
3347             elif selfA._parameters["EstimationOf"] == "Parameters":
3348                 pass
3349             #
3350             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3351                 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3352             #
3353             # Etape de différence aux observations
3354             if selfA._parameters["EstimationOf"] == "State":
3355                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
3356             elif selfA._parameters["EstimationOf"] == "Parameters":
3357                 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
3358             #
3359             # Stockage de l'état
3360             selfA.DirectCalculation.append( _Xn )
3361             selfA.DirectInnovation.append( _YmHMX )
3362             #
3363             # Ajout dans la fonctionnelle d'observation
3364             Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
3365         J = Jb + Jo
3366         #
3367         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3368         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3369         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3370         selfA.StoredVariables["CostFunctionJ" ].store( J )
3371         if selfA._toStore("IndexOfOptimum") or \
3372             selfA._toStore("CurrentOptimum") or \
3373             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3374             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3375             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3376             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3377         if selfA._toStore("IndexOfOptimum"):
3378             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3379         if selfA._toStore("CurrentOptimum"):
3380             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3381         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3382             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3383         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3384             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3385         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3386             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3387         return J
3388     #
3389     def GradientOfCostFunction(x):
3390         _X      = numpy.asmatrix(numpy.ravel( x )).T
3391         GradJb  = BI * (_X - Xb)
3392         GradJo  = 0.
3393         for step in range(duration-1,0,-1):
3394             # Étape de récupération du dernier stockage de l'évolution
3395             _Xn = selfA.DirectCalculation.pop()
3396             # Étape de récupération du dernier stockage de l'innovation
3397             _YmHMX = selfA.DirectInnovation.pop()
3398             # Calcul des adjoints
3399             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3400             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3401             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3402             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3403             # Calcul du gradient par état adjoint
3404             GradJo = GradJo + Ha * (RI * _YmHMX) # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
3405             GradJo = Ma * GradJo                 # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
3406         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3407         return GradJ
3408     #
3409     # Minimisation de la fonctionnelle
3410     # --------------------------------
3411     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3412     #
3413     if selfA._parameters["Minimizer"] == "LBFGSB":
3414         if "0.19" <= scipy.version.version <= "1.1.0":
3415             import lbfgsbhlt as optimiseur
3416         else:
3417             import scipy.optimize as optimiseur
3418         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3419             func        = CostFunction,
3420             x0          = Xini,
3421             fprime      = GradientOfCostFunction,
3422             args        = (),
3423             bounds      = selfA._parameters["Bounds"],
3424             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3425             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3426             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3427             iprint      = selfA._parameters["optiprint"],
3428             )
3429         nfeval = Informations['funcalls']
3430         rc     = Informations['warnflag']
3431     elif selfA._parameters["Minimizer"] == "TNC":
3432         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3433             func        = CostFunction,
3434             x0          = Xini,
3435             fprime      = GradientOfCostFunction,
3436             args        = (),
3437             bounds      = selfA._parameters["Bounds"],
3438             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3439             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3440             ftol        = selfA._parameters["CostDecrementTolerance"],
3441             messages    = selfA._parameters["optmessages"],
3442             )
3443     elif selfA._parameters["Minimizer"] == "CG":
3444         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3445             f           = CostFunction,
3446             x0          = Xini,
3447             fprime      = GradientOfCostFunction,
3448             args        = (),
3449             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3450             gtol        = selfA._parameters["GradientNormTolerance"],
3451             disp        = selfA._parameters["optdisp"],
3452             full_output = True,
3453             )
3454     elif selfA._parameters["Minimizer"] == "NCG":
3455         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3456             f           = CostFunction,
3457             x0          = Xini,
3458             fprime      = GradientOfCostFunction,
3459             args        = (),
3460             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3461             avextol     = selfA._parameters["CostDecrementTolerance"],
3462             disp        = selfA._parameters["optdisp"],
3463             full_output = True,
3464             )
3465     elif selfA._parameters["Minimizer"] == "BFGS":
3466         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3467             f           = CostFunction,
3468             x0          = Xini,
3469             fprime      = GradientOfCostFunction,
3470             args        = (),
3471             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3472             gtol        = selfA._parameters["GradientNormTolerance"],
3473             disp        = selfA._parameters["optdisp"],
3474             full_output = True,
3475             )
3476     else:
3477         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3478     #
3479     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3480     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3481     #
3482     # Correction pour pallier a un bug de TNC sur le retour du Minimum
3483     # ----------------------------------------------------------------
3484     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3485         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3486     #
3487     # Obtention de l'analyse
3488     # ----------------------
3489     Xa = Minimum
3490     #
3491     selfA.StoredVariables["Analysis"].store( Xa )
3492     #
3493     # Calculs et/ou stockages supplémentaires
3494     # ---------------------------------------
3495     if selfA._toStore("BMA"):
3496         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3497     #
3498     return 0
3499
3500 # ==============================================================================
3501 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3502     """
3503     Standard Kalman Filter
3504     """
3505     if selfA._parameters["EstimationOf"] == "Parameters":
3506         selfA._parameters["StoreInternalVariables"] = True
3507     #
3508     # Opérateurs
3509     # ----------
3510     Ht = HO["Tangent"].asMatrix(Xb)
3511     Ha = HO["Adjoint"].asMatrix(Xb)
3512     #
3513     if selfA._parameters["EstimationOf"] == "State":
3514         Mt = EM["Tangent"].asMatrix(Xb)
3515         Ma = EM["Adjoint"].asMatrix(Xb)
3516     #
3517     if CM is not None and "Tangent" in CM and U is not None:
3518         Cm = CM["Tangent"].asMatrix(Xb)
3519     else:
3520         Cm = None
3521     #
3522     # Durée d'observation et tailles
3523     if hasattr(Y,"stepnumber"):
3524         duration = Y.stepnumber()
3525         __p = numpy.cumprod(Y.shape())[-1]
3526     else:
3527         duration = 2
3528         __p = numpy.array(Y).size
3529     #
3530     # Précalcul des inversions de B et R
3531     if selfA._parameters["StoreInternalVariables"] \
3532         or selfA._toStore("CostFunctionJ") \
3533         or selfA._toStore("CostFunctionJb") \
3534         or selfA._toStore("CostFunctionJo") \
3535         or selfA._toStore("CurrentOptimum") \
3536         or selfA._toStore("APosterioriCovariance"):
3537         BI = B.getI()
3538         RI = R.getI()
3539     #
3540     __n = Xb.size
3541     #
3542     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3543         Xn = Xb
3544         Pn = B
3545         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3546         selfA.StoredVariables["Analysis"].store( Xb )
3547         if selfA._toStore("APosterioriCovariance"):
3548             if hasattr(B,"asfullmatrix"):
3549                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3550             else:
3551                 selfA.StoredVariables["APosterioriCovariance"].store( B )
3552         selfA._setInternalState("seed", numpy.random.get_state())
3553     elif selfA._parameters["nextStep"]:
3554         Xn = selfA._getInternalState("Xn")
3555         Pn = selfA._getInternalState("Pn")
3556     #
3557     if selfA._parameters["EstimationOf"] == "Parameters":
3558         XaMin            = Xn
3559         previousJMinimum = numpy.finfo(float).max
3560     #
3561     for step in range(duration-1):
3562         if hasattr(Y,"store"):
3563             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3564         else:
3565             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3566         #
3567         if U is not None:
3568             if hasattr(U,"store") and len(U)>1:
3569                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3570             elif hasattr(U,"store") and len(U)==1:
3571                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3572             else:
3573                 Un = numpy.asmatrix(numpy.ravel( U )).T
3574         else:
3575             Un = None
3576         #
3577         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3578             Xn_predicted = Mt * Xn
3579             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
3580                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3581                 Xn_predicted = Xn_predicted + Cm * Un
3582             Pn_predicted = Q + Mt * (Pn * Ma)
3583         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3584             # --- > Par principe, M = Id, Q = 0
3585             Xn_predicted = Xn
3586             Pn_predicted = Pn
3587         #
3588         if selfA._parameters["EstimationOf"] == "State":
3589             HX_predicted = Ht * Xn_predicted
3590             _Innovation  = Ynpu - HX_predicted
3591         elif selfA._parameters["EstimationOf"] == "Parameters":
3592             HX_predicted = Ht * Xn_predicted
3593             _Innovation  = Ynpu - HX_predicted
3594             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
3595                 _Innovation = _Innovation - Cm * Un
3596         #
3597         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3598         Xn = Xn_predicted + Kn * _Innovation
3599         Pn = Pn_predicted - Kn * Ht * Pn_predicted
3600         #
3601         Xa = Xn # Pointeurs
3602         #--------------------------
3603         selfA._setInternalState("Xn", Xn)
3604         selfA._setInternalState("Pn", Pn)
3605         #--------------------------
3606         #
3607         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3608         # ---> avec analysis
3609         selfA.StoredVariables["Analysis"].store( Xa )
3610         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3611             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3612         if selfA._toStore("InnovationAtCurrentAnalysis"):
3613             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3614         # ---> avec current state
3615         if selfA._parameters["StoreInternalVariables"] \
3616             or selfA._toStore("CurrentState"):
3617             selfA.StoredVariables["CurrentState"].store( Xn )
3618         if selfA._toStore("ForecastState"):
3619             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3620         if selfA._toStore("ForecastCovariance"):
3621             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3622         if selfA._toStore("BMA"):
3623             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3624         if selfA._toStore("InnovationAtCurrentState"):
3625             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3626         if selfA._toStore("SimulatedObservationAtCurrentState") \
3627             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3628             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3629         # ---> autres
3630         if selfA._parameters["StoreInternalVariables"] \
3631             or selfA._toStore("CostFunctionJ") \
3632             or selfA._toStore("CostFunctionJb") \
3633             or selfA._toStore("CostFunctionJo") \
3634             or selfA._toStore("CurrentOptimum") \
3635             or selfA._toStore("APosterioriCovariance"):
3636             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3637             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3638             J   = Jb + Jo
3639             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3640             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3641             selfA.StoredVariables["CostFunctionJ" ].store( J )
3642             #
3643             if selfA._toStore("IndexOfOptimum") \
3644                 or selfA._toStore("CurrentOptimum") \
3645                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3646                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3647                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3648                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3649                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3650             if selfA._toStore("IndexOfOptimum"):
3651                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3652             if selfA._toStore("CurrentOptimum"):
3653                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3654             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3655                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3656             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3657                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3658             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3659                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3660             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3661                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3662         if selfA._toStore("APosterioriCovariance"):
3663             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3664         if selfA._parameters["EstimationOf"] == "Parameters" \
3665             and J < previousJMinimum:
3666             previousJMinimum    = J
3667             XaMin               = Xa
3668             if selfA._toStore("APosterioriCovariance"):
3669                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3670     #
3671     # Stockage final supplémentaire de l'optimum en estimation de paramètres
3672     # ----------------------------------------------------------------------
3673     if selfA._parameters["EstimationOf"] == "Parameters":
3674         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3675         selfA.StoredVariables["Analysis"].store( XaMin )
3676         if selfA._toStore("APosterioriCovariance"):
3677             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3678         if selfA._toStore("BMA"):
3679             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3680     #
3681     return 0
3682
3683 # ==============================================================================
3684 def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3685     """
3686     Constrained Unscented Kalman Filter
3687     """
3688     if selfA._parameters["EstimationOf"] == "Parameters":
3689         selfA._parameters["StoreInternalVariables"] = True
3690     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
3691     #
3692     L     = Xb.size
3693     Alpha = selfA._parameters["Alpha"]
3694     Beta  = selfA._parameters["Beta"]
3695     if selfA._parameters["Kappa"] == 0:
3696         if selfA._parameters["EstimationOf"] == "State":
3697             Kappa = 0
3698         elif selfA._parameters["EstimationOf"] == "Parameters":
3699             Kappa = 3 - L
3700     else:
3701         Kappa = selfA._parameters["Kappa"]
3702     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3703     Gamma  = math.sqrt( L + Lambda )
3704     #
3705     Ww = []
3706     Ww.append( 0. )
3707     for i in range(2*L):
3708         Ww.append( 1. / (2.*(L + Lambda)) )
3709     #
3710     Wm = numpy.array( Ww )
3711     Wm[0] = Lambda / (L + Lambda)
3712     Wc = numpy.array( Ww )
3713     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
3714     #
3715     # Opérateurs
3716     Hm = HO["Direct"].appliedControledFormTo
3717     #
3718     if selfA._parameters["EstimationOf"] == "State":
3719         Mm = EM["Direct"].appliedControledFormTo
3720     #
3721     if CM is not None and "Tangent" in CM and U is not None:
3722         Cm = CM["Tangent"].asMatrix(Xb)
3723     else:
3724         Cm = None
3725     #
3726     # Durée d'observation et tailles
3727     if hasattr(Y,"stepnumber"):
3728         duration = Y.stepnumber()
3729         __p = numpy.cumprod(Y.shape())[-1]
3730     else:
3731         duration = 2
3732         __p = numpy.array(Y).size
3733     #
3734     # Précalcul des inversions de B et R
3735     if selfA._parameters["StoreInternalVariables"] \
3736         or selfA._toStore("CostFunctionJ") \
3737         or selfA._toStore("CostFunctionJb") \
3738         or selfA._toStore("CostFunctionJo") \
3739         or selfA._toStore("CurrentOptimum") \
3740         or selfA._toStore("APosterioriCovariance"):
3741         BI = B.getI()
3742         RI = R.getI()
3743     #
3744     __n = Xb.size
3745     #
3746     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3747         Xn = Xb
3748         if hasattr(B,"asfullmatrix"):
3749             Pn = B.asfullmatrix(__n)
3750         else:
3751             Pn = B
3752         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3753         selfA.StoredVariables["Analysis"].store( Xb )
3754         if selfA._toStore("APosterioriCovariance"):
3755             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3756     elif selfA._parameters["nextStep"]:
3757         Xn = selfA._getInternalState("Xn")
3758         Pn = selfA._getInternalState("Pn")
3759     #
3760     if selfA._parameters["EstimationOf"] == "Parameters":
3761         XaMin            = Xn
3762         previousJMinimum = numpy.finfo(float).max
3763     #
3764     for step in range(duration-1):
3765         if hasattr(Y,"store"):
3766             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3767         else:
3768             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3769         #
3770         if U is not None:
3771             if hasattr(U,"store") and len(U)>1:
3772                 Un = numpy.ravel( U[step] ).reshape((-1,1))
3773             elif hasattr(U,"store") and len(U)==1:
3774                 Un = numpy.ravel( U[0] ).reshape((-1,1))
3775             else:
3776                 Un = numpy.ravel( U ).reshape((-1,1))
3777         else:
3778             Un = None
3779         #
3780         Pndemi = numpy.linalg.cholesky(Pn)
3781         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
3782         nbSpts = 2*Xn.size+1
3783         #
3784         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3785             for point in range(nbSpts):
3786                 Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
3787         #
3788         XEtnnp = []
3789         for point in range(nbSpts):
3790             if selfA._parameters["EstimationOf"] == "State":
3791                 XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
3792                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
3793                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
3794                     XEtnnpi = XEtnnpi + Cm * Un
3795                 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3796                     XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
3797             elif selfA._parameters["EstimationOf"] == "Parameters":
3798                 # --- > Par principe, M = Id, Q = 0
3799                 XEtnnpi = Xnp[:,point]
3800             XEtnnp.append( XEtnnpi )
3801         XEtnnp = numpy.hstack( XEtnnp )
3802         #
3803         Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
3804         #
3805         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3806             Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
3807         #
3808         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
3809         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
3810         for point in range(nbSpts):
3811             Pnm += Wc[i] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
3812         #
3813         if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
3814             Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
3815         else:
3816             Pnmdemi = numpy.linalg.cholesky(Pnm)
3817         #
3818         Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
3819         #
3820         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3821             for point in range(nbSpts):
3822                 Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
3823         #
3824         Ynnp = []
3825         for point in range(nbSpts):
3826             if selfA._parameters["EstimationOf"] == "State":
3827                 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
3828             elif selfA._parameters["EstimationOf"] == "Parameters":
3829                 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
3830             Ynnp.append( Ynnpi )
3831         Ynnp = numpy.hstack( Ynnp )
3832         #
3833         Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
3834         #
3835         Pyyn = R
3836         Pxyn = 0.
3837         for point in range(nbSpts):
3838             Pyyn += Wc[i] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
3839             Pxyn += Wc[i] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
3840         #
3841         _Innovation  = Ynpu - Yncm
3842         if selfA._parameters["EstimationOf"] == "Parameters":
3843             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
3844                 _Innovation = _Innovation - Cm * Un
3845         #
3846         Kn = Pxyn * Pyyn.I
3847         Xn = Xncm + Kn * _Innovation
3848         Pn = Pnm - Kn * Pyyn * Kn.T
3849         #
3850         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3851             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
3852         #
3853         Xa = Xn # Pointeurs
3854         #--------------------------
3855         selfA._setInternalState("Xn", Xn)
3856         selfA._setInternalState("Pn", Pn)
3857         #--------------------------
3858         #
3859         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3860         # ---> avec analysis
3861         selfA.StoredVariables["Analysis"].store( Xa )
3862         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3863             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
3864         if selfA._toStore("InnovationAtCurrentAnalysis"):
3865             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3866         # ---> avec current state
3867         if selfA._parameters["StoreInternalVariables"] \
3868             or selfA._toStore("CurrentState"):
3869             selfA.StoredVariables["CurrentState"].store( Xn )
3870         if selfA._toStore("ForecastState"):
3871             selfA.StoredVariables["ForecastState"].store( Xncm )
3872         if selfA._toStore("ForecastCovariance"):
3873             selfA.StoredVariables["ForecastCovariance"].store( Pnm )
3874         if selfA._toStore("BMA"):
3875             selfA.StoredVariables["BMA"].store( Xncm - Xa )
3876         if selfA._toStore("InnovationAtCurrentState"):
3877             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3878         if selfA._toStore("SimulatedObservationAtCurrentState") \
3879             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3880             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
3881         # ---> autres
3882         if selfA._parameters["StoreInternalVariables"] \
3883             or selfA._toStore("CostFunctionJ") \
3884             or selfA._toStore("CostFunctionJb") \
3885             or selfA._toStore("CostFunctionJo") \
3886             or selfA._toStore("CurrentOptimum") \
3887             or selfA._toStore("APosterioriCovariance"):
3888             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3889             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
3890             J   = Jb + Jo
3891             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3892             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3893             selfA.StoredVariables["CostFunctionJ" ].store( J )
3894             #
3895             if selfA._toStore("IndexOfOptimum") \
3896                 or selfA._toStore("CurrentOptimum") \
3897                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3898                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3899                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3900                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3901                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3902             if selfA._toStore("IndexOfOptimum"):
3903                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3904             if selfA._toStore("CurrentOptimum"):
3905                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3906             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3907                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3908             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3909                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3910             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3911                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3912             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3913                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3914         if selfA._toStore("APosterioriCovariance"):
3915             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3916         if selfA._parameters["EstimationOf"] == "Parameters" \
3917             and J < previousJMinimum:
3918             previousJMinimum    = J
3919             XaMin               = Xa
3920             if selfA._toStore("APosterioriCovariance"):
3921                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3922     #
3923     # Stockage final supplémentaire de l'optimum en estimation de paramètres
3924     # ----------------------------------------------------------------------
3925     if selfA._parameters["EstimationOf"] == "Parameters":
3926         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3927         selfA.StoredVariables["Analysis"].store( XaMin )
3928         if selfA._toStore("APosterioriCovariance"):
3929             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3930         if selfA._toStore("BMA"):
3931             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3932     #
3933     return 0
3934
3935 # ==============================================================================
3936 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3937     """
3938     Unscented Kalman Filter
3939     """
3940     if selfA._parameters["EstimationOf"] == "Parameters":
3941         selfA._parameters["StoreInternalVariables"] = True
3942     #
3943     L     = Xb.size
3944     Alpha = selfA._parameters["Alpha"]
3945     Beta  = selfA._parameters["Beta"]
3946     if selfA._parameters["Kappa"] == 0:
3947         if selfA._parameters["EstimationOf"] == "State":
3948             Kappa = 0
3949         elif selfA._parameters["EstimationOf"] == "Parameters":
3950             Kappa = 3 - L
3951     else:
3952         Kappa = selfA._parameters["Kappa"]
3953     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3954     Gamma  = math.sqrt( L + Lambda )
3955     #
3956     Ww = []
3957     Ww.append( 0. )
3958     for i in range(2*L):
3959         Ww.append( 1. / (2.*(L + Lambda)) )
3960     #
3961     Wm = numpy.array( Ww )
3962     Wm[0] = Lambda / (L + Lambda)
3963     Wc = numpy.array( Ww )
3964     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
3965     #
3966     # Opérateurs
3967     Hm = HO["Direct"].appliedControledFormTo
3968     #
3969     if selfA._parameters["EstimationOf"] == "State":
3970         Mm = EM["Direct"].appliedControledFormTo
3971     #
3972     if CM is not None and "Tangent" in CM and U is not None:
3973         Cm = CM["Tangent"].asMatrix(Xb)
3974     else:
3975         Cm = None
3976     #
3977     # Durée d'observation et tailles
3978     if hasattr(Y,"stepnumber"):
3979         duration = Y.stepnumber()
3980         __p = numpy.cumprod(Y.shape())[-1]
3981     else:
3982         duration = 2
3983         __p = numpy.array(Y).size
3984     #
3985     # Précalcul des inversions de B et R
3986     if selfA._parameters["StoreInternalVariables"] \
3987         or selfA._toStore("CostFunctionJ") \
3988         or selfA._toStore("CostFunctionJb") \
3989         or selfA._toStore("CostFunctionJo") \
3990         or selfA._toStore("CurrentOptimum") \
3991         or selfA._toStore("APosterioriCovariance"):
3992         BI = B.getI()
3993         RI = R.getI()
3994     #
3995     __n = Xb.size
3996     #
3997     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3998         Xn = Xb
3999         if hasattr(B,"asfullmatrix"):
4000             Pn = B.asfullmatrix(__n)
4001         else:
4002             Pn = B
4003         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4004         selfA.StoredVariables["Analysis"].store( Xb )
4005         if selfA._toStore("APosterioriCovariance"):
4006             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4007     elif selfA._parameters["nextStep"]:
4008         Xn = selfA._getInternalState("Xn")
4009         Pn = selfA._getInternalState("Pn")
4010     #
4011     if selfA._parameters["EstimationOf"] == "Parameters":
4012         XaMin            = Xn
4013         previousJMinimum = numpy.finfo(float).max
4014     #
4015     for step in range(duration-1):
4016         if hasattr(Y,"store"):
4017             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
4018         else:
4019             Ynpu = numpy.ravel( Y ).reshape((__p,1))
4020         #
4021         if U is not None:
4022             if hasattr(U,"store") and len(U)>1:
4023                 Un = numpy.ravel( U[step] ).reshape((-1,1))
4024             elif hasattr(U,"store") and len(U)==1:
4025                 Un = numpy.ravel( U[0] ).reshape((-1,1))
4026             else:
4027                 Un = numpy.ravel( U ).reshape((-1,1))
4028         else:
4029             Un = None
4030         #
4031         Pndemi = numpy.linalg.cholesky(Pn)
4032         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
4033         nbSpts = 2*Xn.size+1
4034         #
4035         XEtnnp = []
4036         for point in range(nbSpts):
4037             if selfA._parameters["EstimationOf"] == "State":
4038                 XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
4039                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
4040                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4041                     XEtnnpi = XEtnnpi + Cm * Un
4042             elif selfA._parameters["EstimationOf"] == "Parameters":
4043                 # --- > Par principe, M = Id, Q = 0
4044                 XEtnnpi = Xnp[:,point]
4045             XEtnnp.append( XEtnnpi )
4046         XEtnnp = numpy.hstack( XEtnnp )
4047         #
4048         Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
4049         #
4050         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
4051         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4052         for point in range(nbSpts):
4053             Pnm += Wc[i] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
4054         #
4055         Pnmdemi = numpy.linalg.cholesky(Pnm)
4056         #
4057         Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
4058         #
4059         Ynnp = []
4060         for point in range(nbSpts):
4061             if selfA._parameters["EstimationOf"] == "State":
4062                 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
4063             elif selfA._parameters["EstimationOf"] == "Parameters":
4064                 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
4065             Ynnp.append( Ynnpi )
4066         Ynnp = numpy.hstack( Ynnp )
4067         #
4068         Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
4069         #
4070         Pyyn = R
4071         Pxyn = 0.
4072         for point in range(nbSpts):
4073             Pyyn += Wc[i] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
4074             Pxyn += Wc[i] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
4075         #
4076         _Innovation  = Ynpu - Yncm
4077         if selfA._parameters["EstimationOf"] == "Parameters":
4078             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
4079                 _Innovation = _Innovation - Cm * Un
4080         #
4081         Kn = Pxyn * Pyyn.I
4082         Xn = Xncm + Kn * _Innovation
4083         Pn = Pnm - Kn * Pyyn * Kn.T
4084         #
4085         Xa = Xn # Pointeurs
4086         #--------------------------
4087         selfA._setInternalState("Xn", Xn)
4088         selfA._setInternalState("Pn", Pn)
4089         #--------------------------
4090         #
4091         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4092         # ---> avec analysis
4093         selfA.StoredVariables["Analysis"].store( Xa )
4094         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4095             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4096         if selfA._toStore("InnovationAtCurrentAnalysis"):
4097             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4098         # ---> avec current state
4099         if selfA._parameters["StoreInternalVariables"] \
4100             or selfA._toStore("CurrentState"):
4101             selfA.StoredVariables["CurrentState"].store( Xn )
4102         if selfA._toStore("ForecastState"):
4103             selfA.StoredVariables["ForecastState"].store( Xncm )
4104         if selfA._toStore("ForecastCovariance"):
4105             selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4106         if selfA._toStore("BMA"):
4107             selfA.StoredVariables["BMA"].store( Xncm - Xa )
4108         if selfA._toStore("InnovationAtCurrentState"):
4109             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4110         if selfA._toStore("SimulatedObservationAtCurrentState") \
4111             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4112             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4113         # ---> autres
4114         if selfA._parameters["StoreInternalVariables"] \
4115             or selfA._toStore("CostFunctionJ") \
4116             or selfA._toStore("CostFunctionJb") \
4117             or selfA._toStore("CostFunctionJo") \
4118             or selfA._toStore("CurrentOptimum") \
4119             or selfA._toStore("APosterioriCovariance"):
4120             Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
4121             Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
4122             J   = Jb + Jo
4123             selfA.StoredVariables["CostFunctionJb"].store( Jb )
4124             selfA.StoredVariables["CostFunctionJo"].store( Jo )
4125             selfA.StoredVariables["CostFunctionJ" ].store( J )
4126             #
4127             if selfA._toStore("IndexOfOptimum") \
4128                 or selfA._toStore("CurrentOptimum") \
4129                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4130                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4131                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4132                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4133                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4134             if selfA._toStore("IndexOfOptimum"):
4135                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4136             if selfA._toStore("CurrentOptimum"):
4137                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4138             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4139                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4140             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4141                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4142             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4143                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4144             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4145                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4146         if selfA._toStore("APosterioriCovariance"):
4147             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4148         if selfA._parameters["EstimationOf"] == "Parameters" \
4149             and J < previousJMinimum:
4150             previousJMinimum    = J
4151             XaMin               = Xa
4152             if selfA._toStore("APosterioriCovariance"):
4153                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4154     #
4155     # Stockage final supplémentaire de l'optimum en estimation de paramètres
4156     # ----------------------------------------------------------------------
4157     if selfA._parameters["EstimationOf"] == "Parameters":
4158         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4159         selfA.StoredVariables["Analysis"].store( XaMin )
4160         if selfA._toStore("APosterioriCovariance"):
4161             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4162         if selfA._toStore("BMA"):
4163             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4164     #
4165     return 0
4166
4167 # ==============================================================================
4168 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4169     """
4170     3DVAR variational analysis with no inversion of B
4171     """
4172     #
4173     # Initialisations
4174     # ---------------
4175     #
4176     # Opérateurs
4177     Hm = HO["Direct"].appliedTo
4178     Ha = HO["Adjoint"].appliedInXTo
4179     #
4180     # Précalcul des inversions de B et R
4181     BT = B.getT()
4182     RI = R.getI()
4183     #
4184     # Point de démarrage de l'optimisation
4185     Xini = numpy.zeros(Xb.shape)
4186     #
4187     # Définition de la fonction-coût
4188     # ------------------------------
4189     def CostFunction(v):
4190         _V = numpy.asmatrix(numpy.ravel( v )).T
4191         _X = Xb + B * _V
4192         if selfA._parameters["StoreInternalVariables"] or \
4193             selfA._toStore("CurrentState") or \
4194             selfA._toStore("CurrentOptimum"):
4195             selfA.StoredVariables["CurrentState"].store( _X )
4196         _HX = Hm( _X )
4197         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
4198         _Innovation = Y - _HX
4199         if selfA._toStore("SimulatedObservationAtCurrentState") or \
4200             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4201             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4202         if selfA._toStore("InnovationAtCurrentState"):
4203             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4204         #
4205         Jb  = float( 0.5 * _V.T * BT * _V )
4206         Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
4207         J   = Jb + Jo
4208         #
4209         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4210         selfA.StoredVariables["CostFunctionJb"].store( Jb )
4211         selfA.StoredVariables["CostFunctionJo"].store( Jo )
4212         selfA.StoredVariables["CostFunctionJ" ].store( J )
4213         if selfA._toStore("IndexOfOptimum") or \
4214             selfA._toStore("CurrentOptimum") or \
4215             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4216             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4217             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4218             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4219             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4220         if selfA._toStore("IndexOfOptimum"):
4221             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4222         if selfA._toStore("CurrentOptimum"):
4223             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4224         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4225             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4226         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4227             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4228         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4229             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4230         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4231             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4232         return J
4233     #
4234     def GradientOfCostFunction(v):
4235         _V = numpy.asmatrix(numpy.ravel( v )).T
4236         _X = Xb + B * _V
4237         _HX     = Hm( _X )
4238         _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
4239         GradJb  = BT * _V
4240         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
4241         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4242         return GradJ
4243     #
4244     # Minimisation de la fonctionnelle
4245     # --------------------------------
4246     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
4247     #
4248     if selfA._parameters["Minimizer"] == "LBFGSB":
4249         if "0.19" <= scipy.version.version <= "1.1.0":
4250             import lbfgsbhlt as optimiseur
4251         else:
4252             import scipy.optimize as optimiseur
4253         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4254             func        = CostFunction,
4255             x0          = Xini,
4256             fprime      = GradientOfCostFunction,
4257             args        = (),
4258             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
4259             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
4260             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
4261             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
4262             iprint      = selfA._parameters["optiprint"],
4263             )
4264         nfeval = Informations['funcalls']
4265         rc     = Informations['warnflag']
4266     elif selfA._parameters["Minimizer"] == "TNC":
4267         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
4268             func        = CostFunction,
4269             x0          = Xini,
4270             fprime      = GradientOfCostFunction,
4271             args        = (),
4272             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
4273             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
4274             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
4275             ftol        = selfA._parameters["CostDecrementTolerance"],
4276             messages    = selfA._parameters["optmessages"],
4277             )
4278     elif selfA._parameters["Minimizer"] == "CG":
4279         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
4280             f           = CostFunction,
4281             x0          = Xini,
4282             fprime      = GradientOfCostFunction,
4283             args        = (),
4284             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4285             gtol        = selfA._parameters["GradientNormTolerance"],
4286             disp        = selfA._parameters["optdisp"],
4287             full_output = True,
4288             )
4289     elif selfA._parameters["Minimizer"] == "NCG":
4290         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
4291             f           = CostFunction,
4292             x0          = Xini,
4293             fprime      = GradientOfCostFunction,
4294             args        = (),
4295             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4296             avextol     = selfA._parameters["CostDecrementTolerance"],
4297             disp        = selfA._parameters["optdisp"],
4298             full_output = True,
4299             )
4300     elif selfA._parameters["Minimizer"] == "BFGS":
4301         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
4302             f           = CostFunction,
4303             x0          = Xini,
4304             fprime      = GradientOfCostFunction,
4305             args        = (),
4306             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4307             gtol        = selfA._parameters["GradientNormTolerance"],
4308             disp        = selfA._parameters["optdisp"],
4309             full_output = True,
4310             )
4311     else:
4312         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
4313     #
4314     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4315     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
4316     #
4317     # Correction pour pallier a un bug de TNC sur le retour du Minimum
4318     # ----------------------------------------------------------------
4319     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
4320         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
4321         Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
4322     else:
4323         Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
4324     #
4325     # Obtention de l'analyse
4326     # ----------------------
4327     Xa = Minimum
4328     #
4329     selfA.StoredVariables["Analysis"].store( Xa )
4330     #
4331     if selfA._toStore("OMA") or \
4332         selfA._toStore("SigmaObs2") or \
4333         selfA._toStore("SimulationQuantiles") or \
4334         selfA._toStore("SimulatedObservationAtOptimum"):
4335         if selfA._toStore("SimulatedObservationAtCurrentState"):
4336             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
4337         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4338             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
4339         else:
4340             HXa = Hm( Xa )
4341     #
4342     # Calcul de la covariance d'analyse
4343     # ---------------------------------
4344     if selfA._toStore("APosterioriCovariance") or \
4345         selfA._toStore("SimulationQuantiles") or \
4346         selfA._toStore("JacobianMatrixAtOptimum") or \
4347         selfA._toStore("KalmanGainAtOptimum"):
4348         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
4349         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
4350     if selfA._toStore("APosterioriCovariance") or \
4351         selfA._toStore("SimulationQuantiles") or \
4352         selfA._toStore("KalmanGainAtOptimum"):
4353         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
4354         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
4355     if selfA._toStore("APosterioriCovariance") or \
4356         selfA._toStore("SimulationQuantiles"):
4357         BI = B.getI()
4358         HessienneI = []
4359         nb = Xa.size
4360         for i in range(nb):
4361             _ee    = numpy.matrix(numpy.zeros(nb)).T
4362             _ee[i] = 1.
4363             _HtEE  = numpy.dot(HtM,_ee)
4364             _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
4365             HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
4366         HessienneI = numpy.matrix( HessienneI )
4367         A = HessienneI.I
4368         if min(A.shape) != max(A.shape):
4369             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
4370         if (numpy.diag(A) < 0).any():
4371             raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
4372         if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
4373             try:
4374                 L = numpy.linalg.cholesky( A )
4375             except:
4376                 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
4377     if selfA._toStore("APosterioriCovariance"):
4378         selfA.StoredVariables["APosterioriCovariance"].store( A )
4379     if selfA._toStore("JacobianMatrixAtOptimum"):
4380         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
4381     if selfA._toStore("KalmanGainAtOptimum"):
4382         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
4383         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
4384         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
4385     #
4386     # Calculs et/ou stockages supplémentaires
4387     # ---------------------------------------
4388     if selfA._toStore("Innovation") or \
4389         selfA._toStore("SigmaObs2") or \
4390         selfA._toStore("MahalanobisConsistency") or \
4391         selfA._toStore("OMB"):
4392         d  = Y - HXb
4393     if selfA._toStore("Innovation"):
4394         selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
4395     if selfA._toStore("BMA"):
4396         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
4397     if selfA._toStore("OMA"):
4398         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
4399     if selfA._toStore("OMB"):
4400         selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
4401     if selfA._toStore("SigmaObs2"):
4402         TraceR = R.trace(Y.size)
4403         selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
4404     if selfA._toStore("MahalanobisConsistency"):
4405         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
4406     if selfA._toStore("SimulationQuantiles"):
4407         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
4408     if selfA._toStore("SimulatedObservationAtBackground"):
4409         selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
4410     if selfA._toStore("SimulatedObservationAtOptimum"):
4411         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
4412     #
4413     return 0
4414
4415 # ==============================================================================
4416 if __name__ == "__main__":
4417     print('\n AUTODIAGNOSTIC\n')