# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D

23 __doc__ = """
24     Définit les objets numériques génériques.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)

# ==============================================================================
def ExecuteFunction( paire ):
    assert len(paire) == 2, "Incorrect number of arguments"
    X, funcrepr = paire
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    __HX  = __fonction( __X )
    return numpy.ravel( __HX )

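# ------------------------------------------------------------------------------
# Illustrative sketch (not called by the module): it builds the "funcrepr"
# description that ExecuteFunction expects, pointing at a throwaway user module
# written in a temporary directory. The module and function names ("user_op",
# "H") are hypothetical and only serve the demonstration.
def _demo_ExecuteFunction():
    import tempfile
    _tmpdir = tempfile.mkdtemp()
    with open(os.path.join(_tmpdir, "user_op.py"), "w") as _f:
        _f.write("import numpy\ndef H(X):\n    return 2. * numpy.asarray(X)\n")
    _funcrepr = {
        "__userFunction__path" : _tmpdir,
        "__userFunction__modl" : "user_op",
        "__userFunction__name" : "H",
        }
    # Each job is a pair (point, function description), as built and consumed
    # by multiprocessing.Pool.map( ExecuteFunction, _jobs ) in TangentMatrix
    return ExecuteFunction( (numpy.array([1., 2., 3.]), _funcrepr) ) # -> [2. 4. 6.]
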
# ==============================================================================
class FDApproximation(object):
    """
    This class provides an interface for defining approximated operators. When
    creating an object by supplying a function "Function", one obtains an
    object with the three methods "DirectOperator", "TangentOperator" and
    "AdjointOperator". The finite-difference (FD) approximation is controlled
    by the multiplicative increment "increment", defaulting to 1%, or by the
    fixed increment "dX", which is then multiplied by "increment" (hence in %),
    and centered finite differences are used if the boolean "centeredDF" is
    true. A usage sketch follows the class definition.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations: %s"%(self.__mfEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment  = float(increment)
        else:
            self.__increment  = 0.01
        if dX is None:
            self.__dX     = None
        else:
            self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of duplicate calculations: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicate detection: %.2e"%self.__tolerBP)

    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac

    # ---------------------------------------------------------
    def DirectOperator(self, X ):
        """
        Computation of the direct operator using the supplied function.
        """
        logging.debug("FDA DirectOperator calculation (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX

    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Computation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences around the point X are used. X is a numpy.matrix.

        Centered finite differences (second-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i] to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
           by the step 2*dXi
        3/ Each result, component by component, becomes a column of the
           Jacobian

        Non-centered finite differences (first-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are formed and divided by the
           step dXi
        4/ Each result, component by component, becomes a column of the
           Jacobian

        """
        logging.debug("FDA Start of the Jacobian calculation")
        logging.debug("FDA   Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA   Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX  = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated  = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Jacobian calculation (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA   Jacobian calculation (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  funcrepr) )
                        _jobs.append( (_X_moins_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne  = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne  = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne  = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
                #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    _jobs.append( (_X.A1, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne  = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
                #
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
        #
        logging.debug("FDA End of the Jacobian calculation")
        #
        return _Jacobienne

    # ---------------------------------------------------------
    def TangentOperator(self, paire ):
        """
        Computation of the tangent operator using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # --------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Computation of the linearized value of H at X applied to dX
            # ------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1

    # ---------------------------------------------------------
    def AdjointOperator(self, paire ):
        """
        Computation of the adjoint operator using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # --------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Computation of the adjoint value at X applied to Y
            # ---------------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1

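# ------------------------------------------------------------------------------
# Illustrative usage sketch (not called by the module), assuming a simple
# quadratic observation operator. For H(X) = X**2 the exact Jacobian at X is
# diag(2*X), which the finite-difference TangentMatrix approximates.
def _demo_FDApproximation():
    def QuadraticOperator( X ):
        return numpy.ravel( X )**2
    FDA = FDApproximation( Function = QuadraticOperator, centeredDF = True )
    X   = numpy.array([1., 2., 3.])
    HX  = FDA.DirectOperator( X )        # -> [1. 4. 9.]
    J   = FDA.TangentMatrix( X )         # -> approximately diag([2., 4., 6.])
    HtX = FDA.TangentOperator( (X, X) )  # -> approximately J @ X = [2. 8. 18.]
    HaY = FDA.AdjointOperator( (X, HX) ) # -> approximately J.T @ HX
    return HX, J, HtX, HaY
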
# ==============================================================================
def mmqr(
        func     = None,
        x0       = None,
        fprime   = None,
        bounds   = None,
        quantile = 0.5,
        maxfun   = 15000,
        toler    = 1.e-06,
        y        = None,
        ):
    """
    Computer implementation of the MMQR algorithm, based on the publication:
    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
    """
    #
    # Retrieval of initial data and information
    # -----------------------------------------
    variables = numpy.ravel( x0 )
    mesures   = numpy.ravel( y )
    increment = sys.float_info[0]
    p         = variables.size
    n         = mesures.size
    quantile  = float(quantile)
    #
    # Computation of the MM parameters
    # --------------------------------
    tn      = float(toler) / n
    e0      = -tn / math.log(tn)
    epsilon = (e0-tn)/(1+math.log(e0))
    #
    # Initialization computations
    # ---------------------------
    residus  = mesures - numpy.ravel( func( variables ) )
    poids    = 1./(epsilon+numpy.abs(residus))
    veps     = 1. - 2. * quantile - residus * poids
    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
    iteration = 0
    #
    # Iterative search
    # ----------------
    while (increment > toler) and (iteration < maxfun) :
        iteration += 1
        #
        Derivees  = numpy.array(fprime(variables))
        Derivees  = Derivees.reshape(n,p) # Required to restore the matrix shape if it went through YACS pipes
        DeriveesT = Derivees.transpose()
        M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
        SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
        #
        variables = variables + step
        if bounds is not None:
            # Caution: avoid an infinite loop if an interval is too small
            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
                step      = step/2.
                variables = variables - step
        residus   = mesures - numpy.ravel( func(variables) )
        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
            step      = step/2.
            variables = variables - step
            residus   = mesures - numpy.ravel( func(variables) )
            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        increment     = lastsurrogate-surrogate
        poids         = 1./(epsilon+numpy.abs(residus))
        veps          = 1. - 2. * quantile - residus * poids
        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
    #
    # Discrepancy measure
    # -------------------
    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
    #
    return variables, Ecart, [n,p,iteration,increment,0]

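# ------------------------------------------------------------------------------
# Illustrative sketch (not called by the module): quantile regression of a
# constant model with mmqr. With quantile=0.5, the fitted constant should
# approach the sample median of the measurements, largely insensitive to the
# outlier.
def _demo_mmqr():
    _y = numpy.array([1., 2., 3., 4., 100.]) # The outlier barely moves the median
    def _func(v):   # Constant model
        return v[0] * numpy.ones(_y.size)
    def _fprime(v): # Its (n,p) Jacobian
        return numpy.ones((_y.size, 1))
    _v, _ecart, _infos = mmqr(
        func     = _func,
        x0       = numpy.array([0.]),
        fprime   = _fprime,
        quantile = 0.5,
        y        = _y,
        )
    return _v # -> approximately [3.], the median
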
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to the covariances Pb or Pa, or to the
    ensembles EXb or EXa

    Summary: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnBackgroundCovariance", "AdditiveOnAnalysisCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns

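# ------------------------------------------------------------------------------
# Illustrative sketch (not called by the module): multiplicative inflation of a
# covariance matrix scales it by the squared factor, while additive inflation
# blends it with the identity.
def _demo_CovarianceInflation():
    _P = numpy.eye(2)
    _Pmult = CovarianceInflation( _P,
        InflationType   = "MultiplicativeOnAnalysisCovariance",
        InflationFactor = 1.1,
        ) # -> 1.21 * numpy.eye(2), i.e. scaled by 1.1**2
    _Padd = CovarianceInflation( _P,
        InflationType   = "AdditiveOnAnalysisCovariance",
        InflationFactor = 0.1,
        ) # -> numpy.eye(2), unchanged since P is already the identity
    return _Pmult, _Padd
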
# ==============================================================================
def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Stochastic EnKF (Evensen 1994, Burgers 1998)

    selfA is identical to the "self" of the calling algorithm and holds the
    values.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps identical to the number of observation steps
    # -------------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the inversions of B and R
    # -------------------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    Xn = numpy.asmatrix(numpy.dot( Xb.reshape(__n,1), numpy.ones((1,__m)) ))
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Offset of the already stored cost function values
    previousJMinimum = numpy.finfo(float).max
    #
    # Preallocation
    Xn_predicted = numpy.asmatrix(numpy.zeros((__n,__m)))
    HX_predicted = numpy.asmatrix(numpy.zeros((__p,__m)))
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
        else:
            Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)], argsAsSerie = True )
            for i in range(__m):
                qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn)
                Xn_predicted[:,i] = (numpy.ravel( EMX[i] ) + qi).reshape((__n,-1))
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float')
        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float')
        #
        PfHT, HPfHT = 0., 0.
        for i in range(__m):
            Exfi = Xn_predicted[:,i] - Xfm.reshape((__n,-1))
            Eyfi = (HX_predicted[:,i] - Hfm).reshape((__p,1))
            PfHT  += Exfi * Eyfi.T
            HPfHT += Eyfi * Eyfi.T
        PfHT  = (1./(__m-1)) * PfHT
        HPfHT = (1./(__m-1)) * HPfHT
        K     = PfHT * ( R + HPfHT ).I
        del PfHT, HPfHT
        #
        for i in range(__m):
            ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
            Xn[:,i] = Xn_predicted[:,i] + K @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i]).reshape((__p,1))
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float')
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            Eai = (1/numpy.sqrt(__m-1)) * (Xn - Xa.reshape((__n,-1))) # Anomalies
            Pn = Eai @ Eai.T
            Pn = 0.5 * (Pn + Pn.T)
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Final additional storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0

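# ------------------------------------------------------------------------------
# Illustrative sketch (not called by the module) of the stochastic EnKF
# analysis step performed above, on plain arrays: the gain K is built from the
# ensemble anomalies, and each member assimilates an observation perturbed by
# noise drawn from R. All sizes and values are arbitrary, for demonstration
# only.
def _demo_senkf_analysis_step():
    numpy.random.seed(1234)
    _n, _p, _m = 3, 2, 10                     # State, observation, ensemble sizes
    _Xf = numpy.random.randn(_n, _m)          # Forecast ensemble
    _Hm = numpy.random.randn(_p, _n)          # Linear observation operator
    _R  = 0.1 * numpy.eye(_p)                 # Observation error covariance
    _y  = numpy.random.randn(_p)              # Observation
    #
    _HXf = _Hm @ _Xf
    _Exf = _Xf  - _Xf.mean(axis=1, keepdims=True)   # State anomalies
    _Eyf = _HXf - _HXf.mean(axis=1, keepdims=True)  # Observed anomalies
    _PfHT  = (_Exf @ _Eyf.T) / (_m - 1)
    _HPfHT = (_Eyf @ _Eyf.T) / (_m - 1)
    _K = _PfHT @ numpy.linalg.inv(_R + _HPfHT)      # Kalman gain
    #
    _Xa = numpy.empty_like(_Xf)
    for i in range(_m):
        _ri = numpy.random.multivariate_normal(numpy.zeros(_p), _R)
        _Xa[:,i] = _Xf[:,i] + _K @ (_y + _ri - _HXf[:,i])
    return _Xa # Analysis ensemble
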
797 # ==============================================================================
798 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, KorV="KalmanFilterFormula"):
799     """
800     Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)
801
802     selfA est identique au "self" d'algorithme appelant et contient les
803     valeurs.
804     """
805     if selfA._parameters["EstimationOf"] == "Parameters":
806         selfA._parameters["StoreInternalVariables"] = True
807     #
808     # Opérateurs
809     # ----------
810     H = HO["Direct"].appliedControledFormTo
811     #
812     if selfA._parameters["EstimationOf"] == "State":
813         M = EM["Direct"].appliedControledFormTo
814     #
815     if CM is not None and "Tangent" in CM and U is not None:
816         Cm = CM["Tangent"].asMatrix(Xb)
817     else:
818         Cm = None
819     #
820     # Nombre de pas identique au nombre de pas d'observations
821     # -------------------------------------------------------
822     if hasattr(Y,"stepnumber"):
823         duration = Y.stepnumber()
824         __p = numpy.cumprod(Y.shape())[-1]
825     else:
826         duration = 2
827         __p = numpy.array(Y).size
828     #
829     # Précalcul des inversions de B et R
830     # ----------------------------------
831     if selfA._parameters["StoreInternalVariables"] \
832         or selfA._toStore("CostFunctionJ") \
833         or selfA._toStore("CostFunctionJb") \
834         or selfA._toStore("CostFunctionJo") \
835         or selfA._toStore("CurrentOptimum") \
836         or selfA._toStore("APosterioriCovariance"):
837         BI = B.getI()
838         RI = R.getI()
839     elif KorV != "KalmanFilterFormula":
840         RI = R.getI()
841     if KorV == "KalmanFilterFormula":
842         RIdemi = R.choleskyI()
843     #
844     # Initialisation
845     # --------------
846     __n = Xb.size
847     __m = selfA._parameters["NumberOfMembers"]
848     Xn = numpy.asmatrix(numpy.dot( Xb.reshape(__n,1), numpy.ones((1,__m)) ))
849     if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
850     else:                         Pn = B
851     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
852     else:                         Rn = R
853     if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
854     else:                         Qn = Q
855     #
856     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
857         selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
858         if selfA._toStore("APosterioriCovariance"):
859             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
860             covarianceXa = Pn
861     #
862     previousJMinimum = numpy.finfo(float).max
863     #
864     # Predimensionnement
865     Xn_predicted = numpy.asmatrix(numpy.zeros((__n,__m)))
866     HX_predicted = numpy.asmatrix(numpy.zeros((__p,__m)))
867     #
868     for step in range(duration-1):
869         if hasattr(Y,"store"):
870             Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
871         else:
872             Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
873         #
874         if U is not None:
875             if hasattr(U,"store") and len(U)>1:
876                 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
877             elif hasattr(U,"store") and len(U)==1:
878                 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
879             else:
880                 Un = numpy.asmatrix(numpy.ravel( U )).T
881         else:
882             Un = None
883         #
884         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
885             Xn = CovarianceInflation( Xn,
886                 selfA._parameters["InflationType"],
887                 selfA._parameters["InflationFactor"],
888                 )
889         #
890         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
891             EMX = M( [(Xn[:,i], Un) for i in range(__m)], argsAsSerie = True )
892             for i in range(__m):
893                 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn)
894                 Xn_predicted[:,i] = (numpy.ravel( EMX[i] ) + qi).reshape((__n,-1))
895             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
896                 argsAsSerie = True,
897                 returnSerieAsArrayMatrix = True )
898             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
899                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
900                 Xn_predicted = Xn_predicted + Cm * Un
901         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
902             # --- > Par principe, M = Id, Q = 0
903             Xn_predicted = Xn
904             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
905                 argsAsSerie = True,
906                 returnSerieAsArrayMatrix = True )
907         #
908         # Mean of forecast and observation of forecast
909         Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float')
910         Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float')
911         #
912         EaX   = numpy.matrix(Xn_predicted - Xfm.reshape((__n,-1)))
913         EaHX  = numpy.matrix(HX_predicted - Hfm.reshape((__p,-1)))
914         #
915         #--------------------------
916         if KorV == "KalmanFilterFormula":
917             EaX    = EaX / numpy.sqrt(__m-1)
918             mS    = RIdemi * EaHX / numpy.sqrt(__m-1)
919             delta = RIdemi * ( Ynpu.reshape((__p,-1)) - Hfm.reshape((__p,-1)) )
920             mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
921             vw    = mT @ mS.transpose() @ delta
922             #
923             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
924             mU    = numpy.eye(__m)
925             #
926             Xn = Xfm.reshape((__n,-1)) + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
927         #--------------------------
928         elif KorV == "Variational":
929             HXfm = H((Xfm, Un)) # Eventuellement Hfm
930             def CostFunction(w):
931                 _A  = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
932                 _Jo = 0.5 * _A.T * RI * _A
933                 _Jb = 0.5 * (__m-1) * w.T @ w
934                 _J  = _Jo + _Jb
935                 return float(_J)
936             def GradientOfCostFunction(w):
937                 _A  = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
938                 _GardJo = - EaHX.T * RI * _A
939                 _GradJb = (__m-1) * w.reshape((__m,1))
940                 _GradJ  = _GardJo + _GradJb
941                 return numpy.ravel(_GradJ)
942             vw = scipy.optimize.fmin_cg(
943                 f           = CostFunction,
944                 x0          = numpy.zeros(__m),
945                 fprime      = GradientOfCostFunction,
946                 args        = (),
947                 disp        = False,
948                 )
949             #
950             Hto = EaHX.T * RI * EaHX
951             Htb = (__m-1) * numpy.eye(__m)
952             Hta = Hto + Htb
953             #
954             Pta = numpy.linalg.inv( Hta )
955             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
956             #
957             Xn = Xfm.reshape((__n,-1)) + EaX @ (vw.reshape((__m,-1)) + EWa)
958         #--------------------------
959         elif KorV == "FiniteSize11": # Jauge Boc2011
960             HXfm = H((Xfm, Un)) # Eventuellement Hfm
961             def CostFunction(w):
962                 _A  = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
963                 _Jo = 0.5 * _A.T * RI * _A
964                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
965                 _J  = _Jo + _Jb
966                 return float(_J)
967             def GradientOfCostFunction(w):
968                 _A  = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
969                 _GardJo = - EaHX.T * RI * _A
970                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
971                 _GradJ  = _GardJo + _GradJb
972                 return numpy.ravel(_GradJ)
973             vw = scipy.optimize.fmin_cg(
974                 f           = CostFunction,
975                 x0          = numpy.zeros(__m),
976                 fprime      = GradientOfCostFunction,
977                 args        = (),
978                 disp        = False,
979                 )
980             #
981             Hto = EaHX.T * RI * EaHX
982             Htb = __m * \
983                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
984                 / (1 + 1/__m + vw.T @ vw)**2
985             Hta = Hto + Htb
986             #
987             Pta = numpy.linalg.inv( Hta )
988             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
989             #
990             Xn = Xfm.reshape((__n,-1)) + EaX @ (vw.reshape((__m,-1)) + EWa)
991         #--------------------------
992         elif KorV == "FiniteSize15": # Jauge Boc2015
993             HXfm = H((Xfm, Un)) # Eventuellement Hfm
994             def CostFunction(w):
995                 _A  = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
996                 _Jo = 0.5 * _A.T * RI * _A
997                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
998                 _J  = _Jo + _Jb
999                 return float(_J)
1000             def GradientOfCostFunction(w):
1001                 _A  = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
1002                 _GardJo = - EaHX.T * RI * _A
1003                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1004                 _GradJ  = _GardJo + _GradJb
1005                 return numpy.ravel(_GradJ)
1006             vw = scipy.optimize.fmin_cg(
1007                 f           = CostFunction,
1008                 x0          = numpy.zeros(__m),
1009                 fprime      = GradientOfCostFunction,
1010                 args        = (),
1011                 disp        = False,
1012                 )
1013             #
1014             Hto = EaHX.T * RI * EaHX
1015             Htb = (__m+1) * \
1016                 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
1017                 / (1 + 1/__m + vw.T @ vw)**2
1018             Hta = Hto + Htb
1019             #
1020             Pta = numpy.linalg.inv( Hta )
1021             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1022             #
1023             Xn = Xfm.reshape((__n,-1)) + EaX @ (vw.reshape((__m,-1)) + EWa)
1024         #--------------------------
1025         elif KorV == "FiniteSize16": # Jauge Boc2016
1026             HXfm = H((Xfm, Un)) # Eventuellement Hfm
1027             def CostFunction(w):
1028                 _A  = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
1029                 _Jo = 0.5 * _A.T * RI * _A
1030                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1031                 _J  = _Jo + _Jb
1032                 return float(_J)
1033             def GradientOfCostFunction(w):
1034                 _A  = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
1035                 _GardJo = - EaHX.T * RI * _A
1036                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1037                 _GradJ  = _GardJo + _GradJb
1038                 return numpy.ravel(_GradJ)
1039             vw = scipy.optimize.fmin_cg(
1040                 f           = CostFunction,
1041                 x0          = numpy.zeros(__m),
1042                 fprime      = GradientOfCostFunction,
1043                 args        = (),
1044                 disp        = False,
1045                 )
1046             #
1047             Hto = EaHX.T * RI * EaHX
1048             Htb = ((__m+1) / (__m-1)) * \
1049                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
1050                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1051             Hta = Hto + Htb
1052             #
1053             Pta = numpy.linalg.inv( Hta )
1054             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1055             #
1056             Xn = Xfm.reshape((__n,-1)) + EaX @ (vw.reshape((__m,-1)) + EWa)
1057         #--------------------------
1058         else:
1059             raise ValueError("KorV has to be chosen in the authorized methods list.")
1060         #
1061         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1062             Xn = CovarianceInflation( Xn,
1063                 selfA._parameters["InflationType"],
1064                 selfA._parameters["InflationFactor"],
1065                 )
1066         #
1067         Xa = Xn.mean(axis=1, dtype=mfp).astype('float')
1068         #--------------------------
1069         #
1070         if selfA._parameters["StoreInternalVariables"] \
1071             or selfA._toStore("CostFunctionJ") \
1072             or selfA._toStore("CostFunctionJb") \
1073             or selfA._toStore("CostFunctionJo") \
1074             or selfA._toStore("APosterioriCovariance") \
1075             or selfA._toStore("InnovationAtCurrentAnalysis") \
1076             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1077             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1078             _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1079             _Innovation = Ynpu - _HXa
1080         #
1081         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1082         # ---> avec analysis
1083         selfA.StoredVariables["Analysis"].store( Xa )
1084         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1085             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1086         if selfA._toStore("InnovationAtCurrentAnalysis"):
1087             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1088         # ---> avec current state
1089         if selfA._parameters["StoreInternalVariables"] \
1090             or selfA._toStore("CurrentState"):
1091             selfA.StoredVariables["CurrentState"].store( Xn )
1092         if selfA._toStore("ForecastState"):
1093             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1094         if selfA._toStore("BMA"):
1095             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1096         if selfA._toStore("InnovationAtCurrentState"):
1097             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1098         if selfA._toStore("SimulatedObservationAtCurrentState") \
1099             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1100             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1101         # ---> autres
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            Eai = (1/numpy.sqrt(__m-1)) * (Xn - Xa.reshape((__n,-1))) # Anomalies
            Pn = Eai @ Eai.T
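            # Symmetrization guards against the small asymmetries that
            # floating-point products can introduce in Eai @ Eai.T: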
            Pn = 0.5 * (Pn + Pn.T)
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum, for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0

# ==============================================================================
def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, BnotT=False, _epsilon=1.e-1, _e=1.e-7, _jmax=15000):
    """
    Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013)

    selfA is identical to the "self" of the calling algorithm and holds its
    values.
    """
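    # Options, as read from the signature and the code below:
    #   BnotT    : True selects the "bundle" variant (finite differences with
    #              rescaling _epsilon), False the "transform" variant (Ta)
    #   _e       : stopping tolerance on the Newton increment norm
    #   _jmax    : maximum number of inner minimization iterations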
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps equal to the number of observation steps
    # ---------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    # ----------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
    RI = R.getI()
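    # RI is needed at every inner iteration of the minimization below, while
    # BI only enters the diagnostic cost function, hence its conditional
    # precomputation above.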
    #
    # Initialisation
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    Xn = numpy.asmatrix(numpy.dot( Xb.reshape(__n,1), numpy.ones((1,__m)) ))
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    #
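    # On the very first step, or when no continuation is requested, the
    # background is stored as the initial analysis: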
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = len(selfA.StoredVariables["CostFunctionJ"]) # Offset of already stored steps, used below to locate IndexMin
    #
    # Preallocation
    Xn_predicted = numpy.asmatrix(numpy.zeros((__n,__m)))
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
        else:
            Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)], argsAsSerie = True )
            for i in range(__m):
                qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn)
                Xn_predicted[:,i] = (numpy.ravel( EMX[i] ) + qi).reshape((__n,-1))
            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = Xn
        #
        # Mean of forecast and observation of forecast
        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float')
        #
        EaX   = (Xn_predicted - Xfm.reshape((__n,-1))) / numpy.sqrt(__m-1)
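        # EaX holds the normalized forecast anomalies: with the 1/sqrt(m-1)
        # factor, EaX @ EaX.T estimates the forecast error covariance.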
        #
        #--------------------------
        Ua = numpy.eye(__m)
        Ta = numpy.eye(__m)
        #
        __j = 0 # 4:
        vw = numpy.zeros(__m) # 4:
        Deltaw = 1
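        # Gauss-Newton iterations on the ensemble weights vw: each pass
        # relinearizes the observation operator around the current estimate
        # (the "N:" comments number the steps of the reference algorithm).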
        while numpy.linalg.norm(Deltaw) >= _e and __j < _jmax: # 5: and 19:
            vx = numpy.ravel(Xfm) + EaX @ vw # 6:
            #
            if BnotT:
                EE = vx.reshape((__n,-1)) + _epsilon * EaX # 7:
            else:
                EE = vx.reshape((__n,-1)) + numpy.sqrt(__m-1) * EaX @ Ta # 8:
            #
            EZ = H( [(EE[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            #
            ybar = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1)) # 10: Observation mean
            #
            if BnotT:
                EY = (EZ - ybar) / _epsilon # 11:
            else:
                EY = ( (EZ - ybar) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1) # 12:
            #
            GradJ = numpy.ravel(vw.reshape((__m,1)) - EY.transpose() @ (RI * (Ynpu - ybar))) # 13:
            mH = numpy.eye(__m) + EY.transpose() @ (RI * EY) # 14:
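            # mH approximates the Hessian of the cost function in weight
            # space; solving mH @ Deltaw = GradJ below is one Newton step.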
            Deltaw = numpy.linalg.solve(mH,GradJ) # 15:
            vw = vw - Deltaw # 16:
            if not BnotT:
                Ta = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm( mH ))) # 17:
            __j = __j + 1 # 18:
        #
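        # In the bundle variant the transform is only computed once, after
        # convergence, from the last Hessian approximation: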
        if BnotT:
            Ta = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm( mH ))) # 20:
        #
        Xn = vx.reshape((__n,-1)) + numpy.sqrt(__m-1) * EaX @ Ta @ Ua # 21:
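        # The analysis ensemble is rebuilt around the minimizing state vx:
        # Ta rescales the anomalies to the analysis spread, and Ua allows an
        # arbitrary mean-preserving rotation (identity here).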
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float')
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        #~ if selfA._toStore("InnovationAtCurrentState"):
            #~ selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        #~ if selfA._toStore("SimulatedObservationAtCurrentState") \
            #~ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            #~ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            Eai = (1/numpy.sqrt(__m-1)) * (Xn - Xa.reshape((__n,-1))) # Anomalies
            Pn = Eai @ Eai.T
            Pn = 0.5 * (Pn + Pn.T)
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum, for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0

# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')
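    #
    # Illustrative sketch only (not part of the ADAO API; all names below are
    # local to this demonstration): one deterministic ETKF-like analysis step
    # on synthetic data, using the same normalized-anomaly and transform
    # decomposition as the ensemble algorithms above, with H = Id.
    numpy.random.seed(1234567)
    __n, __m = 4, 10                                 # state size, ensemble size
    xt  = numpy.arange(1., __n+1.)                   # synthetic true state
    E   = xt.reshape((__n,1)) + numpy.random.randn(__n,__m) # forecast ensemble
    yo  = xt + 0.1*numpy.random.randn(__n)           # synthetic observation
    Rdm = 0.01*numpy.eye(__n)                        # observation error covariance
    xf  = E.mean(axis=1)                             # forecast mean
    Ax  = (E - xf.reshape((__n,1))) / numpy.sqrt(__m-1)  # normalized state anomalies
    Ay  = Ax                                         # observation anomalies (H = Id)
    RId = numpy.linalg.inv(Rdm)
    mHd = numpy.eye(__m) + Ay.T @ RId @ Ay           # I + S'R^-1 S in weight space
    wd  = numpy.linalg.solve(mHd, Ay.T @ RId @ (yo - xf)) # optimal weights
    Tad = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv(mHd))) # transform matrix
    Ean = xf.reshape((__n,1)) + Ax @ (wd.reshape((__m,1)) + numpy.sqrt(__m-1)*Tad)
    print("  Toy ETKF: background mean =", xf)
    print("  Toy ETKF: analysis mean   =", Ean.mean(axis=1))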