X-Git-Url: http://git.salome-platform.org/gitweb/?a=blobdiff_plain;f=src%2FdaComposant%2FdaCore%2FNumericObjects.py;h=5089c21f3ef060896e8739797cad63995898096f;hb=c3059023647feca54ad621b0af00ef55127b59fa;hp=ba218365f0a14a2b0474b874b81dda1b4714ec8a;hpb=5616e873137f4215c5700e9f6b4c476ac60230ad;p=modules%2Fadao.git diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py index ba21836..5089c21 100644 --- a/src/daComposant/daCore/NumericObjects.py +++ b/src/daComposant/daCore/NumericObjects.py @@ -34,15 +34,18 @@ mfp = PlatformInfo().MaximumPrecision() # logging.getLogger().setLevel(logging.DEBUG) # ============================================================================== -def ExecuteFunction( paire ): - assert len(paire) == 2, "Incorrect number of arguments" - X, funcrepr = paire +def ExecuteFunction( triplet ): + assert len(triplet) == 3, "Incorrect number of arguments" + X, xArgs, funcrepr = triplet __X = numpy.asmatrix(numpy.ravel( X )).T __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"]) __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), []) __fonction = getattr(__module,funcrepr["__userFunction__name"]) sys.path = __sys_path_tmp ; del __sys_path_tmp - __HX = __fonction( __X ) + if isinstance(xArgs, dict): + __HX = __fonction( __X, **xArgs ) + else: + __HX = __fonction( __X ) return numpy.ravel( __HX ) # ============================================================================== @@ -62,6 +65,7 @@ class FDApproximation(object): centeredDF = False, increment = 0.01, dX = None, + extraArguments = None, avoidingRedundancy = True, toleranceInRedundancy = 1.e-18, lenghtOfRedundancy = -1, @@ -70,6 +74,7 @@ class FDApproximation(object): mfEnabled = False, ): self.__name = str(name) + self.__extraArgs = extraArguments if mpEnabled: try: import multiprocessing @@ -114,7 +119,7 @@ class FDApproximation(object): self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','') self.__userFunction__path = os.path.dirname(mod) del mod - self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled ) + self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs ) self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct elif isinstance(Function,types.MethodType): logging.debug("FDA Calculs en multiprocessing : MethodType") @@ -128,12 +133,12 @@ class FDApproximation(object): self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','') self.__userFunction__path = os.path.dirname(mod) del mod - self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled ) + self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs ) self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct else: raise TypeError("User defined function or method has to be provided for finite differences approximation.") else: - self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled ) + 
self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs ) self.__userFunction = self.__userOperator.appliedTo # self.__centeredDF = bool(centeredDF) @@ -160,9 +165,12 @@ class FDApproximation(object): return __ac, __iac # --------------------------------------------------------- - def DirectOperator(self, X ): + def DirectOperator(self, X, **extraArgs ): """ Calcul du direct à l'aide de la fonction fournie. + + NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais + ne doivent pas être données ici à la fonction utilisateur. """ logging.debug("FDA Calcul DirectOperator (explicite)") if self.__mfEnabled: @@ -249,8 +257,8 @@ class FDApproximation(object): _X_moins_dXi = numpy.array( _X.A1, dtype=float ) _X_moins_dXi[i] = _X[i] - _dXi # - _jobs.append( (_X_plus_dXi, funcrepr) ) - _jobs.append( (_X_moins_dXi, funcrepr) ) + _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) ) + _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) ) # import multiprocessing self.__pool = multiprocessing.Pool(self.__mpWorkers) @@ -303,12 +311,12 @@ class FDApproximation(object): "__userFunction__name" : self.__userFunction__name, } _jobs = [] - _jobs.append( (_X.A1, funcrepr) ) + _jobs.append( (_X.A1, self.__extraArgs, funcrepr) ) for i in range( len(_dX) ): _X_plus_dXi = numpy.array( _X.A1, dtype=float ) _X_plus_dXi[i] = _X[i] + _dX[i] # - _jobs.append( (_X_plus_dXi, funcrepr) ) + _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) ) # import multiprocessing self.__pool = multiprocessing.Pool(self.__mpWorkers) @@ -372,9 +380,12 @@ class FDApproximation(object): return _Jacobienne # --------------------------------------------------------- - def TangentOperator(self, paire ): + def TangentOperator(self, paire, **extraArgs ): """ Calcul du tangent à l'aide de la Jacobienne. + + NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais + ne doivent pas être données ici à la fonction utilisateur. """ if self.__mfEnabled: assert len(paire) == 1, "Incorrect lenght of arguments" @@ -401,9 +412,12 @@ class FDApproximation(object): else: return _HtX.A1 # --------------------------------------------------------- - def AdjointOperator(self, paire ): + def AdjointOperator(self, paire, **extraArgs ): """ Calcul de l'adjoint à l'aide de la Jacobienne. + + NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais + ne doivent pas être données ici à la fonction utilisateur. """ if self.__mfEnabled: assert len(paire) == 1, "Incorrect lenght of arguments" @@ -429,84 +443,6 @@ class FDApproximation(object): if self.__mfEnabled: return [_HaY.A1,] else: return _HaY.A1 -# ============================================================================== -def mmqr( - func = None, - x0 = None, - fprime = None, - bounds = None, - quantile = 0.5, - maxfun = 15000, - toler = 1.e-06, - y = None, - ): - """ - Implémentation informatique de l'algorithme MMQR, basée sur la publication : - David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm", - Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000. 
- """ - # - # Recuperation des donnees et informations initiales - # -------------------------------------------------- - variables = numpy.ravel( x0 ) - mesures = numpy.ravel( y ) - increment = sys.float_info[0] - p = variables.size - n = mesures.size - quantile = float(quantile) - # - # Calcul des parametres du MM - # --------------------------- - tn = float(toler) / n - e0 = -tn / math.log(tn) - epsilon = (e0-tn)/(1+math.log(e0)) - # - # Calculs d'initialisation - # ------------------------ - residus = mesures - numpy.ravel( func( variables ) ) - poids = 1./(epsilon+numpy.abs(residus)) - veps = 1. - 2. * quantile - residus * poids - lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus) - iteration = 0 - # - # Recherche iterative - # ------------------- - while (increment > toler) and (iteration < maxfun) : - iteration += 1 - # - Derivees = numpy.array(fprime(variables)) - Derivees = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS - DeriveesT = Derivees.transpose() - M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) ) - SM = numpy.transpose(numpy.dot( DeriveesT , veps )) - step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0] - # - variables = variables + step - if bounds is not None: - # Attention : boucle infinie à éviter si un intervalle est trop petit - while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ): - step = step/2. - variables = variables - step - residus = mesures - numpy.ravel( func(variables) ) - surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus) - # - while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) : - step = step/2. - variables = variables - step - residus = mesures - numpy.ravel( func(variables) ) - surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus) - # - increment = lastsurrogate-surrogate - poids = 1./(epsilon+numpy.abs(residus)) - veps = 1. - 2. 
* quantile - residus * poids - lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus) - # - # Mesure d'écart - # -------------- - Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] ) - # - return variables, Ecart, [n,p,iteration,increment,0] - # ============================================================================== def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ): "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés" @@ -532,14 +468,14 @@ def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _wi notes manuscrites de MB et conforme au code de PS avec eps = -1 """ eps = -1 - Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps) + Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps) Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0) R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1))) Q = numpy.dot(Q,R) Zr = numpy.dot(Q,Zr) return Zr.T # - _bgcenter = numpy.ravel(_bgcenter)[:,None] + _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1)) if _nbmembers < 1: raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),)) if _bgcovariance is None: @@ -568,14 +504,29 @@ def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _wi return BackgroundEnsemble # ============================================================================== -def EnsembleOfAnomalies( _ensemble, _optmean = None): +def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.): "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres" - if _optmean is None: - Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis] + if OptMean is None: + __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1)) else: - Em = numpy.ravel(_optmean)[:,numpy.newaxis] + __Em = numpy.ravel(OptMean).reshape((-1,1)) # - return numpy.asarray(_ensemble) - Em + return Normalisation * (numpy.asarray(Ensemble) - __Em) + +# ============================================================================== +def EnsembleErrorCovariance( Ensemble ): + "Renvoie la covariance d'ensemble" + __Anomalies = EnsembleOfAnomalies( Ensemble ) + __n, __m = numpy.asarray(__Anomalies).shape + # Estimation empirique + __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1) + # Assure la symétrie + __Covariance = (__Covariance + __Covariance.T) * 0.5 + # Assure la positivité + __epsilon = mpr*numpy.trace(__Covariance) + __Covariance = __Covariance + __epsilon * numpy.identity(__n) + # + return __Covariance # ============================================================================== def CovarianceInflation( @@ -618,7 +569,7 @@ def CovarianceInflation( __n, __m = numpy.asarray(InputCovOrEns).shape if __n != __m: raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.") - OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n) + OutputCovOrEns = (1. 
- InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n) # elif InflationType == "HybridOnBackgroundCovariance": if InflationFactor < 0.: @@ -643,581 +594,744 @@ def CovarianceInflation( return OutputCovOrEns # ============================================================================== -def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): +def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"): """ - 3DVAR (Bouttier 1999, Courtier 1993) - - selfA est identique au "self" d'algorithme appelant et contient les - valeurs. + EnKS """ # - # Correction pour pallier a un bug de TNC sur le retour du Minimum - if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC": - selfA.setParameterValue("StoreInternalVariables",True) + # Initialisations + # --------------- # # Opérateurs - # ---------- - Hm = HO["Direct"].appliedTo - Ha = HO["Adjoint"].appliedInXTo + H = HO["Direct"].appliedControledFormTo # - # Utilisation éventuelle d'un vecteur H(Xb) précalculé - # ---------------------------------------------------- - if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]: - HXb = Hm( Xb, HO["AppliedInX"]["HXb"] ) - else: - HXb = Hm( Xb ) - HXb = numpy.asmatrix(numpy.ravel( HXb )).T - if Y.size != HXb.size: - raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size)) - if max(Y.shape) != max(HXb.shape): - raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape)) + if selfA._parameters["EstimationOf"] == "State": + M = EM["Direct"].appliedControledFormTo # - if selfA._toStore("JacobianMatrixAtBackground"): - HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb) - HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape - selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb ) + if CM is not None and "Tangent" in CM and U is not None: + Cm = CM["Tangent"].asMatrix(Xb) + else: + Cm = None # # Précalcul des inversions de B et R - # ---------------------------------- - BI = B.getI() - RI = R.getI() + RIdemi = R.sqrtmI() + # + LagL = selfA._parameters["SmootherLagL"] + if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")): + raise ValueError("Fixed-lag smoother requires a series of observation") + if Y.stepnumber() < LagL: + raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L") + duration = Y.stepnumber() + __p = numpy.cumprod(Y.shape())[-1] + __n = Xb.size + __m = selfA._parameters["NumberOfMembers"] # - # Point de démarrage de l'optimisation - # ------------------------------------ - Xini = selfA._parameters["InitializationPoint"] + if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n) + else: Pn = B + if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n) + else: Qn = Q + if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: + selfA.StoredVariables["Analysis"].store( Xb ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( Pn ) + covarianceXa = Pn # - # Définition de la fonction-coût - # ------------------------------ - def CostFunction(x): - _X = numpy.asmatrix(numpy.ravel( x )).T - if selfA._parameters["StoreInternalVariables"] or \ - selfA._toStore("CurrentState") or \ - selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentState"].store( _X ) - _HX = Hm( _X ) - _HX = numpy.asmatrix(numpy.ravel( _HX 
)).T - _Innovation = Y - _HX - if selfA._toStore("SimulatedObservationAtCurrentState") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) + # Calcul direct initial (on privilégie la mémorisation au recalcul) + __seed = numpy.random.get_state() + selfB = copy.deepcopy(selfA) + selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"] + if VariantM == "EnKS16-KalmanFilterFormula": + etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula") + else: + raise ValueError("VariantM has to be chosen in the authorized methods list.") + if LagL > 0: + EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1] + else: + EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf + selfA._parameters["SetSeed"] = numpy.random.set_state(__seed) + # + for step in range(LagL,duration-1): # - Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo + sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1] + sEL.append(None) # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - if selfA._toStore("IndexOfOptimum") or \ - selfA._toStore("CurrentOptimum") or \ - selfA._toStore("CostFunctionJAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJoAtCurrentOptimum") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - return J + if hasattr(Y,"store"): + Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) + else: + Ynpu = numpy.ravel( Y ).reshape((__p,1)) + # + if U is not None: + if hasattr(U,"store") and len(U)>1: + Un = numpy.asmatrix(numpy.ravel( U[step] )).T + elif hasattr(U,"store") and len(U)==1: + Un = numpy.asmatrix(numpy.ravel( U[0] )).T + else: + Un = numpy.asmatrix(numpy.ravel( U )).T + else: + Un = None + # + #-------------------------- + if VariantM == "EnKS16-KalmanFilterFormula": + if selfA._parameters["EstimationOf"] == "State": # Forecast + EL = M( [(EL[:,i], Un) for i in range(__m)], + argsAsSerie = True, + returnSerieAsArrayMatrix = True ) + EL = EL + 
numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T + EZ = H( [(EL[:,i], Un) for i in range(__m)], + argsAsSerie = True, + returnSerieAsArrayMatrix = True ) + if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! + Cm = Cm.reshape(__n,Un.size) # ADAO & check shape + EZ = EZ + Cm * Un + elif selfA._parameters["EstimationOf"] == "Parameters": + # --- > Par principe, M = Id, Q = 0 + EZ = H( [(EL[:,i], Un) for i in range(__m)], + argsAsSerie = True, + returnSerieAsArrayMatrix = True ) + # + vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) + # + mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) ) + delta = RIdemi @ ( Ynpu - vZm ) + mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS ) + vw = mT @ mS.T @ delta + # + Tdemi = numpy.real(scipy.linalg.sqrtm(mT)) + mU = numpy.identity(__m) + wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU) + # + EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) ) + EL = vEm + EX @ wTU + # + sEL[LagL] = EL + for irl in range(LagL): # Lissage des L précédentes analysis + vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) ) + sEL[irl] = vEm + EX @ wTU + # + # Conservation de l'analyse retrospective d'ordre 0 avant rotation + Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + if selfA._toStore("APosterioriCovariance"): + EXn = sEL[0] + # + for irl in range(LagL): + sEL[irl] = sEL[irl+1] + sEL[LagL] = None + #-------------------------- + else: + raise ValueError("VariantM has to be chosen in the authorized methods list.") + # + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( Xa ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) ) # - def GradientOfCostFunction(x): - _X = numpy.asmatrix(numpy.ravel( x )).T - _HX = Hm( _X ) - _HX = numpy.asmatrix(numpy.ravel( _HX )).T - GradJb = BI * (_X - Xb) - GradJo = - Ha( (_X, RI * (Y - _HX)) ) - GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) - return GradJ + # Stockage des dernières analyses incomplètement remises à jour + for irl in range(LagL): + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + selfA.StoredVariables["Analysis"].store( Xa ) # - # Minimisation de la fonctionnelle - # -------------------------------- - nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() + return 0 + +# ============================================================================== +def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): + """ + Ensemble-Transform EnKF + """ + if selfA._parameters["EstimationOf"] == "Parameters": + selfA._parameters["StoreInternalVariables"] = True # - if selfA._parameters["Minimizer"] == "LBFGSB": - if "0.19" <= scipy.version.version <= "1.1.0": - import lbfgsbhlt as optimiseur - else: - import scipy.optimize as optimiseur - Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = selfA._parameters["Bounds"], - maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, - factr = 
selfA._parameters["CostDecrementTolerance"]*1.e14, - pgtol = selfA._parameters["ProjectedGradientTolerance"], - iprint = selfA._parameters["optiprint"], - ) - nfeval = Informations['funcalls'] - rc = Informations['warnflag'] - elif selfA._parameters["Minimizer"] == "TNC": - Minimum, nfeval, rc = scipy.optimize.fmin_tnc( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = selfA._parameters["Bounds"], - maxfun = selfA._parameters["MaximumNumberOfSteps"], - pgtol = selfA._parameters["ProjectedGradientTolerance"], - ftol = selfA._parameters["CostDecrementTolerance"], - messages = selfA._parameters["optmessages"], - ) - elif selfA._parameters["Minimizer"] == "CG": - Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "NCG": - Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - avextol = selfA._parameters["CostDecrementTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "BFGS": - Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - else: - raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) - # - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] - # - # Correction pour pallier a un bug de TNC sur le retour du Minimum - # ---------------------------------------------------------------- - if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): - Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - # - # Obtention de l'analyse - # ---------------------- - Xa = numpy.asmatrix(numpy.ravel( Minimum )).T - # - selfA.StoredVariables["Analysis"].store( Xa.A1 ) - # - if selfA._toStore("OMA") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("SimulatedObservationAtOptimum"): - if selfA._toStore("SimulatedObservationAtCurrentState"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] - elif selfA._toStore("SimulatedObservationAtCurrentOptimum"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1] - else: - HXa = Hm( Xa ) + # Opérateurs + # ---------- + H = HO["Direct"].appliedControledFormTo # - # Calcul de la covariance d'analyse - # --------------------------------- - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("JacobianMatrixAtOptimum") or \ - selfA._toStore("KalmanGainAtOptimum"): - HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa) - HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("KalmanGainAtOptimum"): - HaM = 
HO["Adjoint"].asMatrix(ValueForMethodForm = Xa) - HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles"): - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.matrix(numpy.zeros(nb)).T - _ee[i] = 1. - _HtEE = numpy.dot(HtM,_ee) - _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T - HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) - HessienneI = numpy.matrix( HessienneI ) - A = HessienneI.I - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,)) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( A ) - if selfA._toStore("JacobianMatrixAtOptimum"): - selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM ) - if selfA._toStore("KalmanGainAtOptimum"): - if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I - elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI - selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) + if selfA._parameters["EstimationOf"] == "State": + M = EM["Direct"].appliedControledFormTo # - # Calculs et/ou stockages supplémentaires - # --------------------------------------- - if selfA._toStore("Innovation") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("MahalanobisConsistency") or \ - selfA._toStore("OMB"): - d = Y - HXb - if selfA._toStore("Innovation"): - selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) - if selfA._toStore("OMA"): - selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) - if selfA._toStore("OMB"): - selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) - if selfA._toStore("SigmaObs2"): - TraceR = R.trace(Y.size) - selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) - if selfA._toStore("MahalanobisConsistency"): - selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) - if selfA._toStore("SimulationQuantiles"): - nech = selfA._parameters["NumberOfSamplesForQuantiles"] - HXa = numpy.matrix(numpy.ravel( HXa )).T - YfQ = None - for i in range(nech): - if selfA._parameters["SimulationForQuantiles"] == "Linear": - dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T - dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T - Yr = HXa + dYr - elif selfA._parameters["SimulationForQuantiles"] == "NonLinear": - Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T - Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T - if YfQ is None: - YfQ = Yr - else: - YfQ = numpy.hstack((YfQ,Yr)) - YfQ.sort(axis=-1) - YQ = None - for quantile in selfA._parameters["Quantiles"]: - if not 
(0. <= float(quantile) <= 1.): continue - indice = int(nech * float(quantile) - 1./nech) - if YQ is None: YQ = YfQ[:,indice] - else: YQ = numpy.hstack((YQ,YfQ[:,indice])) - selfA.StoredVariables["SimulationQuantiles"].store( YQ ) - if selfA._toStore("SimulatedObservationAtBackground"): - selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) - if selfA._toStore("SimulatedObservationAtOptimum"): - selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) + if CM is not None and "Tangent" in CM and U is not None: + Cm = CM["Tangent"].asMatrix(Xb) + else: + Cm = None # - return 0 - -# ============================================================================== -def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): - """ - 3DVAR variational analysis with no inversion of B (Huang 2000) - - selfA est identique au "self" d'algorithme appelant et contient les - valeurs. - """ + # Nombre de pas identique au nombre de pas d'observations + # ------------------------------------------------------- + if hasattr(Y,"stepnumber"): + duration = Y.stepnumber() + __p = numpy.cumprod(Y.shape())[-1] + else: + duration = 2 + __p = numpy.array(Y).size # - # Correction pour pallier a un bug de TNC sur le retour du Minimum - if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC": - selfA.setParameterValue("StoreInternalVariables",True) + # Précalcul des inversions de B et R + # ---------------------------------- + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("APosterioriCovariance"): + BI = B.getI() + RI = R.getI() + elif VariantM != "KalmanFilterFormula": + RI = R.getI() + if VariantM == "KalmanFilterFormula": + RIdemi = R.sqrtmI() # - # Initialisations - # --------------- - Hm = HO["Direct"].appliedTo - Ha = HO["Adjoint"].appliedInXTo + # Initialisation + # -------------- + __n = Xb.size + __m = selfA._parameters["NumberOfMembers"] + if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n) + else: Pn = B + if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n) + else: Qn = Q + Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m ) + #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m ) # - # Précalcul des inversions de B et R - BT = B.getT() - RI = R.getI() + if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: + selfA.StoredVariables["Analysis"].store( Xb ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( Pn ) + covarianceXa = Pn # - # Point de démarrage de l'optimisation - Xini = numpy.zeros(Xb.shape) + previousJMinimum = numpy.finfo(float).max # - # Définition de la fonction-coût - # ------------------------------ - def CostFunction(v): - _V = numpy.asmatrix(numpy.ravel( v )).T - _X = Xb + B * _V - if selfA._parameters["StoreInternalVariables"] or \ - selfA._toStore("CurrentState") or \ - selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentState"].store( _X ) - _HX = Hm( _X ) - _HX = numpy.asmatrix(numpy.ravel( _HX )).T - _Innovation = Y - _HX - if selfA._toStore("SimulatedObservationAtCurrentState") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) + 
for step in range(duration-1): + if hasattr(Y,"store"): + Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) + else: + Ynpu = numpy.ravel( Y ).reshape((__p,1)) # - Jb = float( 0.5 * _V.T * BT * _V ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo + if U is not None: + if hasattr(U,"store") and len(U)>1: + Un = numpy.asmatrix(numpy.ravel( U[step] )).T + elif hasattr(U,"store") and len(U)==1: + Un = numpy.asmatrix(numpy.ravel( U[0] )).T + else: + Un = numpy.asmatrix(numpy.ravel( U )).T + else: + Un = None # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - if selfA._toStore("IndexOfOptimum") or \ - selfA._toStore("CurrentOptimum") or \ - selfA._toStore("CostFunctionJAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJoAtCurrentOptimum") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - return J + if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies": + Xn = CovarianceInflation( Xn, + selfA._parameters["InflationType"], + selfA._parameters["InflationFactor"], + ) + # + if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast + EMX = M( [(Xn[:,i], Un) for i in range(__m)], + argsAsSerie = True, + returnSerieAsArrayMatrix = True ) + qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T + Xn_predicted = EMX + qi + HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)], + argsAsSerie = True, + returnSerieAsArrayMatrix = True ) + if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape + Xn_predicted = Xn_predicted + Cm * Un + elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast + # --- > Par principe, M = Id, Q = 0 + Xn_predicted = Xn + HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)], + argsAsSerie = True, + returnSerieAsArrayMatrix = True ) + # + # Mean of forecast and observation of forecast + Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) + # + # Anomalies + EaX = EnsembleOfAnomalies( Xn_predicted, Xfm ) + EaHX = EnsembleOfAnomalies( HX_predicted, Hfm) + # + #-------------------------- + if VariantM == "KalmanFilterFormula": + mS = RIdemi * EaHX / math.sqrt(__m-1) + delta = RIdemi * ( Ynpu - Hfm ) + mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS ) + vw = mT @ mS.T @ delta + # + Tdemi = numpy.real(scipy.linalg.sqrtm(mT)) + mU = numpy.identity(__m) + # + EaX = EaX / math.sqrt(__m-1) + Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU ) + #-------------------------- + elif VariantM == "Variational": + HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm + def CostFunction(w): + _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) + _Jo = 0.5 * _A.T @ (RI * _A) + _Jb = 0.5 * (__m-1) * w.T @ w + _J = _Jo + _Jb + return float(_J) + def GradientOfCostFunction(w): + _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) + _GardJo = - EaHX.T @ (RI * _A) + _GradJb = (__m-1) * w.reshape((__m,1)) + _GradJ = _GardJo + _GradJb + return numpy.ravel(_GradJ) + vw = scipy.optimize.fmin_cg( + f = CostFunction, + x0 = numpy.zeros(__m), + fprime = GradientOfCostFunction, + args = (), + disp = False, + ) + # + Hto = EaHX.T @ (RI * EaHX) + Htb = (__m-1) * numpy.identity(__m) + Hta = Hto + Htb + # + Pta = numpy.linalg.inv( Hta ) + EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 + # + Xn = Xfm + EaX @ (vw[:,None] + EWa) + #-------------------------- + elif VariantM == "FiniteSize11": # Jauge Boc2011 + HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm + def CostFunction(w): + _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) + _Jo = 0.5 * _A.T @ (RI * _A) + _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w) + _J = _Jo + _Jb + return float(_J) + def GradientOfCostFunction(w): + _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) + _GardJo = - EaHX.T @ (RI * _A) + _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w) + _GradJ = _GardJo + _GradJb + return numpy.ravel(_GradJ) + vw = scipy.optimize.fmin_cg( + f = CostFunction, + x0 = numpy.zeros(__m), + fprime = GradientOfCostFunction, + args = (), + disp = False, + ) + # + Hto = EaHX.T @ (RI * EaHX) + Htb = __m * \ + ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \ + / (1 + 1/__m + vw.T @ vw)**2 + Hta = Hto + Htb + # + Pta = numpy.linalg.inv( Hta ) + EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 + # + Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa) + #-------------------------- + elif VariantM == "FiniteSize15": # Jauge Boc2015 + HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm + def CostFunction(w): + _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) + _Jo = 0.5 * _A.T * RI * _A + _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w) + _J = _Jo + _Jb + return float(_J) + def GradientOfCostFunction(w): + _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) + _GardJo = - 
EaHX.T @ (RI * _A) + _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w) + _GradJ = _GardJo + _GradJb + return numpy.ravel(_GradJ) + vw = scipy.optimize.fmin_cg( + f = CostFunction, + x0 = numpy.zeros(__m), + fprime = GradientOfCostFunction, + args = (), + disp = False, + ) + # + Hto = EaHX.T @ (RI * EaHX) + Htb = (__m+1) * \ + ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \ + / (1 + 1/__m + vw.T @ vw)**2 + Hta = Hto + Htb + # + Pta = numpy.linalg.inv( Hta ) + EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 + # + Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa) + #-------------------------- + elif VariantM == "FiniteSize16": # Jauge Boc2016 + HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm + def CostFunction(w): + _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) + _Jo = 0.5 * _A.T @ (RI * _A) + _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1)) + _J = _Jo + _Jb + return float(_J) + def GradientOfCostFunction(w): + _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1)) + _GardJo = - EaHX.T @ (RI * _A) + _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1)) + _GradJ = _GardJo + _GradJb + return numpy.ravel(_GradJ) + vw = scipy.optimize.fmin_cg( + f = CostFunction, + x0 = numpy.zeros(__m), + fprime = GradientOfCostFunction, + args = (), + disp = False, + ) + # + Hto = EaHX.T @ (RI * EaHX) + Htb = ((__m+1) / (__m-1)) * \ + ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \ + / (1 + 1/__m + vw.T @ vw / (__m-1))**2 + Hta = Hto + Htb + # + Pta = numpy.linalg.inv( Hta ) + EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 + # + Xn = Xfm + EaX @ (vw[:,None] + EWa) + #-------------------------- + else: + raise ValueError("VariantM has to be chosen in the authorized methods list.") + # + if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies": + Xn = CovarianceInflation( Xn, + selfA._parameters["InflationType"], + selfA._parameters["InflationFactor"], + ) + # + Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + #-------------------------- + # + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("APosterioriCovariance") \ + or selfA._toStore("InnovationAtCurrentAnalysis") \ + or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T + _Innovation = Ynpu - _HXa + # + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + # ---> avec analysis + selfA.StoredVariables["Analysis"].store( Xa ) + if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): + selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa ) + if selfA._toStore("InnovationAtCurrentAnalysis"): + selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) + # ---> avec current state + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CurrentState"): + selfA.StoredVariables["CurrentState"].store( Xn ) + if selfA._toStore("ForecastState"): + selfA.StoredVariables["ForecastState"].store( EMX ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) ) + if selfA._toStore("InnovationAtCurrentState"): + 
selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu ) + if selfA._toStore("SimulatedObservationAtCurrentState") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted ) + # ---> autres + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("APosterioriCovariance"): + Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) + Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + J = Jb + Jo + selfA.StoredVariables["CostFunctionJb"].store( Jb ) + selfA.StoredVariables["CostFunctionJo"].store( Jo ) + selfA.StoredVariables["CostFunctionJ" ].store( J ) + # + if selfA._toStore("IndexOfOptimum") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("CostFunctionJAtCurrentOptimum") \ + or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ + or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + if selfA._toStore("IndexOfOptimum"): + selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) + if selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) + if selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) + if selfA._toStore("CostFunctionJbAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) + if selfA._toStore("CostFunctionJoAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) + if selfA._toStore("CostFunctionJAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) ) + if selfA._parameters["EstimationOf"] == "Parameters" \ + and J < previousJMinimum: + previousJMinimum = J + XaMin = Xa + if selfA._toStore("APosterioriCovariance"): + covarianceXaMin = Pn + # ---> Pour les smoothers + if selfA._toStore("CurrentEnsembleState"): + selfA.StoredVariables["CurrentEnsembleState"].store( Xn ) # - def GradientOfCostFunction(v): - _V = numpy.asmatrix(numpy.ravel( v )).T - _X = Xb + B * _V - _HX = Hm( _X ) - _HX = numpy.asmatrix(numpy.ravel( _HX )).T - GradJb = BT * _V - GradJo = - Ha( (_X, RI * (Y - _HX)) ) - GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) - return GradJ + # Stockage final supplémentaire de l'optimum en estimation de paramètres + # ---------------------------------------------------------------------- + if selfA._parameters["EstimationOf"] == "Parameters": + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( XaMin ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) # - # Minimisation de 
la fonctionnelle - # -------------------------------- - nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() + return 0 + +# ============================================================================== +def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12", + BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000): + """ + Iterative EnKF + """ + if selfA._parameters["EstimationOf"] == "Parameters": + selfA._parameters["StoreInternalVariables"] = True # - if selfA._parameters["Minimizer"] == "LBFGSB": - if "0.19" <= scipy.version.version <= "1.1.0": - import lbfgsbhlt as optimiseur - else: - import scipy.optimize as optimiseur - Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = selfA._parameters["Bounds"], - maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, - factr = selfA._parameters["CostDecrementTolerance"]*1.e14, - pgtol = selfA._parameters["ProjectedGradientTolerance"], - iprint = selfA._parameters["optiprint"], - ) - nfeval = Informations['funcalls'] - rc = Informations['warnflag'] - elif selfA._parameters["Minimizer"] == "TNC": - Minimum, nfeval, rc = scipy.optimize.fmin_tnc( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = selfA._parameters["Bounds"], - maxfun = selfA._parameters["MaximumNumberOfSteps"], - pgtol = selfA._parameters["ProjectedGradientTolerance"], - ftol = selfA._parameters["CostDecrementTolerance"], - messages = selfA._parameters["optmessages"], - ) - elif selfA._parameters["Minimizer"] == "CG": - Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "NCG": - Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - avextol = selfA._parameters["CostDecrementTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "BFGS": - Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - else: - raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) + # Opérateurs + # ---------- + H = HO["Direct"].appliedControledFormTo # - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] + if selfA._parameters["EstimationOf"] == "State": + M = EM["Direct"].appliedControledFormTo # - # Correction pour pallier a un bug de TNC sur le retour du Minimum - # ---------------------------------------------------------------- - if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): - Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T + if CM is not None and "Tangent" in CM and U is not None: + Cm = 
CM["Tangent"].asMatrix(Xb) else: - Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T + Cm = None + # + # Nombre de pas identique au nombre de pas d'observations + # ------------------------------------------------------- + if hasattr(Y,"stepnumber"): + duration = Y.stepnumber() + __p = numpy.cumprod(Y.shape())[-1] + else: + duration = 2 + __p = numpy.array(Y).size + # + # Précalcul des inversions de B et R + # ---------------------------------- + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("APosterioriCovariance"): + BI = B.getI() + RI = R.getI() + # + # Initialisation + # -------------- + __n = Xb.size + __m = selfA._parameters["NumberOfMembers"] + if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n) + else: Pn = B + if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p) + else: Rn = R + if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n) + else: Qn = Q + Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m ) # - # Obtention de l'analyse - # ---------------------- - Xa = Minimum + if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: + selfA.StoredVariables["Analysis"].store( Xb ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( Pn ) + covarianceXa = Pn # - selfA.StoredVariables["Analysis"].store( Xa ) + previousJMinimum = numpy.finfo(float).max # - if selfA._toStore("OMA") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("SimulatedObservationAtOptimum"): - if selfA._toStore("SimulatedObservationAtCurrentState"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] - elif selfA._toStore("SimulatedObservationAtCurrentOptimum"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1] + for step in range(duration-1): + if hasattr(Y,"store"): + Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) else: - HXa = Hm( Xa ) - # - # Calcul de la covariance d'analyse - # --------------------------------- - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("JacobianMatrixAtOptimum") or \ - selfA._toStore("KalmanGainAtOptimum"): - HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa) - HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("KalmanGainAtOptimum"): - HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa) - HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles"): - BI = B.getI() - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.matrix(numpy.zeros(nb)).T - _ee[i] = 1. - _HtEE = numpy.dot(HtM,_ee) - _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T - HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) - HessienneI = numpy.matrix( HessienneI ) - A = HessienneI.I - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. 
There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,)) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( A ) - if selfA._toStore("JacobianMatrixAtOptimum"): - selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM ) - if selfA._toStore("KalmanGainAtOptimum"): - if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I - elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI - selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) - # - # Calculs et/ou stockages supplémentaires - # --------------------------------------- - if selfA._toStore("Innovation") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("MahalanobisConsistency") or \ - selfA._toStore("OMB"): - d = Y - HXb - if selfA._toStore("Innovation"): - selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) - if selfA._toStore("OMA"): - selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) - if selfA._toStore("OMB"): - selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) - if selfA._toStore("SigmaObs2"): - TraceR = R.trace(Y.size) - selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) - if selfA._toStore("MahalanobisConsistency"): - selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) - if selfA._toStore("SimulationQuantiles"): - nech = selfA._parameters["NumberOfSamplesForQuantiles"] - HXa = numpy.matrix(numpy.ravel( HXa )).T - YfQ = None - for i in range(nech): - if selfA._parameters["SimulationForQuantiles"] == "Linear": - dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T - dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T - Yr = HXa + dYr - elif selfA._parameters["SimulationForQuantiles"] == "NonLinear": - Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T - Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T - if YfQ is None: - YfQ = Yr + Ynpu = numpy.ravel( Y ).reshape((__p,1)) + # + if U is not None: + if hasattr(U,"store") and len(U)>1: + Un = numpy.asmatrix(numpy.ravel( U[step] )).T + elif hasattr(U,"store") and len(U)==1: + Un = numpy.asmatrix(numpy.ravel( U[0] )).T else: - YfQ = numpy.hstack((YfQ,Yr)) - YfQ.sort(axis=-1) - YQ = None - for quantile in selfA._parameters["Quantiles"]: - if not (0. 
<= float(quantile) <= 1.): continue - indice = int(nech * float(quantile) - 1./nech) - if YQ is None: YQ = YfQ[:,indice] - else: YQ = numpy.hstack((YQ,YfQ[:,indice])) - selfA.StoredVariables["SimulationQuantiles"].store( YQ ) - if selfA._toStore("SimulatedObservationAtBackground"): - selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) - if selfA._toStore("SimulatedObservationAtOptimum"): - selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) + Un = numpy.asmatrix(numpy.ravel( U )).T + else: + Un = None + # + if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies": + Xn = CovarianceInflation( Xn, + selfA._parameters["InflationType"], + selfA._parameters["InflationFactor"], + ) + # + #-------------------------- + if VariantM == "IEnKF12": + Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float')) + EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1) + __j = 0 + Deltaw = 1 + if not BnotT: + Ta = numpy.identity(__m) + vw = numpy.zeros(__m) + while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax: + vx1 = (Xfm + EaX @ vw).reshape((__n,1)) + # + if BnotT: + E1 = vx1 + _epsilon * EaX + else: + E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta + # + if selfA._parameters["EstimationOf"] == "State": # Forecast + Q + E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)], + argsAsSerie = True, + returnSerieAsArrayMatrix = True ) + elif selfA._parameters["EstimationOf"] == "Parameters": + # --- > Par principe, M = Id + E2 = Xn + vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + vy1 = H((vx2, Un)).reshape((__p,1)) + # + HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)], + argsAsSerie = True, + returnSerieAsArrayMatrix = True ) + vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) + # + if BnotT: + EaY = (HE2 - vy2) / _epsilon + else: + EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1) + # + GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 ))) + mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY) + Deltaw = - numpy.linalg.solve(mH,GradJ) + # + vw = vw + Deltaw + # + if not BnotT: + Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) + # + __j = __j + 1 + # + A2 = EnsembleOfAnomalies( E2 ) + # + if BnotT: + Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) + A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon + # + Xn = vx2 + A2 + #-------------------------- + else: + raise ValueError("VariantM has to be chosen in the authorized methods list.") + # + if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies": + Xn = CovarianceInflation( Xn, + selfA._parameters["InflationType"], + selfA._parameters["InflationFactor"], + ) + # + Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + #-------------------------- + # + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("APosterioriCovariance") \ + or selfA._toStore("InnovationAtCurrentAnalysis") \ + or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T + _Innovation = Ynpu - _HXa + # + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + # ---> avec analysis + selfA.StoredVariables["Analysis"].store( Xa ) + if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): 
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa ) + if selfA._toStore("InnovationAtCurrentAnalysis"): + selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) + # ---> avec current state + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CurrentState"): + selfA.StoredVariables["CurrentState"].store( Xn ) + if selfA._toStore("ForecastState"): + selfA.StoredVariables["ForecastState"].store( E2 ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( E2 - Xa ) + if selfA._toStore("InnovationAtCurrentState"): + selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu ) + if selfA._toStore("SimulatedObservationAtCurrentState") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 ) + # ---> autres + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("APosterioriCovariance"): + Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) + Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + J = Jb + Jo + selfA.StoredVariables["CostFunctionJb"].store( Jb ) + selfA.StoredVariables["CostFunctionJo"].store( Jo ) + selfA.StoredVariables["CostFunctionJ" ].store( J ) + # + if selfA._toStore("IndexOfOptimum") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("CostFunctionJAtCurrentOptimum") \ + or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ + or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + if selfA._toStore("IndexOfOptimum"): + selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) + if selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) + if selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) + if selfA._toStore("CostFunctionJbAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) + if selfA._toStore("CostFunctionJoAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) + if selfA._toStore("CostFunctionJAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) ) + if selfA._parameters["EstimationOf"] == "Parameters" \ + and J < previousJMinimum: + previousJMinimum = J + XaMin = Xa + if selfA._toStore("APosterioriCovariance"): + covarianceXaMin = Pn + # + # Stockage final supplémentaire de l'optimum en estimation de paramètres + # ---------------------------------------------------------------------- + if selfA._parameters["EstimationOf"] == "Parameters": + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( XaMin ) + if selfA._toStore("APosterioriCovariance"): + 
selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) # return 0 # ============================================================================== def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): """ - 3DVAR incrémental (Courtier 1994, 1997) - - selfA est identique au "self" d'algorithme appelant et contient les - valeurs. + 3DVAR incrémental """ # - # Correction pour pallier a un bug de TNC sur le retour du Minimum - if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC": - selfA.setParameterValue("StoreInternalVariables",True) - # # Initialisations # --------------- # @@ -1312,370 +1426,84 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): if "0.19" <= scipy.version.version <= "1.1.0": import lbfgsbhlt as optimiseur else: - import scipy.optimize as optimiseur - Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( - func = CostFunction, - x0 = numpy.zeros(Xini.size), - fprime = GradientOfCostFunction, - args = (), - bounds = selfA._parameters["Bounds"], - maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, - factr = selfA._parameters["CostDecrementTolerance"]*1.e14, - pgtol = selfA._parameters["ProjectedGradientTolerance"], - iprint = selfA._parameters["optiprint"], - ) - nfeval = Informations['funcalls'] - rc = Informations['warnflag'] - elif selfA._parameters["Minimizer"] == "TNC": - Minimum, nfeval, rc = scipy.optimize.fmin_tnc( - func = CostFunction, - x0 = numpy.zeros(Xini.size), - fprime = GradientOfCostFunction, - args = (), - bounds = selfA._parameters["Bounds"], - maxfun = selfA._parameters["MaximumNumberOfSteps"], - pgtol = selfA._parameters["ProjectedGradientTolerance"], - ftol = selfA._parameters["CostDecrementTolerance"], - messages = selfA._parameters["optmessages"], - ) - elif selfA._parameters["Minimizer"] == "CG": - Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = numpy.zeros(Xini.size), - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "NCG": - Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( - f = CostFunction, - x0 = numpy.zeros(Xini.size), - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - avextol = selfA._parameters["CostDecrementTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "BFGS": - Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( - f = CostFunction, - x0 = numpy.zeros(Xini.size), - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - else: - raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) - # - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] - # - if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): - Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T - else: - Minimum = Xb + 
numpy.asmatrix(numpy.ravel( Minimum )).T - # - Xr = Minimum - DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J - iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1] - # - # Obtention de l'analyse - # ---------------------- - Xa = Xr - # - selfA.StoredVariables["Analysis"].store( Xa ) - # - if selfA._toStore("OMA") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("SimulatedObservationAtOptimum"): - if selfA._toStore("SimulatedObservationAtCurrentState"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] - elif selfA._toStore("SimulatedObservationAtCurrentOptimum"): - HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1] - else: - HXa = Hm( Xa ) - # - # Calcul de la covariance d'analyse - # --------------------------------- - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("JacobianMatrixAtOptimum") or \ - selfA._toStore("KalmanGainAtOptimum"): - HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa) - HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles") or \ - selfA._toStore("KalmanGainAtOptimum"): - HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa) - HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape - if selfA._toStore("APosterioriCovariance") or \ - selfA._toStore("SimulationQuantiles"): - HessienneI = [] - nb = Xa.size - for i in range(nb): - _ee = numpy.matrix(numpy.zeros(nb)).T - _ee[i] = 1. - _HtEE = numpy.dot(HtM,_ee) - _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T - HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) - HessienneI = numpy.matrix( HessienneI ) - A = HessienneI.I - if min(A.shape) != max(A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) - if (numpy.diag(A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) - if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug - try: - L = numpy.linalg.cholesky( A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. 
Please check your a priori covariances and your observation operator."%(selfA._name,)) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( A ) - if selfA._toStore("JacobianMatrixAtOptimum"): - selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM ) - if selfA._toStore("KalmanGainAtOptimum"): - if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I - elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI - selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) - # - # Calculs et/ou stockages supplémentaires - # --------------------------------------- - if selfA._toStore("Innovation") or \ - selfA._toStore("SigmaObs2") or \ - selfA._toStore("MahalanobisConsistency") or \ - selfA._toStore("OMB"): - d = Y - HXb - if selfA._toStore("Innovation"): - selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) - if selfA._toStore("OMA"): - selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) - if selfA._toStore("OMB"): - selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) - if selfA._toStore("SigmaObs2"): - TraceR = R.trace(Y.size) - selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) - if selfA._toStore("MahalanobisConsistency"): - selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) - if selfA._toStore("SimulationQuantiles"): - nech = selfA._parameters["NumberOfSamplesForQuantiles"] - HXa = numpy.matrix(numpy.ravel( HXa )).T - YfQ = None - for i in range(nech): - if selfA._parameters["SimulationForQuantiles"] == "Linear": - dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T - dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T - Yr = HXa + dYr - elif selfA._parameters["SimulationForQuantiles"] == "NonLinear": - Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T - Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T - if YfQ is None: - YfQ = Yr - else: - YfQ = numpy.hstack((YfQ,Yr)) - YfQ.sort(axis=-1) - YQ = None - for quantile in selfA._parameters["Quantiles"]: - if not (0. <= float(quantile) <= 1.): continue - indice = int(nech * float(quantile) - 1./nech) - if YQ is None: YQ = YfQ[:,indice] - else: YQ = numpy.hstack((YQ,YfQ[:,indice])) - selfA.StoredVariables["SimulationQuantiles"].store( YQ ) - if selfA._toStore("SimulatedObservationAtBackground"): - selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) - if selfA._toStore("SimulatedObservationAtOptimum"): - selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) - # - return 0 - -# ============================================================================== -def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): - """ - 3DVAR PSAS (Huang 2000) - - selfA est identique au "self" d'algorithme appelant et contient les - valeurs. 
- """ - # - # Correction pour pallier a un bug de TNC sur le retour du Minimum - if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC": - selfA.setParameterValue("StoreInternalVariables",True) - # - # Initialisations - # --------------- - # - # Opérateurs - Hm = HO["Direct"].appliedTo - # - # Utilisation éventuelle d'un vecteur H(Xb) précalculé - if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]: - HXb = Hm( Xb, HO["AppliedInX"]["HXb"] ) - else: - HXb = Hm( Xb ) - HXb = numpy.asmatrix(numpy.ravel( HXb )).T - if Y.size != HXb.size: - raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size)) - if max(Y.shape) != max(HXb.shape): - raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape)) - # - if selfA._toStore("JacobianMatrixAtBackground"): - HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb) - HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape - selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb ) - # - Ht = HO["Tangent"].asMatrix(Xb) - BHT = B * Ht.T - HBHTpR = R + Ht * BHT - Innovation = Y - HXb - # - # Point de démarrage de l'optimisation - Xini = numpy.zeros(Xb.shape) - # - # Définition de la fonction-coût - # ------------------------------ - def CostFunction(w): - _W = numpy.asmatrix(numpy.ravel( w )).T - if selfA._parameters["StoreInternalVariables"] or \ - selfA._toStore("CurrentState") or \ - selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W ) - if selfA._toStore("SimulatedObservationAtCurrentState") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation ) + import scipy.optimize as optimiseur + Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( + func = CostFunction, + x0 = numpy.zeros(Xini.size), + fprime = GradientOfCostFunction, + args = (), + bounds = selfA._parameters["Bounds"], + maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, + factr = selfA._parameters["CostDecrementTolerance"]*1.e14, + pgtol = selfA._parameters["ProjectedGradientTolerance"], + iprint = selfA._parameters["optiprint"], + ) + nfeval = Informations['funcalls'] + rc = Informations['warnflag'] + elif selfA._parameters["Minimizer"] == "TNC": + Minimum, nfeval, rc = scipy.optimize.fmin_tnc( + func = CostFunction, + x0 = numpy.zeros(Xini.size), + fprime = GradientOfCostFunction, + args = (), + bounds = selfA._parameters["Bounds"], + maxfun = selfA._parameters["MaximumNumberOfSteps"], + pgtol = selfA._parameters["ProjectedGradientTolerance"], + ftol = selfA._parameters["CostDecrementTolerance"], + messages = selfA._parameters["optmessages"], + ) + elif selfA._parameters["Minimizer"] == "CG": + Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( + f = CostFunction, + x0 = numpy.zeros(Xini.size), + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + gtol = selfA._parameters["GradientNormTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + elif selfA._parameters["Minimizer"] == "NCG": + Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( + f = CostFunction, + x0 = numpy.zeros(Xini.size), + fprime = 
GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + avextol = selfA._parameters["CostDecrementTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + elif selfA._parameters["Minimizer"] == "BFGS": + Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( + f = CostFunction, + x0 = numpy.zeros(Xini.size), + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + gtol = selfA._parameters["GradientNormTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + else: + raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) # - Jb = float( 0.5 * _W.T * HBHTpR * _W ) - Jo = float( - _W.T * Innovation ) - J = Jb + Jo + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - if selfA._toStore("IndexOfOptimum") or \ - selfA._toStore("CurrentOptimum") or \ - selfA._toStore("CostFunctionJAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ - selfA._toStore("CostFunctionJoAtCurrentOptimum") or \ - selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - return J - # - def GradientOfCostFunction(w): - _W = numpy.asmatrix(numpy.ravel( w )).T - GradJb = HBHTpR * _W - GradJo = - Innovation - GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) - return GradJ - # - # Minimisation de la fonctionnelle - # -------------------------------- - nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() - # - if selfA._parameters["Minimizer"] == "LBFGSB": - # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b( - if "0.19" <= scipy.version.version <= "1.1.0": - import lbfgsbhlt as optimiseur + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): + Minimum = selfA.StoredVariables["CurrentState"][IndexMin] + Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T else: - import scipy.optimize as optimiseur - Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = 
selfA._parameters["Bounds"], - maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, - factr = selfA._parameters["CostDecrementTolerance"]*1.e14, - pgtol = selfA._parameters["ProjectedGradientTolerance"], - iprint = selfA._parameters["optiprint"], - ) - nfeval = Informations['funcalls'] - rc = Informations['warnflag'] - elif selfA._parameters["Minimizer"] == "TNC": - Minimum, nfeval, rc = scipy.optimize.fmin_tnc( - func = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - bounds = selfA._parameters["Bounds"], - maxfun = selfA._parameters["MaximumNumberOfSteps"], - pgtol = selfA._parameters["ProjectedGradientTolerance"], - ftol = selfA._parameters["CostDecrementTolerance"], - messages = selfA._parameters["optmessages"], - ) - elif selfA._parameters["Minimizer"] == "CG": - Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "NCG": - Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - avextol = selfA._parameters["CostDecrementTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - elif selfA._parameters["Minimizer"] == "BFGS": - Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( - f = CostFunction, - x0 = Xini, - fprime = GradientOfCostFunction, - args = (), - maxiter = selfA._parameters["MaximumNumberOfSteps"], - gtol = selfA._parameters["GradientNormTolerance"], - disp = selfA._parameters["optdisp"], - full_output = True, - ) - else: - raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) - # - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] - # - # Correction pour pallier a un bug de TNC sur le retour du Minimum - # ---------------------------------------------------------------- - if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): - Minimum = selfA.StoredVariables["CurrentState"][IndexMin] - Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T - else: - Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T + Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T + # + Xr = Minimum + DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J + iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1] # # Obtention de l'analyse # ---------------------- - Xa = Minimum + Xa = Xr # selfA.StoredVariables["Analysis"].store( Xa ) # @@ -1705,8 +1533,6 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape if selfA._toStore("APosterioriCovariance") or \ selfA._toStore("SimulationQuantiles"): - BI = B.getI() - RI = R.getI() HessienneI = [] nb = Xa.size for i in range(nb): @@ -1787,12 +1613,10 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): return 0 # ============================================================================== -def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): +def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", + BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000): """ 
- Stochastic EnKF (Envensen 1994, Burgers 1998) - - selfA est identique au "self" d'algorithme appelant et contient les - valeurs. + Maximum Likelihood Ensemble Filter """ if selfA._parameters["EstimationOf"] == "Parameters": selfA._parameters["StoreInternalVariables"] = True @@ -1827,7 +1651,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): or selfA._toStore("CurrentOptimum") \ or selfA._toStore("APosterioriCovariance"): BI = B.getI() - RI = R.getI() + RI = R.getI() # # Initialisation # -------------- @@ -1842,7 +1666,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) ) + selfA.StoredVariables["Analysis"].store( Xb ) if selfA._toStore("APosterioriCovariance"): selfA.StoredVariables["APosterioriCovariance"].store( Pn ) covarianceXa = Pn @@ -1851,9 +1675,9 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): # for step in range(duration-1): if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1)) + Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) else: - Ynpu = numpy.ravel( Y ).reshape((__p,-1)) + Ynpu = numpy.ravel( Y ).reshape((__p,1)) # if U is not None: if hasattr(U,"store") and len(U)>1: @@ -1877,51 +1701,56 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): returnSerieAsArrayMatrix = True ) qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T Xn_predicted = EMX + qi - HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! Cm = Cm.reshape(__n,Un.size) # ADAO & check shape Xn_predicted = Xn_predicted + Cm * Un elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast # --- > Par principe, M = Id, Q = 0 Xn_predicted = Xn - HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - # - # Mean of forecast and observation of forecast - Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1)) - Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1)) # #-------------------------- - if VariantM == "KalmanFilterFormula05": - PfHT, HPfHT = 0., 0. 
- for i in range(__m): - Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm - Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm - PfHT += Exfi * Eyfi.T - HPfHT += Eyfi * Eyfi.T - PfHT = (1./(__m-1)) * PfHT - HPfHT = (1./(__m-1)) * HPfHT - Kn = PfHT * ( R + HPfHT ).I - del PfHT, HPfHT - # - for i in range(__m): - ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn) - Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i]) - #-------------------------- - elif VariantM == "KalmanFilterFormula16": - EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m) - EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1)) - # - EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1) - EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1) + if VariantM == "MLEF13": + Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float')) + EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) ) + Ua = numpy.identity(__m) + __j = 0 + Deltaw = 1 + if not BnotT: + Ta = numpy.identity(__m) + vw = numpy.zeros(__m) + while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax: + vx1 = (Xfm + EaX @ vw).reshape((__n,1)) + # + if BnotT: + E1 = vx1 + _epsilon * EaX + else: + E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta + # + HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)], + argsAsSerie = True, + returnSerieAsArrayMatrix = True ) + vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) + # + if BnotT: + EaY = (HE2 - vy2) / _epsilon + else: + EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1) + # + GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 ))) + mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY) + Deltaw = - numpy.linalg.solve(mH,GradJ) + # + vw = vw + Deltaw + # + if not BnotT: + Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) + # + __j = __j + 1 # - Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T) + if BnotT: + Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) # - for i in range(__m): - Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i]) + Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua #-------------------------- else: raise ValueError("VariantM has to be chosen in the authorized methods list.") @@ -1932,7 +1761,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): selfA._parameters["InflationFactor"], ) # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1)) + Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) #-------------------------- # if selfA._parameters["StoreInternalVariables"] \ @@ -1960,12 +1789,12 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): if selfA._toStore("ForecastState"): selfA.StoredVariables["ForecastState"].store( EMX ) if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) ) + selfA.StoredVariables["BMA"].store( EMX - Xa ) if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu ) + selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu ) if selfA._toStore("SimulatedObservationAtCurrentState") \ or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted ) + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 ) # ---> autres if selfA._parameters["StoreInternalVariables"] \ or 
selfA._toStore("CostFunctionJ") \ @@ -2000,10 +1829,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): if selfA._toStore("CostFunctionJAtCurrentOptimum"): selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) if selfA._toStore("APosterioriCovariance"): - Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies - Pn = Eai @ Eai.T - Pn = 0.5 * (Pn + Pn.T) - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) + selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) ) if selfA._parameters["EstimationOf"] == "Parameters" \ and J < previousJMinimum: previousJMinimum = J @@ -2011,25 +1837,429 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): if selfA._toStore("APosterioriCovariance"): covarianceXaMin = Pn # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) + # Stockage final supplémentaire de l'optimum en estimation de paramètres + # ---------------------------------------------------------------------- + if selfA._parameters["EstimationOf"] == "Parameters": + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( XaMin ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) + # + return 0 + +# ============================================================================== +def mmqr( + func = None, + x0 = None, + fprime = None, + bounds = None, + quantile = 0.5, + maxfun = 15000, + toler = 1.e-06, + y = None, + ): + """ + Implémentation informatique de l'algorithme MMQR, basée sur la publication : + David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm", + Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000. + """ + # + # Recuperation des donnees et informations initiales + # -------------------------------------------------- + variables = numpy.ravel( x0 ) + mesures = numpy.ravel( y ) + increment = sys.float_info[0] + p = variables.size + n = mesures.size + quantile = float(quantile) + # + # Calcul des parametres du MM + # --------------------------- + tn = float(toler) / n + e0 = -tn / math.log(tn) + epsilon = (e0-tn)/(1+math.log(e0)) + # + # Calculs d'initialisation + # ------------------------ + residus = mesures - numpy.ravel( func( variables ) ) + poids = 1./(epsilon+numpy.abs(residus)) + veps = 1. - 2. 
* quantile - residus * poids + lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus) + iteration = 0 + # + # Recherche iterative + # ------------------- + while (increment > toler) and (iteration < maxfun) : + iteration += 1 + # + Derivees = numpy.array(fprime(variables)) + Derivees = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS + DeriveesT = Derivees.transpose() + M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) ) + SM = numpy.transpose(numpy.dot( DeriveesT , veps )) + step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0] + # + variables = variables + step + if bounds is not None: + # Attention : boucle infinie à éviter si un intervalle est trop petit + while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ): + step = step/2. + variables = variables - step + residus = mesures - numpy.ravel( func(variables) ) + surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus) + # + while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) : + step = step/2. + variables = variables - step + residus = mesures - numpy.ravel( func(variables) ) + surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus) + # + increment = lastsurrogate-surrogate + poids = 1./(epsilon+numpy.abs(residus)) + veps = 1. - 2. * quantile - residus * poids + lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus) + # + # Mesure d'écart + # -------------- + Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] ) + # + return variables, Ecart, [n,p,iteration,increment,0] + +# ============================================================================== +def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle): + """ + 3DVAR multi-pas et multi-méthodes + """ + # + # Initialisation + # -------------- + Xn = numpy.ravel(Xb).reshape((-1,1)) + # + if selfA._parameters["EstimationOf"] == "State": + M = EM["Direct"].appliedTo + # + if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: + selfA.StoredVariables["Analysis"].store( Xn ) + if selfA._toStore("APosterioriCovariance"): + if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size) + else: Pn = B + selfA.StoredVariables["APosterioriCovariance"].store( Pn ) + if selfA._toStore("ForecastState"): + selfA.StoredVariables["ForecastState"].store( Xn ) + # + if hasattr(Y,"stepnumber"): + duration = Y.stepnumber() + else: + duration = 2 + # + # Multi-pas + # --------- + for step in range(duration-1): + if hasattr(Y,"store"): + Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1)) + else: + Ynpu = numpy.ravel( Y ).reshape((-1,1)) + # + if selfA._parameters["EstimationOf"] == "State": # Forecast + Xn = selfA.StoredVariables["Analysis"][-1] + Xn_predicted = M( Xn ) + if selfA._toStore("ForecastState"): + selfA.StoredVariables["ForecastState"].store( Xn_predicted ) + elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast + # --- > Par principe, M = Id, Q = 0 + Xn_predicted = Xn + Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1)) + # + oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None) + # + return 0 + +# ============================================================================== +def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): + """ + 3DVAR PSAS + """ + # + # Initialisations + # --------------- + # + # Opérateurs + Hm = 
HO["Direct"].appliedTo + # + # Utilisation éventuelle d'un vecteur H(Xb) précalculé + if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]: + HXb = Hm( Xb, HO["AppliedInX"]["HXb"] ) + else: + HXb = Hm( Xb ) + HXb = numpy.asmatrix(numpy.ravel( HXb )).T + if Y.size != HXb.size: + raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size)) + if max(Y.shape) != max(HXb.shape): + raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape)) + # + if selfA._toStore("JacobianMatrixAtBackground"): + HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb) + HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape + selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb ) + # + Ht = HO["Tangent"].asMatrix(Xb) + BHT = B * Ht.T + HBHTpR = R + Ht * BHT + Innovation = Y - HXb + # + # Point de démarrage de l'optimisation + Xini = numpy.zeros(Xb.shape) + # + # Définition de la fonction-coût + # ------------------------------ + def CostFunction(w): + _W = numpy.asmatrix(numpy.ravel( w )).T + if selfA._parameters["StoreInternalVariables"] or \ + selfA._toStore("CurrentState") or \ + selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W ) + if selfA._toStore("SimulatedObservationAtCurrentState") or \ + selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) ) + if selfA._toStore("InnovationAtCurrentState"): + selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation ) + # + Jb = float( 0.5 * _W.T * HBHTpR * _W ) + Jo = float( - _W.T * Innovation ) + J = Jb + Jo + # + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) + selfA.StoredVariables["CostFunctionJb"].store( Jb ) + selfA.StoredVariables["CostFunctionJo"].store( Jo ) + selfA.StoredVariables["CostFunctionJ" ].store( J ) + if selfA._toStore("IndexOfOptimum") or \ + selfA._toStore("CurrentOptimum") or \ + selfA._toStore("CostFunctionJAtCurrentOptimum") or \ + selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ + selfA._toStore("CostFunctionJoAtCurrentOptimum") or \ + selfA._toStore("SimulatedObservationAtCurrentOptimum"): + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + if selfA._toStore("IndexOfOptimum"): + selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) + if selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) + if selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] ) + if selfA._toStore("CostFunctionJbAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) + if selfA._toStore("CostFunctionJoAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) + if selfA._toStore("CostFunctionJAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) + return J + # + def GradientOfCostFunction(w): + _W = numpy.asmatrix(numpy.ravel( w )).T + GradJb = HBHTpR * _W + 
GradJo = - Innovation + GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) + return GradJ + # + # Minimisation de la fonctionnelle + # -------------------------------- + nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() + # + if selfA._parameters["Minimizer"] == "LBFGSB": + if "0.19" <= scipy.version.version <= "1.1.0": + import lbfgsbhlt as optimiseur + else: + import scipy.optimize as optimiseur + Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( + func = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + bounds = selfA._parameters["Bounds"], + maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, + factr = selfA._parameters["CostDecrementTolerance"]*1.e14, + pgtol = selfA._parameters["ProjectedGradientTolerance"], + iprint = selfA._parameters["optiprint"], + ) + nfeval = Informations['funcalls'] + rc = Informations['warnflag'] + elif selfA._parameters["Minimizer"] == "TNC": + Minimum, nfeval, rc = scipy.optimize.fmin_tnc( + func = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + bounds = selfA._parameters["Bounds"], + maxfun = selfA._parameters["MaximumNumberOfSteps"], + pgtol = selfA._parameters["ProjectedGradientTolerance"], + ftol = selfA._parameters["CostDecrementTolerance"], + messages = selfA._parameters["optmessages"], + ) + elif selfA._parameters["Minimizer"] == "CG": + Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + gtol = selfA._parameters["GradientNormTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + elif selfA._parameters["Minimizer"] == "NCG": + Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + avextol = selfA._parameters["CostDecrementTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + elif selfA._parameters["Minimizer"] == "BFGS": + Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + gtol = selfA._parameters["GradientNormTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + else: + raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) + # + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] + # + # Correction pour pallier a un bug de TNC sur le retour du Minimum + # ---------------------------------------------------------------- + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): + Minimum = selfA.StoredVariables["CurrentState"][IndexMin] + Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T + else: + Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T + # + # Obtention de l'analyse + # ---------------------- + Xa = Minimum + # + selfA.StoredVariables["Analysis"].store( Xa ) + # + if selfA._toStore("OMA") or \ + selfA._toStore("SigmaObs2") or \ + selfA._toStore("SimulationQuantiles") or \ + selfA._toStore("SimulatedObservationAtOptimum"): + if selfA._toStore("SimulatedObservationAtCurrentState"): + HXa = 
selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] + elif selfA._toStore("SimulatedObservationAtCurrentOptimum"): + HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1] + else: + HXa = Hm( Xa ) + # + # Calcul de la covariance d'analyse + # --------------------------------- + if selfA._toStore("APosterioriCovariance") or \ + selfA._toStore("SimulationQuantiles") or \ + selfA._toStore("JacobianMatrixAtOptimum") or \ + selfA._toStore("KalmanGainAtOptimum"): + HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa) + HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape + if selfA._toStore("APosterioriCovariance") or \ + selfA._toStore("SimulationQuantiles") or \ + selfA._toStore("KalmanGainAtOptimum"): + HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa) + HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape + if selfA._toStore("APosterioriCovariance") or \ + selfA._toStore("SimulationQuantiles"): + BI = B.getI() + RI = R.getI() + HessienneI = [] + nb = Xa.size + for i in range(nb): + _ee = numpy.matrix(numpy.zeros(nb)).T + _ee[i] = 1. + _HtEE = numpy.dot(HtM,_ee) + _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T + HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) + HessienneI = numpy.matrix( HessienneI ) + A = HessienneI.I + if min(A.shape) != max(A.shape): + raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) + if (numpy.diag(A) < 0).any(): + raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) + if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug + try: + L = numpy.linalg.cholesky( A ) + except: + raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. 
Please check your a priori covariances and your observation operator."%(selfA._name,)) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( A ) + if selfA._toStore("JacobianMatrixAtOptimum"): + selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM ) + if selfA._toStore("KalmanGainAtOptimum"): + if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I + elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI + selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) + # + # Calculs et/ou stockages supplémentaires + # --------------------------------------- + if selfA._toStore("Innovation") or \ + selfA._toStore("SigmaObs2") or \ + selfA._toStore("MahalanobisConsistency") or \ + selfA._toStore("OMB"): + d = Y - HXb + if selfA._toStore("Innovation"): + selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) + if selfA._toStore("OMA"): + selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) + if selfA._toStore("OMB"): + selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) + if selfA._toStore("SigmaObs2"): + TraceR = R.trace(Y.size) + selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) + if selfA._toStore("MahalanobisConsistency"): + selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) + if selfA._toStore("SimulationQuantiles"): + nech = selfA._parameters["NumberOfSamplesForQuantiles"] + HXa = numpy.matrix(numpy.ravel( HXa )).T + YfQ = None + for i in range(nech): + if selfA._parameters["SimulationForQuantiles"] == "Linear": + dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T + dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T + Yr = HXa + dYr + elif selfA._parameters["SimulationForQuantiles"] == "NonLinear": + Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T + Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T + if YfQ is None: + YfQ = Yr + else: + YfQ = numpy.hstack((YfQ,Yr)) + YfQ.sort(axis=-1) + YQ = None + for quantile in selfA._parameters["Quantiles"]: + if not (0. <= float(quantile) <= 1.): continue + indice = int(nech * float(quantile) - 1./nech) + if YQ is None: YQ = YfQ[:,indice] + else: YQ = numpy.hstack((YQ,YfQ[:,indice])) + selfA.StoredVariables["SimulationQuantiles"].store( YQ ) + if selfA._toStore("SimulatedObservationAtBackground"): + selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) + if selfA._toStore("SimulatedObservationAtOptimum"): + selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) # return 0 # ============================================================================== -def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): +def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): """ - Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007) - - selfA est identique au "self" d'algorithme appelant et contient les - valeurs. 
+ Stochastic EnKF """ if selfA._parameters["EstimationOf"] == "Parameters": selfA._parameters["StoreInternalVariables"] = True @@ -2065,10 +2295,6 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): or selfA._toStore("APosterioriCovariance"): BI = B.getI() RI = R.getI() - elif VariantM != "KalmanFilterFormula": - RI = R.getI() - if VariantM == "KalmanFilterFormula": - RIdemi = R.choleskyI() # # Initialisation # -------------- @@ -2083,7 +2309,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) ) + selfA.StoredVariables["Analysis"].store( Xb ) if selfA._toStore("APosterioriCovariance"): selfA.StoredVariables["APosterioriCovariance"].store( Pn ) covarianceXa = Pn @@ -2092,9 +2318,9 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): # for step in range(duration-1): if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1)) + Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1)) else: - Ynpu = numpy.ravel( Y ).reshape((__p,-1)) + Ynpu = numpy.ravel( Y ).reshape((__p,1)) # if U is not None: if hasattr(U,"store") and len(U)>1: @@ -2132,738 +2358,916 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"): returnSerieAsArrayMatrix = True ) # # Mean of forecast and observation of forecast - Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1)) + Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) # - # Anomalies - EaX = EnsembleOfAnomalies( Xn_predicted ) - EaHX = numpy.array(HX_predicted - Hfm) - # - #-------------------------- - if VariantM == "KalmanFilterFormula": - mS = RIdemi * EaHX / numpy.sqrt(__m-1) - delta = RIdemi * ( Ynpu - Hfm ) - mT = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS ) - vw = mT @ mS.transpose() @ delta - # - Tdemi = numpy.real(scipy.linalg.sqrtm(mT)) - mU = numpy.eye(__m) - # - EaX = EaX / numpy.sqrt(__m-1) - Xn = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU ) - #-------------------------- - elif VariantM == "Variational": - HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm - def CostFunction(w): - _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1)) - _Jo = 0.5 * _A.T @ (RI * _A) - _Jb = 0.5 * (__m-1) * w.T @ w - _J = _Jo + _Jb - return float(_J) - def GradientOfCostFunction(w): - _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1)) - _GardJo = - EaHX.T @ (RI * _A) - _GradJb = (__m-1) * w.reshape((__m,1)) - _GradJ = _GardJo + _GradJb - return numpy.ravel(_GradJ) - vw = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = numpy.zeros(__m), - fprime = GradientOfCostFunction, - args = (), - disp = False, - ) - # - Hto = EaHX.T @ (RI * EaHX) - Htb = (__m-1) * numpy.eye(__m) - Hta = Hto + Htb - # - Pta = numpy.linalg.inv( Hta ) - EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 - # - Xn = Xfm + EaX @ (vw[:,None] + EWa) - #-------------------------- - elif VariantM == "FiniteSize11": # Jauge Boc2011 - HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm - def CostFunction(w): - _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1)) - _Jo = 0.5 * _A.T @ (RI * _A) - _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w) - _J = _Jo + _Jb - return float(_J) 
- def GradientOfCostFunction(w): - _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1)) - _GardJo = - EaHX.T @ (RI * _A) - _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w) - _GradJ = _GardJo + _GradJb - return numpy.ravel(_GradJ) - vw = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = numpy.zeros(__m), - fprime = GradientOfCostFunction, - args = (), - disp = False, - ) - # - Hto = EaHX.T @ (RI * EaHX) - Htb = __m * \ - ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \ - / (1 + 1/__m + vw.T @ vw)**2 - Hta = Hto + Htb - # - Pta = numpy.linalg.inv( Hta ) - EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 - # - Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa) #-------------------------- - elif VariantM == "FiniteSize15": # Jauge Boc2015 - HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm - def CostFunction(w): - _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1)) - _Jo = 0.5 * _A.T * RI * _A - _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w) - _J = _Jo + _Jb - return float(_J) - def GradientOfCostFunction(w): - _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1)) - _GardJo = - EaHX.T @ (RI * _A) - _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w) - _GradJ = _GardJo + _GradJb - return numpy.ravel(_GradJ) - vw = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = numpy.zeros(__m), - fprime = GradientOfCostFunction, - args = (), - disp = False, - ) - # - Hto = EaHX.T @ (RI * EaHX) - Htb = (__m+1) * \ - ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \ - / (1 + 1/__m + vw.T @ vw)**2 - Hta = Hto + Htb - # - Pta = numpy.linalg.inv( Hta ) - EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 + if VariantM == "KalmanFilterFormula05": + PfHT, HPfHT = 0., 0. 
+ for i in range(__m): + Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm + Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm + PfHT += Exfi * Eyfi.T + HPfHT += Eyfi * Eyfi.T + PfHT = (1./(__m-1)) * PfHT + HPfHT = (1./(__m-1)) * HPfHT + Kn = PfHT * ( R + HPfHT ).I + del PfHT, HPfHT # - Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa) + for i in range(__m): + ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn) + Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i]) #-------------------------- - elif VariantM == "FiniteSize16": # Jauge Boc2016 - HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm - def CostFunction(w): - _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1)) - _Jo = 0.5 * _A.T @ (RI * _A) - _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1)) - _J = _Jo + _Jb - return float(_J) - def GradientOfCostFunction(w): - _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1)) - _GardJo = - EaHX.T @ (RI * _A) - _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1)) - _GradJ = _GardJo + _GradJb - return numpy.ravel(_GradJ) - vw = scipy.optimize.fmin_cg( - f = CostFunction, - x0 = numpy.zeros(__m), - fprime = GradientOfCostFunction, - args = (), - disp = False, - ) + elif VariantM == "KalmanFilterFormula16": + EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m) + EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1)) # - Hto = EaHX.T @ (RI * EaHX) - Htb = ((__m+1) / (__m-1)) * \ - ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \ - / (1 + 1/__m + vw.T @ vw / (__m-1))**2 - Hta = Hto + Htb + EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1) + EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1) # - Pta = numpy.linalg.inv( Hta ) - EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18 + Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T) # - Xn = Xfm + EaX @ (vw[:,None] + EWa) + for i in range(__m): + Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i]) #-------------------------- else: - raise ValueError("VariantM has to be chosen in the authorized methods list.") - # - if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1)) - #-------------------------- - # - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("APosterioriCovariance") \ - or selfA._toStore("InnovationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T - _Innovation = Ynpu - _HXa - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or 
selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( EMX ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) ) - if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies - Pn = Eai @ Eai.T - Pn = 0.5 * (Pn + Pn.T) - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) - if selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = Pn + raise ValueError("VariantM has to be chosen in the authorized methods list.") + # + if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies": + Xn = CovarianceInflation( Xn, + selfA._parameters["InflationType"], + selfA._parameters["InflationFactor"], + ) + # + Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1)) + #-------------------------- + # + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("APosterioriCovariance") \ 
+ or selfA._toStore("InnovationAtCurrentAnalysis") \ + or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T + _Innovation = Ynpu - _HXa + # + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + # ---> avec analysis + selfA.StoredVariables["Analysis"].store( Xa ) + if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): + selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa ) + if selfA._toStore("InnovationAtCurrentAnalysis"): + selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) + # ---> avec current state + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CurrentState"): + selfA.StoredVariables["CurrentState"].store( Xn ) + if selfA._toStore("ForecastState"): + selfA.StoredVariables["ForecastState"].store( EMX ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( EMX - Xa ) + if selfA._toStore("InnovationAtCurrentState"): + selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu ) + if selfA._toStore("SimulatedObservationAtCurrentState") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted ) + # ---> autres + if selfA._parameters["StoreInternalVariables"] \ + or selfA._toStore("CostFunctionJ") \ + or selfA._toStore("CostFunctionJb") \ + or selfA._toStore("CostFunctionJo") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("APosterioriCovariance"): + Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) + Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + J = Jb + Jo + selfA.StoredVariables["CostFunctionJb"].store( Jb ) + selfA.StoredVariables["CostFunctionJo"].store( Jo ) + selfA.StoredVariables["CostFunctionJ" ].store( J ) + # + if selfA._toStore("IndexOfOptimum") \ + or selfA._toStore("CurrentOptimum") \ + or selfA._toStore("CostFunctionJAtCurrentOptimum") \ + or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ + or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ + or selfA._toStore("SimulatedObservationAtCurrentOptimum"): + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + if selfA._toStore("IndexOfOptimum"): + selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) + if selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) + if selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) + if selfA._toStore("CostFunctionJbAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) + if selfA._toStore("CostFunctionJoAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) + if selfA._toStore("CostFunctionJAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) ) + if selfA._parameters["EstimationOf"] == "Parameters" \ + and J < previousJMinimum: + previousJMinimum = J + XaMin = Xa + 
if selfA._toStore("APosterioriCovariance"): + covarianceXaMin = Pn + # + # Stockage final supplémentaire de l'optimum en estimation de paramètres + # ---------------------------------------------------------------------- + if selfA._parameters["EstimationOf"] == "Parameters": + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) + selfA.StoredVariables["Analysis"].store( XaMin ) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) + # + return 0 + +# ============================================================================== +def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): + """ + 3DVAR + """ + # + # Initialisations + # --------------- + # + # Opérateurs + Hm = HO["Direct"].appliedTo + Ha = HO["Adjoint"].appliedInXTo + # + # Utilisation éventuelle d'un vecteur H(Xb) précalculé + if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]: + HXb = Hm( Xb, HO["AppliedInX"]["HXb"] ) + else: + HXb = Hm( Xb ) + HXb = numpy.asmatrix(numpy.ravel( HXb )).T + if Y.size != HXb.size: + raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size)) + if max(Y.shape) != max(HXb.shape): + raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape)) + # + if selfA._toStore("JacobianMatrixAtBackground"): + HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb) + HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape + selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb ) + # + # Précalcul des inversions de B et R + BI = B.getI() + RI = R.getI() + # + # Point de démarrage de l'optimisation + Xini = selfA._parameters["InitializationPoint"] + # + # Définition de la fonction-coût + # ------------------------------ + def CostFunction(x): + _X = numpy.asmatrix(numpy.ravel( x )).T + if selfA._parameters["StoreInternalVariables"] or \ + selfA._toStore("CurrentState") or \ + selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentState"].store( _X ) + _HX = Hm( _X ) + _HX = numpy.asmatrix(numpy.ravel( _HX )).T + _Innovation = Y - _HX + if selfA._toStore("SimulatedObservationAtCurrentState") or \ + selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX ) + if selfA._toStore("InnovationAtCurrentState"): + selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) + # + Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) ) + Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + J = Jb + Jo + # + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) + selfA.StoredVariables["CostFunctionJb"].store( Jb ) + selfA.StoredVariables["CostFunctionJo"].store( Jo ) + selfA.StoredVariables["CostFunctionJ" ].store( J ) + if selfA._toStore("IndexOfOptimum") or \ + selfA._toStore("CurrentOptimum") or \ + selfA._toStore("CostFunctionJAtCurrentOptimum") or \ + selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ + selfA._toStore("CostFunctionJoAtCurrentOptimum") or \ + selfA._toStore("SimulatedObservationAtCurrentOptimum"): + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + if selfA._toStore("IndexOfOptimum"): + 
selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) + if selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) + if selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] ) + if selfA._toStore("CostFunctionJbAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) + if selfA._toStore("CostFunctionJoAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) + if selfA._toStore("CostFunctionJAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) + return J + # + def GradientOfCostFunction(x): + _X = numpy.asmatrix(numpy.ravel( x )).T + _HX = Hm( _X ) + _HX = numpy.asmatrix(numpy.ravel( _HX )).T + GradJb = BI * (_X - Xb) + GradJo = - Ha( (_X, RI * (Y - _HX)) ) + GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) + return GradJ + # + # Minimisation de la fonctionnelle + # -------------------------------- + nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() + # + if selfA._parameters["Minimizer"] == "LBFGSB": + if "0.19" <= scipy.version.version <= "1.1.0": + import lbfgsbhlt as optimiseur + else: + import scipy.optimize as optimiseur + Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( + func = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + bounds = selfA._parameters["Bounds"], + maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, + factr = selfA._parameters["CostDecrementTolerance"]*1.e14, + pgtol = selfA._parameters["ProjectedGradientTolerance"], + iprint = selfA._parameters["optiprint"], + ) + nfeval = Informations['funcalls'] + rc = Informations['warnflag'] + elif selfA._parameters["Minimizer"] == "TNC": + Minimum, nfeval, rc = scipy.optimize.fmin_tnc( + func = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + bounds = selfA._parameters["Bounds"], + maxfun = selfA._parameters["MaximumNumberOfSteps"], + pgtol = selfA._parameters["ProjectedGradientTolerance"], + ftol = selfA._parameters["CostDecrementTolerance"], + messages = selfA._parameters["optmessages"], + ) + elif selfA._parameters["Minimizer"] == "CG": + Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + gtol = selfA._parameters["GradientNormTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + elif selfA._parameters["Minimizer"] == "NCG": + Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + avextol = selfA._parameters["CostDecrementTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + elif selfA._parameters["Minimizer"] == "BFGS": + Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + gtol = selfA._parameters["GradientNormTolerance"], + disp = 
selfA._parameters["optdisp"], + full_output = True, + ) + else: + raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) + # + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] + # + # Correction pour pallier a un bug de TNC sur le retour du Minimum + # ---------------------------------------------------------------- + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): + Minimum = selfA.StoredVariables["CurrentState"][IndexMin] + # + # Obtention de l'analyse + # ---------------------- + Xa = numpy.asmatrix(numpy.ravel( Minimum )).T + # + selfA.StoredVariables["Analysis"].store( Xa ) + # + if selfA._toStore("OMA") or \ + selfA._toStore("SigmaObs2") or \ + selfA._toStore("SimulationQuantiles") or \ + selfA._toStore("SimulatedObservationAtOptimum"): + if selfA._toStore("SimulatedObservationAtCurrentState"): + HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] + elif selfA._toStore("SimulatedObservationAtCurrentOptimum"): + HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1] + else: + HXa = Hm( Xa ) # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) + # Calcul de la covariance d'analyse + # --------------------------------- + if selfA._toStore("APosterioriCovariance") or \ + selfA._toStore("SimulationQuantiles") or \ + selfA._toStore("JacobianMatrixAtOptimum") or \ + selfA._toStore("KalmanGainAtOptimum"): + HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa) + HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape + if selfA._toStore("APosterioriCovariance") or \ + selfA._toStore("SimulationQuantiles") or \ + selfA._toStore("KalmanGainAtOptimum"): + HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa) + HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape + if selfA._toStore("APosterioriCovariance") or \ + selfA._toStore("SimulationQuantiles"): + HessienneI = [] + nb = Xa.size + for i in range(nb): + _ee = numpy.matrix(numpy.zeros(nb)).T + _ee[i] = 1. + _HtEE = numpy.dot(HtM,_ee) + _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T + HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) + HessienneI = numpy.matrix( HessienneI ) + A = HessienneI.I + if min(A.shape) != max(A.shape): + raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) + if (numpy.diag(A) < 0).any(): + raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) + if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug + try: + L = numpy.linalg.cholesky( A ) + except: + raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. 
Please check your a priori covariances and your observation operator."%(selfA._name,)) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( A ) + if selfA._toStore("JacobianMatrixAtOptimum"): + selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM ) + if selfA._toStore("KalmanGainAtOptimum"): + if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I + elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI + selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) + # + # Calculs et/ou stockages supplémentaires + # --------------------------------------- + if selfA._toStore("Innovation") or \ + selfA._toStore("SigmaObs2") or \ + selfA._toStore("MahalanobisConsistency") or \ + selfA._toStore("OMB"): + d = Y - HXb + if selfA._toStore("Innovation"): + selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) + if selfA._toStore("OMA"): + selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) + if selfA._toStore("OMB"): + selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) + if selfA._toStore("SigmaObs2"): + TraceR = R.trace(Y.size) + selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) + if selfA._toStore("MahalanobisConsistency"): + selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) + if selfA._toStore("SimulationQuantiles"): + nech = selfA._parameters["NumberOfSamplesForQuantiles"] + HXa = numpy.matrix(numpy.ravel( HXa )).T + YfQ = None + for i in range(nech): + if selfA._parameters["SimulationForQuantiles"] == "Linear": + dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T + dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T + Yr = HXa + dYr + elif selfA._parameters["SimulationForQuantiles"] == "NonLinear": + Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T + Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T + if YfQ is None: + YfQ = Yr + else: + YfQ = numpy.hstack((YfQ,Yr)) + YfQ.sort(axis=-1) + YQ = None + for quantile in selfA._parameters["Quantiles"]: + if not (0. <= float(quantile) <= 1.): continue + indice = int(nech * float(quantile) - 1./nech) + if YQ is None: YQ = YfQ[:,indice] + else: YQ = numpy.hstack((YQ,YfQ[:,indice])) + selfA.StoredVariables["SimulationQuantiles"].store( YQ ) + if selfA._toStore("SimulatedObservationAtBackground"): + selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) + if selfA._toStore("SimulatedObservationAtOptimum"): + selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) # return 0 # ============================================================================== -def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13", - BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000): +def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): """ - Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013) - - selfA est identique au "self" d'algorithme appelant et contient les - valeurs. 
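The HessienneI loop of std3dvar above assembles, column by column, the Gauss-Newton Hessian of the 3DVAR cost function, so the stored a posteriori covariance is A = (B^-1 + H^T R^-1 H)^-1 with H the tangent observation operator at the analysis, and the Kalman gain at the optimum is written in one of two algebraically equivalent forms depending on whether observations outnumber state variables. A minimal dense-matrix sketch, assuming small hypothetical H, B and R:

import numpy

numpy.random.seed(1)
n, p = 5, 3                                   # hypothetical state and observation sizes
H  = numpy.random.randn(p, n)                 # tangent of the observation operator at the analysis
B  = numpy.eye(n)                             # background error covariance
R  = 0.5 * numpy.eye(p)                       # observation error covariance
BI = numpy.linalg.inv(B)
RI = numpy.linalg.inv(R)

# Gauss-Newton Hessian of J and a posteriori covariance
Hess = BI + H.T @ RI @ H
A    = numpy.linalg.inv(Hess)

# Equivalent column-by-column assembly, mirroring the HessienneI loop above
cols = []
for i in range(n):
    e = numpy.zeros(n); e[i] = 1.0
    cols.append(BI @ e + H.T @ (RI @ (H @ e)))
assert numpy.allclose(numpy.column_stack(cols), Hess)

# Kalman gain at the optimum, in its two algebraically equivalent forms
K1 = B @ H.T @ numpy.linalg.inv(R + H @ B @ H.T)       # form used when Y.size <= Xb.size
K2 = numpy.linalg.inv(BI + H.T @ RI @ H) @ H.T @ RI    # form used when Y.size >  Xb.size
assert numpy.allclose(K1, K2)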
+ 4DVAR """ - if selfA._parameters["EstimationOf"] == "Parameters": - selfA._parameters["StoreInternalVariables"] = True + # + # Initialisations + # --------------- # # Opérateurs - # ---------- - H = HO["Direct"].appliedControledFormTo + Hm = HO["Direct"].appliedControledFormTo + Mm = EM["Direct"].appliedControledFormTo # - if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedControledFormTo + if CM is not None and "Tangent" in CM and U is not None: + Cm = CM["Tangent"].asMatrix(Xb) + else: + Cm = None + # + def Un(_step): + if U is not None: + if hasattr(U,"store") and 1<=_step1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T - elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T + # Définition de la fonction-coût + # ------------------------------ + selfA.DirectCalculation = [None,] # Le pas 0 n'est pas observé + selfA.DirectInnovation = [None,] # Le pas 0 n'est pas observé + def CostFunction(x): + _X = numpy.asmatrix(numpy.ravel( x )).T + if selfA._parameters["StoreInternalVariables"] or \ + selfA._toStore("CurrentState") or \ + selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentState"].store( _X ) + Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) ) + selfA.DirectCalculation = [None,] + selfA.DirectInnovation = [None,] + Jo = 0. + _Xn = _X + for step in range(0,duration-1): + if hasattr(Y,"store"): + _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T else: - Un = numpy.asmatrix(numpy.ravel( U )).T - else: - Un = None - # - if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast - EMX = M( [(Xn[:,i], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T - Xn_predicted = EMX + qi - if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon ! 
- Cm = Cm.reshape(__n,Un.size) # ADAO & check shape - Xn_predicted = Xn_predicted + Cm * Un - elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast - # --- > Par principe, M = Id, Q = 0 - Xn_predicted = Xn - # - #-------------------------- - if VariantM == "MLEF13": - Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float')) - EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1) - Ua = numpy.eye(__m) - __j = 0 - Deltaw = 1 - if not BnotT: - Ta = numpy.eye(__m) - vw = numpy.zeros(__m) - while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax: - vx1 = (Xfm + EaX @ vw).reshape((__n,-1)) - # - if BnotT: - E1 = vx1 + _epsilon * EaX - else: - E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta - # - HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1)) - # - if BnotT: - EaY = (HE2 - vy2) / _epsilon - else: - EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1) - # - GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 ))) - mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY) - Deltaw = - numpy.linalg.solve(mH,GradJ) - # - vw = vw + Deltaw - # - if not BnotT: - Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) - # - __j = __j + 1 + _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T + _Un = Un(step) # - if BnotT: - Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) + # Etape d'évolution + if selfA._parameters["EstimationOf"] == "State": + _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un) + elif selfA._parameters["EstimationOf"] == "Parameters": + pass # - Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua - #-------------------------- - else: - raise ValueError("VariantM has to be chosen in the authorized methods list.") - # - if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1)) - #-------------------------- - # - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("APosterioriCovariance") \ - or selfA._toStore("InnovationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T - _Innovation = Ynpu - _HXa - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( EMX ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) ) - 
if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) + if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection": + _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1) + _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1) # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies - Pn = Eai @ Eai.T - Pn = 0.5 * (Pn + Pn.T) - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) - if selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = Pn + # Etape de différence aux observations + if selfA._parameters["EstimationOf"] == "State": + _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T + elif selfA._parameters["EstimationOf"] == "Parameters": + _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un) + # + # Stockage de l'état + selfA.DirectCalculation.append( _Xn ) + selfA.DirectInnovation.append( _YmHMX ) + # + # Ajout dans la fonctionnelle d'observation + Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX ) + J = Jb + Jo + # + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) + selfA.StoredVariables["CostFunctionJb"].store( Jb ) + 
selfA.StoredVariables["CostFunctionJo"].store( Jo ) + selfA.StoredVariables["CostFunctionJ" ].store( J ) + if selfA._toStore("IndexOfOptimum") or \ + selfA._toStore("CurrentOptimum") or \ + selfA._toStore("CostFunctionJAtCurrentOptimum") or \ + selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ + selfA._toStore("CostFunctionJoAtCurrentOptimum"): + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + if selfA._toStore("IndexOfOptimum"): + selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) + if selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) + if selfA._toStore("CostFunctionJAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) + if selfA._toStore("CostFunctionJbAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) + if selfA._toStore("CostFunctionJoAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) + return J + # + def GradientOfCostFunction(x): + _X = numpy.asmatrix(numpy.ravel( x )).T + GradJb = BI * (_X - Xb) + GradJo = 0. + for step in range(duration-1,0,-1): + # Étape de récupération du dernier stockage de l'évolution + _Xn = selfA.DirectCalculation.pop() + # Étape de récupération du dernier stockage de l'innovation + _YmHMX = selfA.DirectInnovation.pop() + # Calcul des adjoints + Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn) + Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape + Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn) + Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape + # Calcul du gradient par état adjoint + GradJo = GradJo + Ha * RI * _YmHMX # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) ) + GradJo = Ma * GradJo # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) ) + GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo ) + return GradJ + # + # Minimisation de la fonctionnelle + # -------------------------------- + nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() + # + if selfA._parameters["Minimizer"] == "LBFGSB": + if "0.19" <= scipy.version.version <= "1.1.0": + import lbfgsbhlt as optimiseur + else: + import scipy.optimize as optimiseur + Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( + func = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + bounds = selfA._parameters["Bounds"], + maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, + factr = selfA._parameters["CostDecrementTolerance"]*1.e14, + pgtol = selfA._parameters["ProjectedGradientTolerance"], + iprint = selfA._parameters["optiprint"], + ) + nfeval = Informations['funcalls'] + rc = Informations['warnflag'] + elif selfA._parameters["Minimizer"] == "TNC": + Minimum, nfeval, rc = scipy.optimize.fmin_tnc( + func = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + bounds = selfA._parameters["Bounds"], + maxfun = selfA._parameters["MaximumNumberOfSteps"], + pgtol = selfA._parameters["ProjectedGradientTolerance"], + ftol = selfA._parameters["CostDecrementTolerance"], + messages = selfA._parameters["optmessages"], + ) + elif selfA._parameters["Minimizer"] == "CG": + Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + 
maxiter = selfA._parameters["MaximumNumberOfSteps"], + gtol = selfA._parameters["GradientNormTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + elif selfA._parameters["Minimizer"] == "NCG": + Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + avextol = selfA._parameters["CostDecrementTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + elif selfA._parameters["Minimizer"] == "BFGS": + Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + gtol = selfA._parameters["GradientNormTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + else: + raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] + # + # Correction pour pallier a un bug de TNC sur le retour du Minimum + # ---------------------------------------------------------------- + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): + Minimum = selfA.StoredVariables["CurrentState"][IndexMin] + # + # Obtention de l'analyse + # ---------------------- + Xa = numpy.asmatrix(numpy.ravel( Minimum )).T + # + selfA.StoredVariables["Analysis"].store( Xa ) + # + # Calculs et/ou stockages supplémentaires + # --------------------------------------- + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) # return 0 # ============================================================================== -def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12", - BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000): +def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): """ - Iterative EnKF (Sakov 2012, Sakov 2018) - - selfA est identique au "self" d'algorithme appelant et contient les - valeurs. 
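The GradientOfCostFunction of std4dvar above computes the gradient in a single backward sweep: each stored innovation is weighted by R^-1, mapped back to state space by the observation adjoint Ha, and transported to the initial time by repeated application of the model adjoint Ma. A self-contained linear toy version, with explicit matrices M and H standing in for the ADAO operators and a finite-difference cross-check:

import numpy

numpy.random.seed(2)
n, p, nsteps = 3, 2, 4
M  = 0.9 * numpy.eye(n) + 0.05 * numpy.random.randn(n, n)   # toy linear evolution model
H  = numpy.random.randn(p, n)                               # toy linear observation operator
B  = numpy.eye(n)
R  = 0.25 * numpy.eye(p)
BI, RI = numpy.linalg.inv(B), numpy.linalg.inv(R)
xb = numpy.zeros(n)
Y  = [numpy.random.randn(p) for _ in range(nsteps)]         # one observation per window step

def cost(x0):
    # Forward sweep: propagate and store the innovations (the role of DirectInnovation above)
    x, innovations, Jo = x0.copy(), [], 0.0
    for yk in Y:
        x = M @ x
        d = yk - H @ x
        innovations.append(d)
        Jo += 0.5 * d @ RI @ d
    return 0.5 * (x0 - xb) @ BI @ (x0 - xb) + Jo, innovations

def gradient(x0):
    # Backward adjoint sweep; with a nonlinear model the stored states would also be
    # needed to evaluate Ma and Ha at each step (the role of DirectCalculation above)
    _, innovations = cost(x0)
    gJo = numpy.zeros(n)
    for d in reversed(innovations):
        gJo = gJo + H.T @ (RI @ d)      # observation adjoint applied to the weighted innovation
        gJo = M.T @ gJo                 # transport one step back with the model adjoint
    return BI @ (x0 - xb) - gJo

x0  = numpy.random.randn(n)
e0  = numpy.zeros(n); e0[0] = 1.0
eps = 1.e-6
fd  = (cost(x0 + eps*e0)[0] - cost(x0 - eps*e0)[0]) / (2*eps)
assert abs(gradient(x0)[0] - fd) < 1.e-5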
+ 3DVAR variational analysis with no inversion of B """ - if selfA._parameters["EstimationOf"] == "Parameters": - selfA._parameters["StoreInternalVariables"] = True + # + # Initialisations + # --------------- # # Opérateurs - # ---------- - H = HO["Direct"].appliedControledFormTo + Hm = HO["Direct"].appliedTo + Ha = HO["Adjoint"].appliedInXTo # - if selfA._parameters["EstimationOf"] == "State": - M = EM["Direct"].appliedControledFormTo + # Précalcul des inversions de B et R + BT = B.getT() + RI = R.getI() # - if CM is not None and "Tangent" in CM and U is not None: - Cm = CM["Tangent"].asMatrix(Xb) + # Point de démarrage de l'optimisation + Xini = numpy.zeros(Xb.shape) + # + # Définition de la fonction-coût + # ------------------------------ + def CostFunction(v): + _V = numpy.asmatrix(numpy.ravel( v )).T + _X = Xb + B * _V + if selfA._parameters["StoreInternalVariables"] or \ + selfA._toStore("CurrentState") or \ + selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentState"].store( _X ) + _HX = Hm( _X ) + _HX = numpy.asmatrix(numpy.ravel( _HX )).T + _Innovation = Y - _HX + if selfA._toStore("SimulatedObservationAtCurrentState") or \ + selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX ) + if selfA._toStore("InnovationAtCurrentState"): + selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation ) + # + Jb = float( 0.5 * _V.T * BT * _V ) + Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) + J = Jb + Jo + # + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) + selfA.StoredVariables["CostFunctionJb"].store( Jb ) + selfA.StoredVariables["CostFunctionJo"].store( Jo ) + selfA.StoredVariables["CostFunctionJ" ].store( J ) + if selfA._toStore("IndexOfOptimum") or \ + selfA._toStore("CurrentOptimum") or \ + selfA._toStore("CostFunctionJAtCurrentOptimum") or \ + selfA._toStore("CostFunctionJbAtCurrentOptimum") or \ + selfA._toStore("CostFunctionJoAtCurrentOptimum") or \ + selfA._toStore("SimulatedObservationAtCurrentOptimum"): + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + if selfA._toStore("IndexOfOptimum"): + selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) + if selfA._toStore("CurrentOptimum"): + selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] ) + if selfA._toStore("SimulatedObservationAtCurrentOptimum"): + selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] ) + if selfA._toStore("CostFunctionJbAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) + if selfA._toStore("CostFunctionJoAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) + if selfA._toStore("CostFunctionJAtCurrentOptimum"): + selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) + return J + # + def GradientOfCostFunction(v): + _V = numpy.asmatrix(numpy.ravel( v )).T + _X = Xb + B * _V + _HX = Hm( _X ) + _HX = numpy.asmatrix(numpy.ravel( _HX )).T + GradJb = BT * _V + GradJo = - Ha( (_X, RI * (Y - _HX)) ) + GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo ) + return GradJ + # + # Minimisation de la fonctionnelle + # -------------------------------- + 
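van3dvar above works on a control variable v through the change of variables x = Xb + B v, so the background term becomes Jb = 0.5 v^T B v and no inverse of B is needed during the minimisation. The sketch below is a textbook B-preconditioned 3DVAR written independently of the routine, with hypothetical small matrices, and checks the resulting analysis against the classical normal equations:

import numpy
from scipy.optimize import minimize

numpy.random.seed(3)
n, p = 4, 3
B  = numpy.diag([1.0, 2.0, 0.5, 1.0])       # background covariance, kept un-inverted
R  = 0.1 * numpy.eye(p)
RI = numpy.linalg.inv(R)
H  = numpy.random.randn(p, n)               # toy linear observation operator
xb = numpy.zeros(n)
y  = numpy.random.randn(p)

def J(v):                                   # cost in the control variable v, with x = xb + B v
    x = xb + B @ v
    d = y - H @ x
    return 0.5 * v @ B @ v + 0.5 * d @ RI @ d

def gradJ(v):                               # gradient in v: B v for Jb, plus B^T times the state-space gradient of Jo
    x = xb + B @ v
    d = y - H @ x
    return B @ v - B @ (H.T @ (RI @ d))

va = minimize(J, numpy.zeros(n), jac=gradJ, method="L-BFGS-B",
              options={"ftol": 1.e-14, "gtol": 1.e-10}).x
xa = xb + B @ va                            # analysis back in state space

# Cross-check against the classical 3DVAR normal equations
xa_ref = xb + numpy.linalg.solve(numpy.linalg.inv(B) + H.T @ RI @ H, H.T @ RI @ (y - H @ xb))
assert numpy.allclose(xa, xa_ref, atol=1.e-4)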
nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() + # + if selfA._parameters["Minimizer"] == "LBFGSB": + if "0.19" <= scipy.version.version <= "1.1.0": + import lbfgsbhlt as optimiseur + else: + import scipy.optimize as optimiseur + Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b( + func = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + bounds = selfA._parameters["Bounds"], + maxfun = selfA._parameters["MaximumNumberOfSteps"]-1, + factr = selfA._parameters["CostDecrementTolerance"]*1.e14, + pgtol = selfA._parameters["ProjectedGradientTolerance"], + iprint = selfA._parameters["optiprint"], + ) + nfeval = Informations['funcalls'] + rc = Informations['warnflag'] + elif selfA._parameters["Minimizer"] == "TNC": + Minimum, nfeval, rc = scipy.optimize.fmin_tnc( + func = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + bounds = selfA._parameters["Bounds"], + maxfun = selfA._parameters["MaximumNumberOfSteps"], + pgtol = selfA._parameters["ProjectedGradientTolerance"], + ftol = selfA._parameters["CostDecrementTolerance"], + messages = selfA._parameters["optmessages"], + ) + elif selfA._parameters["Minimizer"] == "CG": + Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + gtol = selfA._parameters["GradientNormTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + elif selfA._parameters["Minimizer"] == "NCG": + Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + avextol = selfA._parameters["CostDecrementTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) + elif selfA._parameters["Minimizer"] == "BFGS": + Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( + f = CostFunction, + x0 = Xini, + fprime = GradientOfCostFunction, + args = (), + maxiter = selfA._parameters["MaximumNumberOfSteps"], + gtol = selfA._parameters["GradientNormTolerance"], + disp = selfA._parameters["optdisp"], + full_output = True, + ) else: - Cm = None + raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"]) # - # Nombre de pas identique au nombre de pas d'observations - # ------------------------------------------------------- - if hasattr(Y,"stepnumber"): - duration = Y.stepnumber() - __p = numpy.cumprod(Y.shape())[-1] + IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps + MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin] + # + # Correction pour pallier a un bug de TNC sur le retour du Minimum + # ---------------------------------------------------------------- + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): + Minimum = selfA.StoredVariables["CurrentState"][IndexMin] + Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T else: - duration = 2 - __p = numpy.array(Y).size + Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T # - # Précalcul des inversions de B et R - # ---------------------------------- - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or 
selfA._toStore("APosterioriCovariance"): - BI = B.getI() - RI = R.getI() + # Obtention de l'analyse + # ---------------------- + Xa = Minimum # - # Initialisation - # -------------- - __n = Xb.size - __m = selfA._parameters["NumberOfMembers"] - if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n) - else: Pn = B - if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p) - else: Rn = R - if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n) - else: Qn = Q - Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m ) + selfA.StoredVariables["Analysis"].store( Xa ) # - if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]: - selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) - covarianceXa = Pn + if selfA._toStore("OMA") or \ + selfA._toStore("SigmaObs2") or \ + selfA._toStore("SimulationQuantiles") or \ + selfA._toStore("SimulatedObservationAtOptimum"): + if selfA._toStore("SimulatedObservationAtCurrentState"): + HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] + elif selfA._toStore("SimulatedObservationAtCurrentOptimum"): + HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1] + else: + HXa = Hm( Xa ) # - previousJMinimum = numpy.finfo(float).max + # Calcul de la covariance d'analyse + # --------------------------------- + if selfA._toStore("APosterioriCovariance") or \ + selfA._toStore("SimulationQuantiles") or \ + selfA._toStore("JacobianMatrixAtOptimum") or \ + selfA._toStore("KalmanGainAtOptimum"): + HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa) + HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape + if selfA._toStore("APosterioriCovariance") or \ + selfA._toStore("SimulationQuantiles") or \ + selfA._toStore("KalmanGainAtOptimum"): + HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa) + HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape + if selfA._toStore("APosterioriCovariance") or \ + selfA._toStore("SimulationQuantiles"): + BI = B.getI() + HessienneI = [] + nb = Xa.size + for i in range(nb): + _ee = numpy.matrix(numpy.zeros(nb)).T + _ee[i] = 1. + _HtEE = numpy.dot(HtM,_ee) + _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T + HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) ) + HessienneI = numpy.matrix( HessienneI ) + A = HessienneI.I + if min(A.shape) != max(A.shape): + raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape))) + if (numpy.diag(A) < 0).any(): + raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,)) + if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug + try: + L = numpy.linalg.cholesky( A ) + except: + raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. 
Please check your a priori covariances and your observation operator."%(selfA._name,)) + if selfA._toStore("APosterioriCovariance"): + selfA.StoredVariables["APosterioriCovariance"].store( A ) + if selfA._toStore("JacobianMatrixAtOptimum"): + selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM ) + if selfA._toStore("KalmanGainAtOptimum"): + if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I + elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI + selfA.StoredVariables["KalmanGainAtOptimum"].store( KG ) # - for step in range(duration-1): - if hasattr(Y,"store"): - Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1)) - else: - Ynpu = numpy.ravel( Y ).reshape((__p,-1)) - # - if U is not None: - if hasattr(U,"store") and len(U)>1: - Un = numpy.asmatrix(numpy.ravel( U[step] )).T - elif hasattr(U,"store") and len(U)==1: - Un = numpy.asmatrix(numpy.ravel( U[0] )).T + # Calculs et/ou stockages supplémentaires + # --------------------------------------- + if selfA._toStore("Innovation") or \ + selfA._toStore("SigmaObs2") or \ + selfA._toStore("MahalanobisConsistency") or \ + selfA._toStore("OMB"): + d = Y - HXb + if selfA._toStore("Innovation"): + selfA.StoredVariables["Innovation"].store( numpy.ravel(d) ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) + if selfA._toStore("OMA"): + selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) + if selfA._toStore("OMB"): + selfA.StoredVariables["OMB"].store( numpy.ravel(d) ) + if selfA._toStore("SigmaObs2"): + TraceR = R.trace(Y.size) + selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR ) + if selfA._toStore("MahalanobisConsistency"): + selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) ) + if selfA._toStore("SimulationQuantiles"): + nech = selfA._parameters["NumberOfSamplesForQuantiles"] + HXa = numpy.matrix(numpy.ravel( HXa )).T + YfQ = None + for i in range(nech): + if selfA._parameters["SimulationForQuantiles"] == "Linear": + dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T + dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T + Yr = HXa + dYr + elif selfA._parameters["SimulationForQuantiles"] == "NonLinear": + Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T + Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T + if YfQ is None: + YfQ = Yr else: - Un = numpy.asmatrix(numpy.ravel( U )).T - else: - Un = None - # - if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - #-------------------------- - if VariantM == "IEnKF12": - Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float')) - EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) - __j = 0 - Deltaw = 1 - if not BnotT: - Ta = numpy.eye(__m) - vw = numpy.zeros(__m) - while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax: - vx1 = (Xfm + EaX @ vw).reshape((__n,-1)) - # - if BnotT: - E1 = vx1 + _epsilon * EaX - else: - E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta - # - if selfA._parameters["EstimationOf"] == "State": # Forecast + Q - E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - elif selfA._parameters["EstimationOf"] == "Parameters": - # --- > Par principe, M = Id - E2 = Xn - vx2 = E2.mean(axis=1, 
dtype=mfp).astype('float').reshape((__n,-1)) - vy1 = H((vx2, Un)).reshape((__p,-1)) - # - HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)], - argsAsSerie = True, - returnSerieAsArrayMatrix = True ) - vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1)) - # - if BnotT: - EaY = (HE2 - vy2) / _epsilon - else: - EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1) - # - GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 ))) - mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY) - Deltaw = - numpy.linalg.solve(mH,GradJ) - # - vw = vw + Deltaw - # - if not BnotT: - Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) - # - __j = __j + 1 - # - A2 = EnsembleOfAnomalies( E2 ) - # - if BnotT: - Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH ))) - A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon - # - Xn = vx2 + A2 - #-------------------------- - else: - raise ValueError("VariantM has to be chosen in the authorized methods list.") - # - if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies": - Xn = CovarianceInflation( Xn, - selfA._parameters["InflationType"], - selfA._parameters["InflationFactor"], - ) - # - Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1)) - #-------------------------- - # - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("APosterioriCovariance") \ - or selfA._toStore("InnovationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T - _Innovation = Ynpu - _HXa - # - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - # ---> avec analysis - selfA.StoredVariables["Analysis"].store( Xa ) - if selfA._toStore("SimulatedObservationAtCurrentAnalysis"): - selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa ) - if selfA._toStore("InnovationAtCurrentAnalysis"): - selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation ) - # ---> avec current state - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CurrentState"): - selfA.StoredVariables["CurrentState"].store( Xn ) - if selfA._toStore("ForecastState"): - selfA.StoredVariables["ForecastState"].store( E2 ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( E2 - Xa ) - if selfA._toStore("InnovationAtCurrentState"): - selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) ) - if selfA._toStore("SimulatedObservationAtCurrentState") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 ) - # ---> autres - if selfA._parameters["StoreInternalVariables"] \ - or selfA._toStore("CostFunctionJ") \ - or selfA._toStore("CostFunctionJb") \ - or selfA._toStore("CostFunctionJo") \ - or selfA._toStore("CurrentOptimum") \ - or selfA._toStore("APosterioriCovariance"): - Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) ) - Jo = float( 0.5 * _Innovation.T * RI * _Innovation ) - J = Jb + Jo - selfA.StoredVariables["CostFunctionJb"].store( Jb ) - selfA.StoredVariables["CostFunctionJo"].store( Jo ) - selfA.StoredVariables["CostFunctionJ" ].store( J ) - # - if selfA._toStore("IndexOfOptimum") \ - or selfA._toStore("CurrentOptimum") \ - or 
selfA._toStore("CostFunctionJAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJbAtCurrentOptimum") \ - or selfA._toStore("CostFunctionJoAtCurrentOptimum") \ - or selfA._toStore("SimulatedObservationAtCurrentOptimum"): - IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps - if selfA._toStore("IndexOfOptimum"): - selfA.StoredVariables["IndexOfOptimum"].store( IndexMin ) - if selfA._toStore("CurrentOptimum"): - selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] ) - if selfA._toStore("SimulatedObservationAtCurrentOptimum"): - selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] ) - if selfA._toStore("CostFunctionJbAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] ) - if selfA._toStore("CostFunctionJoAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] ) - if selfA._toStore("CostFunctionJAtCurrentOptimum"): - selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] ) - if selfA._toStore("APosterioriCovariance"): - Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies - Pn = Eai @ Eai.T - Pn = 0.5 * (Pn + Pn.T) - selfA.StoredVariables["APosterioriCovariance"].store( Pn ) - if selfA._parameters["EstimationOf"] == "Parameters" \ - and J < previousJMinimum: - previousJMinimum = J - XaMin = Xa - if selfA._toStore("APosterioriCovariance"): - covarianceXaMin = Pn - # - # Stockage final supplémentaire de l'optimum en estimation de paramètres - # ---------------------------------------------------------------------- - if selfA._parameters["EstimationOf"] == "Parameters": - selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) ) - selfA.StoredVariables["Analysis"].store( XaMin ) - if selfA._toStore("APosterioriCovariance"): - selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin ) - if selfA._toStore("BMA"): - selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) ) + YfQ = numpy.hstack((YfQ,Yr)) + YfQ.sort(axis=-1) + YQ = None + for quantile in selfA._parameters["Quantiles"]: + if not (0. <= float(quantile) <= 1.): continue + indice = int(nech * float(quantile) - 1./nech) + if YQ is None: YQ = YfQ[:,indice] + else: YQ = numpy.hstack((YQ,YfQ[:,indice])) + selfA.StoredVariables["SimulationQuantiles"].store( YQ ) + if selfA._toStore("SimulatedObservationAtBackground"): + selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) ) + if selfA._toStore("SimulatedObservationAtOptimum"): + selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) ) # return 0