From: Jean-Philippe ARGAUD
Date: Tue, 10 Mar 2020 12:08:43 +0000 (+0100)
Subject: Simplification update
X-Git-Tag: V9_5_0a2~1
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=63f879484564c47f757d9d09581cc3adb8c54ae5;p=modules%2Fadao.git

Simplification update
---

diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py
index 3938e48..c09d5b0 100644
--- a/src/daComposant/daCore/NumericObjects.py
+++ b/src/daComposant/daCore/NumericObjects.py
@@ -506,411 +506,6 @@ def mmqr(
     #
     return variables, Ecart, [n,p,iteration,increment,0]
 
-# ==============================================================================
-
-def _BackgroundEnsembleGeneration( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
-    "Generation of a background ensemble of _nbmembers members, built from _nbmembers-1 centered anomalies"
-    # ~ numpy.random.seed(1234567)
-    if _nbmembers < 2:
-        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
-    if _withSVD:
-        U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
-        _nbctl = len(_bgcenter)
-        if _nbmembers > _nbctl:
-            _Z = numpy.concatenate((numpy.dot(
-                numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
-                numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
-        else:
-            _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
-        _Zca = _CenteredAnomalies(_Z, _nbmembers)
-        BackgroundEnsemble = (_bgcenter + _Zca.T).T
-    else:
-        if max(abs(_bgcovariance.flatten())) > 0:
-            _nbctl = len(_bgcenter)
-            _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
-            _Zca = _CenteredAnomalies(_Z, _nbmembers)
-            BackgroundEnsemble = (_bgcenter + _Zca.T).T
-        else:
-            BackgroundEnsemble = numpy.tile([_bgcenter],(_nbmembers,1)).T
-    return BackgroundEnsemble
-
-def _CenteredAnomalies(Zr, N):
-    """
-    Generate a matrix of centered anomalies, following the handwritten notes
-    of MB, and consistent with the code of PS with eps = -1
-    """
-    eps = -1
-    Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
-    Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
-    R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
-    Q = numpy.dot(Q,R)
-    Zr = numpy.dot(Q,Zr)
-    return Zr.T
-
-def _IEnKF_cycle_Lag_1_SDA_GN(
-    E0 = None,
-    yObs = None,
-    RIdemi = None,
-    Mnnpu = None,
-    Hn = None,
-    variant = "IEnKF", # IEnKF or IEKF
-    iMaximum = 15000,
-    sTolerance = mfp,
-    jTolerance = mfp,
-    epsilonE = 1e-5,
-    nbPS = 0, # nbPreviousSteps
-    ):
-    # 201206
-    if logging.getLogger().level < logging.WARNING:
-        assert len(E0.shape) == 2, "Ensemble E0 is not well formed: it is not 2-dimensional!"
-        assert len(RIdemi.shape) == 2, "R^{-1/2} is not well formed: it is not 2-dimensional!"
-        assert variant in ("IEnKF", "IEKF"), "Variant has to be IEnKF or IEKF"
-    #
-    nbCtl, nbMbr = E0.shape
-    nbObs = yObs.size
-    #
-    if logging.getLogger().level < logging.WARNING:
-        assert RIdemi.shape[0] == RIdemi.shape[1] == nbObs, "R^{-1/2} is not of the right size: not of size nbObs!"
-    #
-    yo = yObs.reshape((nbObs,1))
-    IN = numpy.identity(nbMbr)
-    if variant == "IEnKF":
-        T = numpy.identity(nbMbr)
-        Tinv = numpy.identity(nbMbr)
-    x00 = numpy.mean(E0, axis = 1).reshape((nbCtl,1))
-    Ah0 = E0 - x00
-    Ap0 = numpy.linalg.pinv( Ah0.T.dot(Ah0) )
-    if logging.getLogger().level < logging.WARNING:
-        assert len(Ah0.shape) == 2, "Ensemble A0 is not well formed: it is not 2-dimensional!"
-        assert Ah0.shape[0] == nbCtl and Ah0.shape[1] == nbMbr, "Ensemble A0 is not well shaped!"
-        assert abs(max(numpy.mean(Ah0, axis = 1))) < nbMbr*mpr, "Ensemble A0 seems not to be centered!"
-    #
-    def _convergence_condition(j, dx, JCurr, JPrev):
-        if j > iMaximum:
-            logging.debug("Convergence on maximum number of iterations per cycle, reaching the limit of %i."%iMaximum)
-            return True
-        #---------
-        if j == 1:
-            _deltaOnJ = 1.
-        else:
-            _deltaOnJ = abs(JCurr - JPrev) / JPrev
-        if _deltaOnJ <= jTolerance:
-            logging.debug("Convergence on cost decrement tolerance, which is below the threshold of %.1e."%jTolerance)
-            return True
-        #---------
-        _deltaOnX = numpy.linalg.norm(dx)
-        if _deltaOnX <= sTolerance:
-            logging.debug("Convergence on norm of state correction, which is below the threshold of %.1e."%sTolerance)
-            return True # On the state correction
-        #---------
-        return False
-    #
-    St = dict([(k,[]) for k in [
-        "CurrentState", "CurrentEnsemble",
-        "CostFunctionJb", "CostFunctionJo", "CostFunctionJ",
-        ]])
-    #
-    j, convergence, JPrev = 1, False, numpy.nan
-    x1 = x00
-    while not convergence:
-        logging.debug("Internal IEnKF step number %i"%j)
-        St["CurrentState"].append( x1.squeeze() )
-        if variant == "IEnKF": # Transform
-            E1 = x1 + Ah0.dot(T)
-        else: # IEKF
-            E1 = x1 + epsilonE * Ah0
-        St["CurrentEnsemble"].append( E1 )
-        E2 = numpy.array([Mnnpu(_x) for _x in E1.T]).T # Evolution 1->2
-        HEL = numpy.array([Hn(_x) for _x in E2.T]).T # Observation at 2
-        yLm = numpy.mean( HEL, axis = 1).reshape((nbObs,1))
-        HA2 = HEL - yLm
-        if variant == "IEnKF":
-            HA2 = HA2.dot(Tinv)
-        else:
-            HA2 = HA2 / epsilonE
-        RIdemidy = RIdemi.dot(yo - yLm)
-        xs = RIdemidy / math.sqrt(nbMbr-1)
-        ES = RIdemi.dot(HA2) / math.sqrt(nbMbr-1)
-        G = numpy.linalg.inv(IN + ES.T.dot(ES))
-        xb = G.dot(ES.T.dot(xs))
-        dx = Ah0.dot(xb) + Ah0.dot(G.dot(Ap0.dot(Ah0.T.dot(x00 - x1))))
-        #
-        Jb = float(dx.T.dot(dx))
-        Jo = float(RIdemidy.T.dot(RIdemidy))
-        J = Jo + Jb
-        logging.debug("Values for cost functions are: J = %.5e Jo = %.5e Jb = %.5e"%(J,Jo,Jb))
-        St["CostFunctionJb"].append( Jb )
-        St["CostFunctionJo"].append( Jo )
-        St["CostFunctionJ"].append( J )
-        #
-        x1 = x1 + dx
-        j = j + 1
-        convergence = _convergence_condition(j, dx, J, JPrev)
-        JPrev = J
-        #
-        if variant == "IEnKF":
-            T = numpy.real_if_close(scipy.linalg.sqrtm(G))
-            Tinv = numpy.linalg.inv(T)
-    #
-    # Store the last step
-    x2 = numpy.mean( E2, axis = 1).reshape((nbCtl,1))
-    if variant == "IEKF":
-        A2 = E2 - x2
-        A2 = A2.dot(numpy.linalg.cholesky(G)) / epsilonE
-        E2 = x2 + A2
-    St["CurrentState"].append( x2.squeeze() )
-    St["CurrentEnsemble"].append( E2 )
-    #
-    IndexMin = numpy.argmin( St["CostFunctionJ"][nbPS:] ) + nbPS
-    xa = St["CurrentState"][IndexMin]
-    Ea = St["CurrentEnsemble"][IndexMin]
-    #
-    return (xa, Ea, St)
-
-def ienkf(
-    xb = None, # Background (None if E0 is given)
-    E0 = None, # Background ensemble (None if xb is given)
-    yObs = None, # Observation (series)
-    B = None, # B
-    RIdemi = None, # R^(-1/2)
-    Mnnpu = None, # Evolution operator
-    Hn = None, # Observation operator
-    variant = "IEnKF", # IEnKF or IEKF
-    nMembers = 5, # Number of members
-    sMaximum = 0, # Number of spinup steps
-    cMaximum = 15000, # Number of steps or cycles
-    iMaximum = 15000, # Number of iterations per cycle
-    sTolerance = mfp, # State correction tolerance
-    jTolerance = mfp, # Cost decrement tolerance
-    epsilon = 1e-5,
-    inflation = 1.,
-    nbPS = 0, # Number of previous steps
-    setSeed = None,
-    ):
-    #
-    # Initial
-    if setSeed is not None: numpy.random.seed(setSeed)
-    if E0 is None: E0 = _BackgroundEnsembleGeneration( xb, B, nMembers)
-    #
-    # Spinup
-    # ------
-    #
-    # Cycles
-    # ------
-    xa, Ea, Sa = [xb,], [E0,], [{}]
-    for step in range(cMaximum):
-        if hasattr(yObs,"store"): Ynpu = numpy.ravel( yObs[step+1] )
-        elif type(yObs) in [list, tuple]: Ynpu = numpy.ravel( yObs[step+1] )
-        else: Ynpu = numpy.ravel( yObs )
-        #
-        (xa_c, Ea_c, Sa_c) = _IEnKF_cycle_Lag_1_SDA_GN(
-            E0,
-            Ynpu,
-            RIdemi,
-            Mnnpu,
-            Hn,
-            variant,
-            iMaximum,
-            sTolerance,
-            jTolerance,
-            epsilon,
-            nbPS,
-            )
-        xa.append( xa_c )
-        Ea.append( Ea_c )
-        Sa.append( Sa_c )
-        #
-        # Inflation for next cycle
-        E0 = xa_c.reshape((-1,1)) + inflation * (Ea_c - xa_c.reshape((-1,1)))
-    #
-    return (xa, Ea, Sa)
-
-def _IEnKS_cycle_Lag_L_SDA_GN(
-    E0 = None,
-    yObs = None,
-    RIdemi = None,
-    Mnnpu = None,
-    Hn = None,
-    method = "Transform",
-    iMaximum = 15000,
-    sTolerance = mfp,
-    jTolerance = mfp,
-    Lag = 1,
-    epsilon = -1.,
-    nbPS = 0,
-    ):
-    # 201407 & 201905
-    if logging.getLogger().level < logging.WARNING:
-        assert len(E0.shape) == 2, "Ensemble E0 is not well formed: it is not 2-dimensional!"
-        assert len(RIdemi.shape) == 2, "R^{-1/2} is not well formed: it is not 2-dimensional!"
-        assert method in ("Transform", "Bundle"), "Method has to be Transform or Bundle"
-    #
-    nbCtl, nbMbr = E0.shape
-    nbObs = yObs.size
-    #
-    if logging.getLogger().level < logging.WARNING:
-        assert RIdemi.shape[0] == RIdemi.shape[1] == nbObs, "R^{-1/2} is not of the right size: not of size nbObs!"
-    #
-    yo = yObs.reshape((nbObs,1))
-    IN = numpy.identity(nbMbr)
-    if method == "Transform":
-        T = numpy.identity(nbMbr)
-        Tinv = numpy.identity(nbMbr)
-    x00 = numpy.mean(E0, axis = 1).reshape((nbCtl,1))
-    Ah0 = E0 - x00
-    Am0 = (1/math.sqrt(nbMbr - 1)) * Ah0
-    w = numpy.zeros((nbMbr,1))
-    if logging.getLogger().level < logging.WARNING:
-        assert len(Ah0.shape) == 2, "Ensemble A0 is not well formed: it is not 2-dimensional!"
-        assert Ah0.shape[0] == nbCtl and Ah0.shape[1] == nbMbr, "Ensemble A0 is not well shaped!"
-        assert abs(max(numpy.mean(Ah0, axis = 1))) < nbMbr*mpr, "Ensemble A0 seems not to be centered!"
-    #
-    def _convergence_condition(j, dw, JCurr, JPrev):
-        if j > iMaximum:
-            logging.debug("Convergence on maximum number of iterations per cycle, reaching the limit of %i."%iMaximum)
-            return True
-        #---------
-        if j == 1:
-            _deltaOnJ = 1.
-        else:
-            _deltaOnJ = abs(JCurr - JPrev) / JPrev
-        if _deltaOnJ <= jTolerance:
-            logging.debug("Convergence on cost decrement tolerance, which is below the threshold of %.1e."%jTolerance)
-            return True
-        #---------
-        _deltaOnW = numpy.sqrt(numpy.mean(dw.squeeze()**2))
-        if _deltaOnW <= sTolerance:
-            logging.debug("Convergence on norm of weights correction, which is below the threshold of %.1e."%sTolerance)
-            return True # On the weights correction
-        #---------
-        return False
-    #
-    St = dict([(k,[]) for k in [
-        "CurrentState", "CurrentEnsemble", "CurrentWeights",
-        "CostFunctionJb", "CostFunctionJo", "CostFunctionJ",
-        ]])
-    #
-    j, convergence, JPrev = 1, False, numpy.nan
-    while not convergence:
-        logging.debug("Internal IEnKS step number %i"%j)
-        x0 = x00 + Am0.dot( w )
-        St["CurrentState"].append( x0.squeeze() )
-        if method == "Transform":
-            E0 = x0 + Ah0.dot(T)
-        else:
-            E0 = x0 + epsilon * Am0
-        St["CurrentEnsemble"].append( E0 )
-        Ek = E0
-        yHmean = numpy.mean(E0, axis = 1)
-        for k in range(1, Lag+1):
-            Ek = numpy.array([Mnnpu(_x) for _x in Ek.T]).T # Evolution 0->L
-            if method == "Transform":
-                yHmean = Mnnpu(yHmean)
-        HEL = numpy.array([Hn(_x) for _x in Ek.T]).T # Observation at L
-        #
-        if method == "Transform":
-            yLm = Hn( yHmean ).reshape((nbObs,1))
-            YL = RIdemi.dot( (HEL - numpy.mean( HEL, axis = 1).reshape((nbObs,1))).dot(Tinv) ) / math.sqrt(nbMbr-1)
-        else:
-            yLm = numpy.mean( HEL, axis = 1).reshape((nbObs,1))
-            YL = RIdemi.dot(HEL - yLm) / epsilon
-        dy = RIdemi.dot(yo - yLm)
-        #
-        Jb = float(w.T.dot(w))
-        Jo = float(dy.T.dot(dy))
-        J = Jo + Jb
-        logging.debug("Values for cost functions are: J = %.5e Jo = %.5e Jb = %.5e"%(J,Jo,Jb))
-        St["CurrentWeights"].append( w.squeeze() )
-        St["CostFunctionJb"].append( Jb )
-        St["CostFunctionJo"].append( Jo )
-        St["CostFunctionJ"].append( J )
-        if method == "Transform":
-            GradJ = w - YL.T.dot(dy)
-            HTild = IN + YL.T.dot(YL)
-        else:
-            GradJ = (nbMbr - 1)*w - YL.T.dot(RIdemi.dot(dy))
-            HTild = (nbMbr - 1)*IN + YL.T.dot(RIdemi.dot(YL))
-        HTild = numpy.array(HTild, dtype=float)
-        dw = numpy.linalg.solve( HTild, numpy.array(GradJ, dtype=float) )
-        w = w - dw
-        j = j + 1
-        convergence = _convergence_condition(j, dw, J, JPrev)
-        JPrev = J
-        #
-        if method == "Transform":
-            (U, s, _) = numpy.linalg.svd(HTild, full_matrices=False) # Hess = U s V
-            T = U.dot(numpy.diag(numpy.sqrt(1./s)).dot(U.T)) # T = Hess^(-1/2)
-            Tinv = U.dot(numpy.diag(numpy.sqrt(s)).dot(U.T)) # Tinv = T^(-1)
-    #
-    # Store the last step
-    St["CurrentState"].append( numpy.mean( Ek, axis = 1).squeeze() )
-    St["CurrentEnsemble"].append( Ek )
-    #
-    IndexMin = numpy.argmin( St["CostFunctionJ"][nbPS:] ) + nbPS
-    xa = St["CurrentState"][IndexMin]
-    Ea = St["CurrentEnsemble"][IndexMin]
-    #
-    return (xa, Ea, St)
-
-def ienks(
-    xb = None, # Background
-    yObs = None, # Observation (series)
-    E0 = None, # Background ensemble
-    B = None, # B
-    RIdemi = None, # R^(-1/2)
-    Mnnpu = None, # Evolution operator
-    Hn = None, # Observation operator
-    method = "Transform", # Bundle or Transform
-    nMembers = 5, # Number of members
-    cMaximum = 15000, # Number of steps or cycles
-    iMaximum = 15000, # Number of iterations per cycle
-    sTolerance = mfp, # Weights correction tolerance
-    jTolerance = mfp, # Cost decrement tolerance
-    Lag = 1, # Length of smoothing window
-    epsilon = -1.,
-    inflation = 1.,
-    nbPS = 0, # Number of previous steps
-    setSeed = None,
-    ):
-    #
-    # Initial
-    if setSeed is not None: numpy.random.seed(setSeed)
-    if E0 is None: E0 = _BackgroundEnsembleGeneration( xb, B, nMembers)
-    #
-    # Spinup
-    # ------
-    #
-    # Cycles
-    # ------
-    xa, Ea, Sa = [], [], []
-    for i in range(Lag): # Lag empty leading results
-        xa.append([])
-        Ea.append([])
-        Sa.append([])
-    for i in range(Lag,cMaximum):
-        (xa_c, Ea_c, Sa_c) = _IEnKS_cycle_Lag_L_SDA_GN(
-            E0,
-            yObs[i-Lag:i],
-            RIdemi,
-            Mnnpu,
-            Hn,
-            method,
-            iMaximum,
-            sTolerance,
-            jTolerance,
-            Lag,
-            epsilon,
-            nbPS,
-            )
-        xa.append( xa_c )
-        Ea.append( Ea_c )
-        Sa.append( Sa_c )
-        #
-        # Inflation for next cycle
-        E0 = xa_c.reshape((-1,1)) + inflation * (Ea_c - xa_c.reshape((-1,1)))
-    #
-    return (xa, Ea, Sa)
-
 # ==============================================================================
 if __name__ == "__main__":
     print('\n AUTODIAGNOSTIC\n')
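
For reference, the `_CenteredAnomalies` helper removed above maps N-1 Gaussian draws onto N anomalies whose ensemble mean is exactly zero, through an N x (N-1) matrix Q with orthonormal, zero-sum columns. The following minimal, self-contained sketch of that construction (the function name, toy covariance and checks are ours, purely illustrative, not part of ADAO) verifies the two properties the ensemble generation relies on:

    import numpy

    def centered_anomalies(Zr, N, eps=-1):
        # Zr: (N-1, n) Gaussian draws; returns (n, N) anomalies whose N columns sum to zero.
        Q = numpy.eye(N-1) - numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        # Q has orthonormal columns (Q^T Q = I) that each sum to zero, so Q Zr is
        # exactly centered and keeps the second moment of Zr.
        R, _ = numpy.linalg.qr(numpy.random.normal(size=(N-1,N-1))) # random rotation
        return numpy.dot(numpy.dot(Q, R), Zr).T

    if __name__ == "__main__":
        numpy.random.seed(0)
        n, N = 3, 7
        B = numpy.diag([1., 4., 9.])                      # toy background covariance
        Z = numpy.random.multivariate_normal(numpy.zeros(n), B, N-1)
        A = centered_anomalies(Z, N)                      # shape (n, N)
        assert numpy.allclose(A.sum(axis=1), 0.)          # anomalies exactly centered
        assert numpy.allclose(A.dot(A.T), Z.T.dot(Z))     # second moment preserved

Because Q has orthonormal columns and each column sums to zero, the random rotation R reshuffles individual members without changing the ensemble mean or its second moment.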
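
Both removed cycle functions iterate a Gauss-Newton step on the ensemble-space weights w of the cost J(w) = w^T w + ||R^(-1/2)(y - H(x_bar + Am w))||^2: in the Transform branch the gradient is w - Y^T dy, the approximate Hessian is I + Y^T Y, and the update solves HTild dw = GradJ then sets w <- w - dw. The sketch below uses our own illustrative names and assumes a linear observation operator, so a single step reaches the exact minimizer; it mirrors the first iteration of the Transform branch, where T = I:

    import numpy

    numpy.random.seed(1)
    nbCtl, nbMbr, nbObs = 4, 6, 3

    # Synthetic centered anomalies and a linear observation operator.
    A = numpy.random.normal(size=(nbCtl, nbMbr))
    A -= A.mean(axis=1, keepdims=True)                 # center the anomalies
    Am = A / numpy.sqrt(nbMbr - 1)
    H = numpy.random.normal(size=(nbObs, nbCtl))
    RIdemi = numpy.identity(nbObs) / 0.5               # R^(-1/2) for R = 0.25 I

    y = H.dot(numpy.random.normal(size=(nbCtl, 1)))    # synthetic observation
    xbar = numpy.zeros((nbCtl, 1))                     # ensemble mean

    # One Gauss-Newton step in weight space.
    w = numpy.zeros((nbMbr, 1))
    Y = RIdemi.dot(H.dot(Am))                          # scaled obs-space anomalies
    dy = RIdemi.dot(y - H.dot(xbar + Am.dot(w)))
    GradJ = w - Y.T.dot(dy)
    HTild = numpy.identity(nbMbr) + Y.T.dot(Y)
    w = w - numpy.linalg.solve(HTild, GradJ)

    # Linear case: one step gives the minimizer (I + Y^T Y)^-1 Y^T dy0 exactly.
    dy0 = RIdemi.dot(y - H.dot(xbar))
    assert numpy.allclose(w, numpy.linalg.solve(HTild, Y.T.dot(dy0)))

With a nonlinear H or evolution operator, as in the removed cycles, the same step is simply repeated until one of the convergence conditions on dw or on the cost decrement fires.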
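
At the end of each iteration, the Transform branch rebuilds the ensemble transform T as the inverse symmetric square root of the approximate Hessian through its SVD, which for a symmetric positive definite matrix coincides with its eigendecomposition. A short standalone check of the identities annotated in the removed code (the toy matrix is our own):

    import numpy

    numpy.random.seed(2)
    M = numpy.random.normal(size=(5, 5))
    HTild = numpy.identity(5) + M.T.dot(M)                  # SPD, like I + Y^T Y

    U, s, _ = numpy.linalg.svd(HTild, full_matrices=False)  # HTild = U diag(s) U^T
    T = U.dot(numpy.diag(numpy.sqrt(1./s)).dot(U.T))        # T = HTild^(-1/2)
    Tinv = U.dot(numpy.diag(numpy.sqrt(s)).dot(U.T))        # Tinv = T^(-1)

    assert numpy.allclose(T.dot(T), numpy.linalg.inv(HTild))  # inverse square root
    assert numpy.allclose(T.dot(Tinv), numpy.identity(5))     # and its inverse

Applying this T to the initial anomalies rescales the ensemble spread to the local curvature of the cost, which is what makes the next Gauss-Newton iterate consistent with the previous analysis.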