From: Jean-Philippe ARGAUD
Date: Sat, 13 Mar 2021 19:29:34 +0000 (+0100)
Subject: Minor improvements and fixes for internal variables
X-Git-Tag: V9_7_0b1~14
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=e6c96be6caa465a29838828ec926696cb0535453;p=modules%2Fadao.git

Minor improvements and fixes for internal variables
---

diff --git a/bin/AdaoCatalogGenerator.py b/bin/AdaoCatalogGenerator.py
index d7b0e3e..9b1037d 100644
--- a/bin/AdaoCatalogGenerator.py
+++ b/bin/AdaoCatalogGenerator.py
@@ -411,7 +411,7 @@ optim_names = ""
 reduc_names = ""
 check_names = ""
 decl_algos = ""
-algal_names = ""
+adao_all_names = ""
 assim_study_object = daCore.Aidsm.Aidsm()
 algos_list = assim_study_object.get_available_algorithms()
 del assim_study_object
@@ -430,7 +430,7 @@ for algo_name in algos_list:
     check_names += "\"" + algo_name + "\", "
   if algo_name in infos.AssimAlgos+infos.OptimizationAlgos+infos.ReductionAlgos+infos.CheckAlgos:
     # Pour filtrer sur les algorithmes vraiment interfacés, car il peut y en avoir moins que "algos_list"
-    algal_names += "\"" + algo_name + "\", "
+    adao_all_names += "\"" + algo_name + "\", "

 # Step 1: A partir des infos, on crée les fonctions qui vont permettre
 # d'entrer les données utilisateur
@@ -455,7 +455,6 @@ for data_input_name in infos.DataTypeDict:
   'data_into' : data_into,
   'data_default' : data_default,
   'ms_default' : ms_default,
-  #~ 'algos_names' : algal_names,
   }))

 # Step 2: On crée les fonctions qui permettent de rentrer les données des algorithmes
@@ -505,7 +504,6 @@ for opt_name in infos.OptDict:
   'data_into' : data_into,
   'data_default' : data_default,
   'ms_default' : ms_default,
-  #~ 'algos_names' : algal_names,
   }))

 # Step 3bis: On ajoute la méthode optionnelle init
@@ -522,7 +520,7 @@ mem_file.write(unicode(observers_method, 'utf-8').format(**{
   }))

 # Step 5: On ajoute les choix algorithmiques
-all_names = eval((algal_names))
+all_names = eval((adao_all_names))
 all_algo_defaults = ""
 for algo in all_names:
   assim_study_object = daCore.Aidsm.Aidsm()
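Editor's note on the eval() in Step 5: adao_all_names accumulates the quoted
algorithm names as one comma-separated string, so eval() turns it into a
Python tuple. A minimal sketch of the same pattern outside the generator (the
two names are an illustrative subset of algos_list):

    names = ""
    for algo_name in ("3DVAR", "Blue"):
        names += "\"" + algo_name + "\", "
    all_names = eval(names)   # trailing comma makes this a tuple
    assert all_names == ("3DVAR", "Blue")

A plain Python list would avoid the eval() round-trip; the string form matches
the other *_names accumulators built in the same loop.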
diff --git a/resources/ADAOSchemaCatalog.xml b/resources/ADAOSchemaCatalog.xml
index c01b789..7f532f5 100644
--- a/resources/ADAOSchemaCatalog.xml
+++ b/resources/ADAOSchemaCatalog.xml
@@ -612,7 +612,7 @@ if sys.version_info.major > 2: import adao
 def loads( data ):
   return pickle.loads(codecs.decode(data.encode(), "base64"))
 logging.debug("TERMINATE Entering in ExtractData/Node")
-from daCore.AssimilationStudy import AssimilationStudy
+from daCore.Aidsm import Aidsm
 var = None
 info = None
 for param in data["specificParameters"]:
diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py
index 3406124..7afa808 100644
--- a/src/daComposant/daCore/BasicObjects.py
+++ b/src/daComposant/daCore/BasicObjects.py
@@ -1775,7 +1775,7 @@ class Covariance(object):
         elif __Object is not None:
             self.__is_object = True
             self.__C = __Object
-            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
+            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
                 if not hasattr(self.__C,at):
                     raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
             if hasattr(self.__C,"shape"):
@@ -1918,14 +1918,14 @@ class Covariance(object):
     def asfullmatrix(self, msize=None):
         "Matrice pleine"
         if self.ismatrix():
-            return self.__C
+            return numpy.asarray(self.__C)
         elif self.isvector():
-            return numpy.matrix( numpy.diag(self.__C), float )
+            return numpy.asarray( numpy.diag(self.__C), float )
         elif self.isscalar():
             if msize is None:
                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
             else:
-                return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
+                return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
         elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
             return self.__C.asfullmatrix()
@@ -1984,6 +1984,36 @@ class Covariance(object):
         "x.__neg__() <==> -x"
         return - self.__C

+    def __matmul__(self, other):
+        "x.__matmul__(y) <==> x@y"
+        if self.ismatrix() and isinstance(other, (int, float)):
+            return numpy.asarray(self.__C) * other
+        elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
+            if numpy.ravel(other).size == self.shape[1]: # Vecteur
+                return numpy.ravel(self.__C @ numpy.ravel(other))
+            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
+                return numpy.asarray(self.__C) @ numpy.asarray(other)
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
+        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
+            if numpy.ravel(other).size == self.shape[1]: # Vecteur
+                return numpy.ravel(self.__C) * numpy.ravel(other)
+            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
+                return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
+        elif self.isscalar() and isinstance(other,numpy.matrix):
+            return numpy.asarray(self.__C * other)
+        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
+            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
+                return self.__C * numpy.ravel(other)
+            else:
+                return self.__C * numpy.asarray(other)
+        elif self.isobject():
+            return self.__C.__matmul__(other)
+        else:
+            raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
+
     def __mul__(self, other):
         "x.__mul__(y) <==> x*y"
         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
@@ -2014,6 +2044,31 @@ class Covariance(object):
         else:
             raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))

+    def __rmatmul__(self, other):
+        "x.__rmatmul__(y) <==> y@x"
+        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
+            return other * self.__C
+        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
+            if numpy.ravel(other).size == self.shape[1]: # Vecteur
+                return numpy.asmatrix(numpy.ravel(other)) * self.__C
+            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
+                return numpy.asmatrix(other) * self.__C
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
+        elif self.isvector() and isinstance(other,numpy.matrix):
+            if numpy.ravel(other).size == self.shape[0]: # Vecteur
+                return numpy.asmatrix(numpy.ravel(other) * self.__C)
+            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
+                return numpy.asmatrix(numpy.array(other) * self.__C)
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
+        elif self.isscalar() and isinstance(other,numpy.matrix):
+            return other * self.__C
+        elif self.isobject():
+            return self.__C.__rmatmul__(other)
+        else:
+            raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
+
     def __rmul__(self, other):
         "x.__rmul__(y) <==> y*x"
         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
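Editor's note: the new __matmul__/__rmatmul__ above dispatch on how the
covariance is stored internally (full matrix, diagonal vector, or scalar
multiple of the identity). A self-contained NumPy sketch of the three branches
for B @ x, with B standing for the stored self.__C (not part of the commit):

    import numpy
    x = numpy.array([1., 2., 3.])
    # "matrix" storage: ordinary matrix-vector product
    B = numpy.diag([1., 4., 9.])
    assert numpy.allclose(numpy.ravel(B @ x), [1., 8., 27.])
    # "vector" storage: only the diagonal is kept, so B @ x is elementwise
    d = numpy.array([1., 4., 9.])
    assert numpy.allclose(numpy.ravel(d) * numpy.ravel(x), [1., 8., 27.])
    # "scalar" storage: sigma**2 * identity, so B @ x is a plain scaling
    s = 4.
    assert numpy.allclose(s * numpy.ravel(x), [4., 8., 12.])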
diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py
index 51d7fdf..65ad52a 100644
--- a/src/daComposant/daCore/NumericObjects.py
+++ b/src/daComposant/daCore/NumericObjects.py
@@ -1935,18 +1935,18 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         GradJb = BI * (_X - Xb)
         GradJo = 0.
         for step in range(duration-1,0,-1):
-            # Etape de récupération du dernier stockage de l'évolution
+            # Étape de récupération du dernier stockage de l'évolution
            _Xn = selfA.DirectCalculation.pop()
-            # Etape de récupération du dernier stockage de l'innovation
+            # Étape de récupération du dernier stockage de l'innovation
            _YmHMX = selfA.DirectInnovation.pop()
             # Calcul des adjoints
             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
-            # Calcul du gradient par etat adjoint
-            GradJo = GradJo + Ha * RI * _YmHMX # Equivaut pour Ha lineaire à : Ha( (_Xn, RI * _YmHMX) )
-            GradJo = Ma * GradJo               # Equivaut pour Ma lineaire à : Ma( (_Xn, GradJo) )
+            # Calcul du gradient par état adjoint
+            GradJo = GradJo + Ha * RI * _YmHMX # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
+            GradJo = Ma * GradJo               # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
         return GradJ
     #
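Editor's note: the retouched comments above describe the adjoint sweep of
strong-constraint 4DVAR: the stored innovations are pulled back through the
observation and evolution adjoints Ha and Ma. A linear toy version of the same
backward recursion, with every operator a plain matrix and all sizes made up:

    import numpy
    n, p, T = 3, 2, 4
    rng = numpy.random.default_rng(0)
    Ha = rng.normal(size=(n, p))            # adjoint of H
    Ma = rng.normal(size=(n, n))            # adjoint of M
    RI = numpy.eye(p)                       # R^{-1}
    innovations = [rng.normal(size=(p, 1)) for _ in range(T)]
    GradJo = numpy.zeros((n, 1))
    for d in reversed(innovations):         # same order as the pop() loop above
        GradJo = GradJo + Ha @ (RI @ d)     # Ha( (_Xn, RI * _YmHMX) ) when Ha is linear
        GradJo = Ma @ GradJo                # Ma( (_Xn, GradJo) ) when Ma is linear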
@@ -2103,9 +2103,9 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
     #
     for step in range(duration-1):
         if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
@@ -2143,15 +2143,15 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 returnSerieAsArrayMatrix = True )
         #
         # Mean of forecast and observation of forecast
-        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
-        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
         #
         #--------------------------
         if VariantM == "KalmanFilterFormula05":
             PfHT, HPfHT = 0., 0.
             for i in range(__m):
-                Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
-                Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
+                Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
+                Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
                 PfHT += Exfi * Eyfi.T
                 HPfHT += Eyfi * Eyfi.T
             PfHT = (1./(__m-1)) * PfHT
@@ -2165,7 +2165,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
         #--------------------------
         elif VariantM == "KalmanFilterFormula16":
             EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
-            EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+            EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
             #
             EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
             EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
@@ -2184,7 +2184,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
         #--------------------------
         #
         if selfA._parameters["StoreInternalVariables"] \
@@ -2212,7 +2212,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
         if selfA._toStore("ForecastState"):
             selfA.StoredVariables["ForecastState"].store( EMX )
         if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
+            selfA.StoredVariables["BMA"].store( EMX - Xa )
         if selfA._toStore("InnovationAtCurrentState"):
             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
         if selfA._toStore("SimulatedObservationAtCurrentState") \
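Editor's note: most hunks in this file replace reshape((__p,-1)) by
reshape((__p,1)) on quantities that must be column vectors. Both forms agree
when the data really has __p entries; the explicit (p,1) fails fast instead of
silently widening into a matrix when it does not:

    import numpy
    y = numpy.ravel([1., 2., 3., 4., 5., 6.])
    print(y.reshape((3, -1)).shape)   # (3, 2): a 6-vector silently becomes a matrix
    try:
        y.reshape((3, 1))
    except ValueError as e:
        print(e)                      # cannot reshape array of size 6 into shape (3,1)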
@@ -2341,9 +2341,9 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
     #
     for step in range(duration-1):
         if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
@@ -2381,8 +2381,8 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             returnSerieAsArrayMatrix = True )
         #
         # Mean of forecast and observation of forecast
-        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
-        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
         #
         # Anomalies
         EaX = EnsembleOfAnomalies( Xn_predicted )
@@ -2399,18 +2399,18 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 mU = numpy.eye(__m)
             #
             EaX = EaX / numpy.sqrt(__m-1)
-            Xn = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
+            Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
         #--------------------------
         elif VariantM == "Variational":
             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
             def CostFunction(w):
-                _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _Jo = 0.5 * _A.T @ (RI * _A)
                 _Jb = 0.5 * (__m-1) * w.T @ w
                 _J = _Jo + _Jb
                 return float(_J)
             def GradientOfCostFunction(w):
-                _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _GardJo = - EaHX.T @ (RI * _A)
                 _GradJb = (__m-1) * w.reshape((__m,1))
                 _GradJ = _GardJo + _GradJb
@@ -2435,13 +2435,13 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
         elif VariantM == "FiniteSize11": # Jauge Boc2011
             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
             def CostFunction(w):
-                _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _Jo = 0.5 * _A.T @ (RI * _A)
                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                 _J = _Jo + _Jb
                 return float(_J)
             def GradientOfCostFunction(w):
-                _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _GardJo = - EaHX.T @ (RI * _A)
                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                 _GradJ = _GardJo + _GradJb
@@ -2463,18 +2463,18 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             Pta = numpy.linalg.inv( Hta )
             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
             #
-            Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
+            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
         #--------------------------
         elif VariantM == "FiniteSize15": # Jauge Boc2015
             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
             def CostFunction(w):
-                _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _Jo = 0.5 * _A.T * RI * _A
                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                 _J = _Jo + _Jb
                 return float(_J)
             def GradientOfCostFunction(w):
-                _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _GardJo = - EaHX.T @ (RI * _A)
                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                 _GradJ = _GardJo + _GradJb
@@ -2496,18 +2496,18 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             Pta = numpy.linalg.inv( Hta )
             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
             #
-            Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
+            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
         #--------------------------
         elif VariantM == "FiniteSize16": # Jauge Boc2016
             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
             def CostFunction(w):
-                _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _Jo = 0.5 * _A.T @ (RI * _A)
                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                 _J = _Jo + _Jb
                 return float(_J)
             def GradientOfCostFunction(w):
-                _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _GardJo = - EaHX.T @ (RI * _A)
                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                 _GradJ = _GardJo + _GradJb
@@ -2540,7 +2540,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
         #--------------------------
         #
         if selfA._parameters["StoreInternalVariables"] \
@@ -2570,7 +2570,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
         if selfA._toStore("BMA"):
             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
         if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
         if selfA._toStore("SimulatedObservationAtCurrentState") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
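Editor's note: in the "Variational" and "FiniteSize" branches above, the
minimisation runs in ensemble space: the control variable is the weight vector
w of the anomalies, not the state itself. A self-contained toy version of that
cost/gradient pair, assuming R = I and the observation operator already folded
into EaHX (all data made up):

    import numpy
    import scipy.optimize
    m, p = 5, 4
    rng = numpy.random.default_rng(1)
    EaHX = rng.normal(size=(p, m))          # anomalies of the observed ensemble
    d = rng.normal(size=(p, 1))             # innovation Ynpu - H(Xfm)
    def CostFunction(w):
        _A = d - (EaHX @ w).reshape((p, 1))
        return float(0.5 * _A.T @ _A + 0.5 * (m - 1) * w @ w)
    def GradientOfCostFunction(w):
        _A = d - (EaHX @ w).reshape((p, 1))
        return numpy.ravel(- EaHX.T @ _A + (m - 1) * w.reshape((m, 1)))
    vw = scipy.optimize.minimize(CostFunction, numpy.zeros(m),
                                 jac=GradientOfCostFunction).x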
@@ -2694,9 +2694,9 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
     #
     for step in range(duration-1):
         if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
@@ -2738,7 +2738,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
         Ta = numpy.eye(__m)
         vw = numpy.zeros(__m)
         while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
-            vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
+            vx1 = (Xfm + EaX @ vw).reshape((__n,1))
             #
             if BnotT:
                 E1 = vx1 + _epsilon * EaX
@@ -2748,7 +2748,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
             HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                 argsAsSerie = True,
                 returnSerieAsArrayMatrix = True )
-            vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+            vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
             #
             if BnotT:
                 EaY = (HE2 - vy2) / _epsilon
@@ -2780,7 +2780,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
         #--------------------------
         #
         if selfA._parameters["StoreInternalVariables"] \
@@ -2808,9 +2808,9 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
         if selfA._toStore("ForecastState"):
             selfA.StoredVariables["ForecastState"].store( EMX )
         if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
+            selfA.StoredVariables["BMA"].store( EMX - Xa )
         if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
         if selfA._toStore("SimulatedObservationAtCurrentState") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
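Editor's note: in the "bundle" branch above (BnotT true), the observation
operator is linearised by finite differences: the anomalies are shrunk by
_epsilon around the current mean before applying H, then the observed spread
is rescaled back. A toy illustration with a made-up nonlinear H:

    import numpy
    def H(x):                                  # toy nonlinear observation operator
        return numpy.asarray([x[0]**2, x[0] + x[1]])
    n, m, eps = 2, 3, 1.e-2
    rng = numpy.random.default_rng(2)
    EaX = rng.normal(size=(n, m))              # state anomalies
    vx1 = numpy.array([1., 2.]).reshape((n, 1))
    E1 = vx1 + eps * EaX                       # bundle: small perturbations
    HE2 = numpy.column_stack([H(E1[:, i]) for i in range(m)])
    vy2 = HE2.mean(axis=1).reshape((-1, 1))
    EaY = (HE2 - vy2) / eps                    # ~ tangent of H applied to EaX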
@@ -2934,9 +2934,9 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
     #
     for step in range(duration-1):
         if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
@@ -2964,7 +2964,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
         Ta = numpy.eye(__m)
         vw = numpy.zeros(__m)
         while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
-            vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
+            vx1 = (Xfm + EaX @ vw).reshape((__n,1))
             #
             if BnotT:
                 E1 = vx1 + _epsilon * EaX
@@ -2978,13 +2978,13 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
             elif selfA._parameters["EstimationOf"] == "Parameters":
                 # --- > Par principe, M = Id
                 E2 = Xn
-            vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
-            vy1 = H((vx2, Un)).reshape((__p,-1))
+            vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+            vy1 = H((vx2, Un)).reshape((__p,1))
             #
             HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
                 argsAsSerie = True,
                 returnSerieAsArrayMatrix = True )
-            vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+            vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
             #
             if BnotT:
                 EaY = (HE2 - vy2) / _epsilon
@@ -3019,7 +3019,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
         #--------------------------
         #
         if selfA._parameters["StoreInternalVariables"] \
@@ -3049,7 +3049,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
         if selfA._toStore("BMA"):
             selfA.StoredVariables["BMA"].store( E2 - Xa )
         if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
         if selfA._toStore("SimulatedObservationAtCurrentState") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
diff --git a/src/daSalome/daYacsIntegration/daOptimizerLoop.py b/src/daSalome/daYacsIntegration/daOptimizerLoop.py
index 2dfd8ed..1bb9178 100644
--- a/src/daSalome/daYacsIntegration/daOptimizerLoop.py
+++ b/src/daSalome/daYacsIntegration/daOptimizerLoop.py
@@ -35,7 +35,8 @@ import traceback
 import codecs

 # Pour disposer des classes dans l'espace de nommage lors du pickle
-from daCore.AssimilationStudy import AssimilationStudy
+from daCore.Aidsm import Aidsm
+from daCore.AssimilationStudy import AssimilationStudy # JPA à résorber
 from daYacsIntegration import daStudy

 def dumps( data ):
diff --git a/src/daSalome/daYacsIntegration/daStudy.py b/src/daSalome/daYacsIntegration/daStudy.py
index 8f9a8e4..b2f63fb 100644
--- a/src/daSalome/daYacsIntegration/daStudy.py
+++ b/src/daSalome/daYacsIntegration/daStudy.py
@@ -22,7 +22,7 @@
 #
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D

-from daCore.AssimilationStudy import AssimilationStudy
+from daCore.Aidsm import Aidsm
 #from daCore import Logging
 import logging

@@ -36,7 +36,7 @@ class daStudy:

   def __init__(self, name, algorithm, debug):

-    self.ADD = AssimilationStudy(name)
+    self.ADD = Aidsm(name)
     self.algorithm = algorithm
     self.algorithm_dict = None
     self.Background = None
@@ -91,7 +91,7 @@ class daStudy:
     return self.ADD

   #--------------------------------------
-  # Methods to initialize AssimilationStudy
+  # Methods to initialize assimilation study

   def setYIBackgroundType(self, Type):
     if Type == "Vector":
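Editor's note: the three files above complete the switch of the scripted entry
point from daCore.AssimilationStudy.AssimilationStudy to daCore.Aidsm.Aidsm
(the catalog generator already instantiates daCore.Aidsm.Aidsm()). A minimal
sketch of the new pattern, using only the calls visible in this commit (the
study name is illustrative):

    from daCore.Aidsm import Aidsm
    ADD = Aidsm("my_study")                    # replaces AssimilationStudy(name)
    algos = Aidsm().get_available_algorithms() # as in bin/AdaoCatalogGenerator.py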
diff --git a/src/daSalome/daYacsSchemaCreator/methods.py b/src/daSalome/daYacsSchemaCreator/methods.py
index 655441e..62c960b 100644
--- a/src/daSalome/daYacsSchemaCreator/methods.py
+++ b/src/daSalome/daYacsSchemaCreator/methods.py
@@ -101,7 +101,7 @@ def create_yacs_proc(study_config):
   ADAO_Case = runtime.createBloc("ADAO_Case_Bloc")
   proc.edAddChild(ADAO_Case)

-  # Step 0: create AssimilationStudyObject
+  # Step 0: create assimilation study object
   factory_CAS_node = catalogAd.getNodeFromNodeMap("CreateAssimilationStudy")
   CAS_node = factory_CAS_node.cloneNode("CreateAssimilationStudy")
   CAS_node.getInputPort("Name").edInitPy(study_config["Name"])
@@ -132,7 +132,7 @@ def create_yacs_proc(study_config):
     CAS_node.setExecutionMode(SALOMERuntime.PythonNode.REMOTE_STR)
     CAS_node.setContainer(mycontainer)

-  # Adding an observer init node if an user defines some
+  # Add an observer init node if a user defines some
   factory_init_observers_node = catalogAd.getNodeFromNodeMap("SetObserversNode")
   init_observers_node = factory_init_observers_node.cloneNode("SetObservers")
   if "Observers" in list(study_config.keys()):