SALOME platform Git repositories - modules/adao.git/commitdiff
Minor improvements and fixes for internal variables
author    Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
          Sat, 13 Mar 2021 19:29:34 +0000 (20:29 +0100)
committer Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
          Sun, 14 Mar 2021 20:00:39 +0000 (21:00 +0100)
bin/AdaoCatalogGenerator.py
resources/ADAOSchemaCatalog.xml
src/daComposant/daCore/BasicObjects.py
src/daComposant/daCore/NumericObjects.py
src/daSalome/daYacsIntegration/daOptimizerLoop.py
src/daSalome/daYacsIntegration/daStudy.py
src/daSalome/daYacsSchemaCreator/methods.py

diff --git a/bin/AdaoCatalogGenerator.py b/bin/AdaoCatalogGenerator.py
index d7b0e3ea210e1bdc9244fc0cfb79db12b46b5f3f..9b1037dc1d194454ede616700437b327a8aca414 100644
@@ -411,7 +411,7 @@ optim_names = ""
 reduc_names = ""
 check_names = ""
 decl_algos  = ""
-algal_names = ""
+adao_all_names = ""
 assim_study_object = daCore.Aidsm.Aidsm()
 algos_list = assim_study_object.get_available_algorithms()
 del assim_study_object
@@ -430,7 +430,7 @@ for algo_name in algos_list:
     check_names += "\"" + algo_name + "\", "
   if algo_name in infos.AssimAlgos+infos.OptimizationAlgos+infos.ReductionAlgos+infos.CheckAlgos:
     # To keep only the algorithms that are really interfaced, since there may be fewer of them than in "algos_list"
-    algal_names += "\"" + algo_name + "\", "
+    adao_all_names += "\"" + algo_name + "\", "
 
 # Step 1: From the collected infos, create the functions that allow
 # the user data to be entered
@@ -455,7 +455,6 @@ for data_input_name in infos.DataTypeDict:
     'data_into'    : data_into,
     'data_default' : data_default,
     'ms_default'   : ms_default,
-    #~ 'algos_names'  : algal_names,
     }))
 
 # Step 2: Create the functions that allow the algorithm data to be entered
@@ -505,7 +504,6 @@ for opt_name in infos.OptDict:
     'data_into'    : data_into,
     'data_default' : data_default,
     'ms_default'   : ms_default,
-    #~ 'algos_names'  : algal_names,
     }))
 
 # Step 3bis: Add the optional init method
@@ -522,7 +520,7 @@ mem_file.write(unicode(observers_method, 'utf-8').format(**{
   }))
 
 # Step 5: Add the algorithmic choices
-all_names = eval((algal_names))
+all_names = eval((adao_all_names))
 all_algo_defaults = ""
 for algo in all_names:
     assim_study_object = daCore.Aidsm.Aidsm()
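A note on the pattern above: the generator accumulates the interfaced algorithm names into `adao_all_names` as one string of comma-separated quoted names, which `eval` then turns into a Python tuple. A minimal self-contained sketch of that mechanism, using two hypothetical algorithm names:

    # Sketch of the accumulate-then-eval pattern (hypothetical names).
    adao_all_names = ""
    for algo_name in ("3DVAR", "Blue"):
        adao_all_names += "\"" + algo_name + "\", "

    # eval of the string '"3DVAR", "Blue", ' yields a tuple of names,
    # as in "all_names = eval((adao_all_names))" above.
    all_names = eval(adao_all_names)
    assert all_names == ("3DVAR", "Blue")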
diff --git a/resources/ADAOSchemaCatalog.xml b/resources/ADAOSchemaCatalog.xml
index c01b789c935256277b80f5358886a5e78d960199..7f532f59b833d36dd1c50de578ff0266e828828a 100644
@@ -612,7 +612,7 @@ if sys.version_info.major > 2: import adao
 def loads( data ):
   return pickle.loads(codecs.decode(data.encode(), "base64"))
 logging.debug("TERMINATE Entering in ExtractData/Node")
-from daCore.AssimilationStudy import AssimilationStudy
+from daCore.Aidsm import Aidsm
 var = None
 info = None
 for param in data["specificParameters"]:
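The `loads` helper shown above mirrors a `dumps` helper defined in daOptimizerLoop.py (see further down). A minimal sketch of the round trip, assuming `dumps` base64-encodes a pickle (its body is not shown in this diff):

    import codecs
    import pickle

    def dumps(data):
        # assumed counterpart of loads: pickle, then base64-encode to text
        return codecs.encode(pickle.dumps(data), "base64").decode()

    def loads(data):
        # as in the snippet above: base64-decode the text, then unpickle
        return pickle.loads(codecs.decode(data.encode(), "base64"))

    assert loads(dumps({"specificParameters": []})) == {"specificParameters": []}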
diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py
index 340612437730232b165d9a618594406ef50a0981..7afa8084bae770a98f253e6ce10ed84d6c5a42e3 100644
@@ -1775,7 +1775,7 @@ class Covariance(object):
         elif __Object is not None:
             self.__is_object = True
             self.__C         = __Object
-            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
+            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
                 if not hasattr(self.__C,at):
                     raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
             if hasattr(self.__C,"shape"):
@@ -1918,14 +1918,14 @@ class Covariance(object):
     def asfullmatrix(self, msize=None):
         "Matrice pleine"
         if   self.ismatrix():
-            return self.__C
+            return numpy.asarray(self.__C)
         elif self.isvector():
-            return numpy.matrix( numpy.diag(self.__C), float )
+            return numpy.asarray( numpy.diag(self.__C), float )
         elif self.isscalar():
             if msize is None:
                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
             else:
-                return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
+                return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
         elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
             return self.__C.asfullmatrix()
 
@@ -1984,6 +1984,36 @@ class Covariance(object):
         "x.__neg__() <==> -x"
         return - self.__C
 
+    def __matmul__(self, other):
+        "x.__matmul__(y) <==> x@y"
+        if   self.ismatrix() and isinstance(other, (int, float)):
+            return numpy.asarray(self.__C) * other
+        elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
+            if numpy.ravel(other).size == self.shape[1]: # Vector
+                return numpy.ravel(self.__C @ numpy.ravel(other))
+            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
+                return numpy.asarray(self.__C) @ numpy.asarray(other)
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
+        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
+            if numpy.ravel(other).size == self.shape[1]: # Vector
+                return numpy.ravel(self.__C) * numpy.ravel(other)
+            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
+                return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
+        elif self.isscalar() and isinstance(other,numpy.matrix):
+            return numpy.asarray(self.__C * other)
+        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
+            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
+                return self.__C * numpy.ravel(other)
+            else:
+                return self.__C * numpy.asarray(other)
+        elif self.isobject():
+            return self.__C.__matmul__(other)
+        else:
+            raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
+
     def __mul__(self, other):
         "x.__mul__(y) <==> x*y"
         if   self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
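The new `__matmul__` lets a `Covariance` instance stand on the left of Python's `@` operator. A minimal sketch of the vector branch; the constructor keyword `asCovariance` is assumed, not shown in this diff:

    import numpy
    from daCore.BasicObjects import Covariance

    # "asCovariance" is an assumed constructor keyword.
    B = Covariance("B", asCovariance=numpy.eye(2))
    x = numpy.array([1.0, 2.0])
    v = B @ x                  # dispatches to Covariance.__matmul__
    assert v.shape == (2,)     # matching-size vector -> raveled product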
@@ -2014,6 +2044,31 @@ class Covariance(object):
         else:
             raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
 
+    def __rmatmul__(self, other):
+        "x.__rmatmul__(y) <==> y@x"
+        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
+            return other * self.__C
+        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
+            if numpy.ravel(other).size == self.shape[1]: # Vector
+                return numpy.asmatrix(numpy.ravel(other)) * self.__C
+            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
+                return numpy.asmatrix(other) * self.__C
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
+        elif self.isvector() and isinstance(other,numpy.matrix):
+            if numpy.ravel(other).size == self.shape[0]: # Vector
+                return numpy.asmatrix(numpy.ravel(other) * self.__C)
+            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
+                return numpy.asmatrix(numpy.array(other) * self.__C)
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
+        elif self.isscalar() and isinstance(other,numpy.matrix):
+            return other * self.__C
+        elif self.isobject():
+            return self.__C.__rmatmul__(other)
+        else:
+            raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
+
     def __rmul__(self, other):
         "x.__rmul__(y) <==> y*x"
         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
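`__rmatmul__` covers the right-hand side: a plain Python list defines no `__matmul__`, so `[1.0, 2.0] @ B` falls back to `B.__rmatmul__([1.0, 2.0])`. A companion sketch, with the same assumed constructor keyword as above:

    import numpy
    from daCore.BasicObjects import Covariance

    B = Covariance("B", asCovariance=numpy.eye(2))   # assumed keyword
    w = [1.0, 2.0] @ B     # a list has no __matmul__, so B.__rmatmul__ runs
    # per the matrix-with-vector branch above, w is a 1x2 row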
diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py
index 51d7fdfe3265631f2183439bc6181134aa6f36b4..65ad52ad5769cc0b15e32b675874771ea562500d 100644
@@ -1935,18 +1935,18 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         GradJb  = BI * (_X - Xb)
         GradJo  = 0.
         for step in range(duration-1,0,-1):
-            # Etape de récupération du dernier stockage de l'évolution
+            # Étape de récupération du dernier stockage de l'évolution
             _Xn = selfA.DirectCalculation.pop()
-            # Etape de récupération du dernier stockage de l'innovation
+            # Étape de récupération du dernier stockage de l'innovation
             _YmHMX = selfA.DirectInnovation.pop()
             # Calcul des adjoints
             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
-            # Calcul du gradient par etat adjoint
-            GradJo = GradJo + Ha * RI * _YmHMX # Equivaut pour Ha lineaire à : Ha( (_Xn, RI * _YmHMX) )
-            GradJo = Ma * GradJo               # Equivaut pour Ma lineaire à : Ma( (_Xn, GradJo) )
+            # Calcul du gradient par état adjoint
+            GradJo = GradJo + Ha * RI * _YmHMX # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
+            GradJo = Ma * GradJo               # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
         return GradJ
     #
@@ -2103,9 +2103,9 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
     #
     for step in range(duration-1):
         if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
@@ -2143,15 +2143,15 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 returnSerieAsArrayMatrix = True )
         #
         # Mean of forecast and observation of forecast
-        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
-        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
         #
         #--------------------------
         if VariantM == "KalmanFilterFormula05":
             PfHT, HPfHT = 0., 0.
             for i in range(__m):
-                Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
-                Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
+                Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
+                Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
                 PfHT  += Exfi * Eyfi.T
                 HPfHT += Eyfi * Eyfi.T
             PfHT  = (1./(__m-1)) * PfHT
@@ -2165,7 +2165,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
         #--------------------------
         elif VariantM == "KalmanFilterFormula16":
             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
-            EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+            EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
             #
             EaX   = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
             EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
@@ -2184,7 +2184,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
         #--------------------------
         #
         if selfA._parameters["StoreInternalVariables"] \
@@ -2212,7 +2212,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
         if selfA._toStore("ForecastState"):
             selfA.StoredVariables["ForecastState"].store( EMX )
         if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
+            selfA.StoredVariables["BMA"].store( EMX - Xa )
         if selfA._toStore("InnovationAtCurrentState"):
             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
         if selfA._toStore("SimulatedObservationAtCurrentState") \
@@ -2341,9 +2341,9 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
     #
     for step in range(duration-1):
         if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
@@ -2381,8 +2381,8 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 returnSerieAsArrayMatrix = True )
         #
         # Mean of forecast and observation of forecast
-        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
-        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
         #
         # Anomalies
         EaX   = EnsembleOfAnomalies( Xn_predicted )
@@ -2399,18 +2399,18 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             mU    = numpy.eye(__m)
             #
             EaX   = EaX / numpy.sqrt(__m-1)
-            Xn    = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
+            Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
         #--------------------------
         elif VariantM == "Variational":
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
             def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _Jo = 0.5 * _A.T @ (RI * _A)
                 _Jb = 0.5 * (__m-1) * w.T @ w
                 _J  = _Jo + _Jb
                 return float(_J)
             def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _GardJo = - EaHX.T @ (RI * _A)
                 _GradJb = (__m-1) * w.reshape((__m,1))
                 _GradJ  = _GardJo + _GradJb
@@ -2435,13 +2435,13 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
         elif VariantM == "FiniteSize11": # Jauge Boc2011
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
             def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _Jo = 0.5 * _A.T @ (RI * _A)
                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                 _J  = _Jo + _Jb
                 return float(_J)
             def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _GardJo = - EaHX.T @ (RI * _A)
                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                 _GradJ  = _GardJo + _GradJb
@@ -2463,18 +2463,18 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
             #
-            Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
+            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
         #--------------------------
         elif VariantM == "FiniteSize15": # Jauge Boc2015
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
             def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _Jo = 0.5 * _A.T * RI * _A
                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                 _J  = _Jo + _Jb
                 return float(_J)
             def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _GardJo = - EaHX.T @ (RI * _A)
                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                 _GradJ  = _GardJo + _GradJb
@@ -2496,18 +2496,18 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
             #
-            Xn  = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
+            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
         #--------------------------
         elif VariantM == "FiniteSize16": # Jauge Boc2016
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
             def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _Jo = 0.5 * _A.T @ (RI * _A)
                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                 _J  = _Jo + _Jb
                 return float(_J)
             def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                 _GardJo = - EaHX.T @ (RI * _A)
                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                 _GradJ  = _GardJo + _GradJb
@@ -2540,7 +2540,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
         #--------------------------
         #
         if selfA._parameters["StoreInternalVariables"] \
@@ -2570,7 +2570,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
         if selfA._toStore("BMA"):
             selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
         if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
         if selfA._toStore("SimulatedObservationAtCurrentState") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
@@ -2694,9 +2694,9 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
     #
     for step in range(duration-1):
         if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
@@ -2738,7 +2738,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
                 Ta  = numpy.eye(__m)
             vw  = numpy.zeros(__m)
             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
-                vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
+                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
                 #
                 if BnotT:
                     E1 = vx1 + _epsilon * EaX
@@ -2748,7 +2748,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                     argsAsSerie = True,
                     returnSerieAsArrayMatrix = True )
-                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
                 #
                 if BnotT:
                     EaY = (HE2 - vy2) / _epsilon
@@ -2780,7 +2780,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
         #--------------------------
         #
         if selfA._parameters["StoreInternalVariables"] \
@@ -2808,9 +2808,9 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
         if selfA._toStore("ForecastState"):
             selfA.StoredVariables["ForecastState"].store( EMX )
         if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
+            selfA.StoredVariables["BMA"].store( EMX - Xa )
         if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
         if selfA._toStore("SimulatedObservationAtCurrentState") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
@@ -2934,9 +2934,9 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
     #
     for step in range(duration-1):
         if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
@@ -2964,7 +2964,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
                 Ta  = numpy.eye(__m)
             vw  = numpy.zeros(__m)
             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
-                vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
+                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
                 #
                 if BnotT:
                     E1 = vx1 + _epsilon * EaX
@@ -2978,13 +2978,13 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
                 elif selfA._parameters["EstimationOf"] == "Parameters":
                     # --- > Par principe, M = Id
                     E2 = Xn
-                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
-                vy1 = H((vx2, Un)).reshape((__p,-1))
+                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+                vy1 = H((vx2, Un)).reshape((__p,1))
                 #
                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
                     argsAsSerie = True,
                     returnSerieAsArrayMatrix = True )
-                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
                 #
                 if BnotT:
                     EaY = (HE2 - vy2) / _epsilon
@@ -3019,7 +3019,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
         #--------------------------
         #
         if selfA._parameters["StoreInternalVariables"] \
@@ -3049,7 +3049,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
         if selfA._toStore("BMA"):
             selfA.StoredVariables["BMA"].store( E2 - Xa )
         if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
         if selfA._toStore("SimulatedObservationAtCurrentState") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
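A note on the recurring `.reshape((n,-1))` to `.reshape((n,1))` changes in this file: both forms yield the same (n,1) column when the input has exactly n elements, but the explicit shape fails loudly on a size mismatch instead of silently producing an (n,k) block. A quick illustration in plain NumPy:

    import numpy

    y = numpy.arange(6.0)
    assert y.reshape((6, -1)).shape == (6, 1)   # identical when sizes match
    assert y.reshape((6, 1)).shape == (6, 1)

    y2 = numpy.arange(12.0)
    assert y2.reshape((6, -1)).shape == (6, 2)  # silently a 6x2 block
    try:
        y2.reshape((6, 1))                      # explicit shape raises
    except ValueError:
        pass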
diff --git a/src/daSalome/daYacsIntegration/daOptimizerLoop.py b/src/daSalome/daYacsIntegration/daOptimizerLoop.py
index 2dfd8ede8a6b713b7c3591359eb52aa93bacbcda..1bb91788cdb4dd838ebc56aa74ba42283b782567 100644
@@ -35,7 +35,8 @@ import traceback
 import codecs
 
 # To make the classes available in the namespace during pickling
-from daCore.AssimilationStudy import AssimilationStudy
+from daCore.Aidsm import Aidsm
+from daCore.AssimilationStudy import AssimilationStudy # JPA: to be phased out
 from daYacsIntegration import daStudy
 
 def dumps( data ):
diff --git a/src/daSalome/daYacsIntegration/daStudy.py b/src/daSalome/daYacsIntegration/daStudy.py
index 8f9a8e41a4726ec703f5e5f974bba4e9bfc23941..b2f63fbdeb8c001e8fbeca8808b0c41a7f6276a4 100644
@@ -22,7 +22,7 @@
 #
 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
 
-from daCore.AssimilationStudy import AssimilationStudy
+from daCore.Aidsm import Aidsm
 #from daCore import Logging
 import logging
 
@@ -36,7 +36,7 @@ class daStudy:
 
   def __init__(self, name, algorithm, debug):
 
-    self.ADD = AssimilationStudy(name)
+    self.ADD = Aidsm(name)
     self.algorithm = algorithm
     self.algorithm_dict = None
     self.Background = None
@@ -91,7 +91,7 @@ class daStudy:
     return self.ADD
 
   #--------------------------------------
-  # Methods to initialize AssimilationStudy
+  # Methods to initialize assimilation study
 
   def setYIBackgroundType(self, Type):
     if Type == "Vector":
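The replacement of `AssimilationStudy` by `Aidsm` is interface-preserving here: `daStudy` builds the same driver object by name. The calls below are the ones visible in this commit (the named constructor from daStudy.__init__, and `get_available_algorithms` from AdaoCatalogGenerator.py); running them requires ADAO's `daCore` package on the Python path:

    from daCore.Aidsm import Aidsm

    study = Aidsm("MyStudy")               # as in daStudy.__init__ above
    print(study.get_available_algorithms())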
diff --git a/src/daSalome/daYacsSchemaCreator/methods.py b/src/daSalome/daYacsSchemaCreator/methods.py
index 655441ecfd16af8b5f4409080016683b7254b10d..62c960bb0b1e779893ac01417d273050f879c100 100644
@@ -101,7 +101,7 @@ def create_yacs_proc(study_config):
   ADAO_Case = runtime.createBloc("ADAO_Case_Bloc")
   proc.edAddChild(ADAO_Case)
 
-  # Step 0: create AssimilationStudyObject
+  # Step 0: create assimilation study object
   factory_CAS_node = catalogAd.getNodeFromNodeMap("CreateAssimilationStudy")
   CAS_node = factory_CAS_node.cloneNode("CreateAssimilationStudy")
   CAS_node.getInputPort("Name").edInitPy(study_config["Name"])
@@ -132,7 +132,7 @@ def create_yacs_proc(study_config):
     CAS_node.setExecutionMode(SALOMERuntime.PythonNode.REMOTE_STR)
     CAS_node.setContainer(mycontainer)
 
-  # Adding an observer init node if an user defines some
+  # Add an observer init node if a user defines some
   factory_init_observers_node = catalogAd.getNodeFromNodeMap("SetObserversNode")
   init_observers_node = factory_init_observers_node.cloneNode("SetObservers")
   if "Observers" in list(study_config.keys()):