SALOME platform Git repositories - modules/adao.git/commitdiff
Salome HOME
Minor documentation and code review corrections (26)
authorJean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
Tue, 1 Mar 2022 12:21:54 +0000 (13:21 +0100)
committerJean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
Tue, 1 Mar 2022 12:21:54 +0000 (13:21 +0100)
Safely remove some deprecated numpy.matrix usage

src/daComposant/daAlgorithms/AdjointTest.py
src/daComposant/daAlgorithms/GradientTest.py
src/daComposant/daAlgorithms/LinearityTest.py
src/daComposant/daAlgorithms/LocalSensitivityTest.py
src/daComposant/daAlgorithms/TangentTest.py

index 7c7d1c584ec497015cb530b05852130e9d677e3a..c9bbcecf710ae405447e1f7f998c6973081d970d 100644 (file)
@@ -96,28 +96,28 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         Perturbations = [ 10**i for i in range(self._parameters["EpsilonMinimumExponent"],1) ]
         Perturbations.reverse()
         #
-        X       = numpy.asmatrix(numpy.ravel( Xb )).T
+        X       = numpy.ravel( Xb ).reshape((-1,1))
         NormeX  = numpy.linalg.norm( X )
         if Y is None:
-            Y = numpy.asmatrix(numpy.ravel( Hm( X ) )).T
-        Y = numpy.asmatrix(numpy.ravel( Y )).T
+            Y = numpy.ravel( Hm( X ) ).reshape((-1,1))
+        Y = numpy.ravel( Y ).reshape((-1,1))
         NormeY = numpy.linalg.norm( Y )
         if self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( numpy.ravel(X) )
+            self.StoredVariables["CurrentState"].store( X )
         if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(Y) )
+            self.StoredVariables["SimulatedObservationAtCurrentState"].store( Y )
         #
         if len(self._parameters["InitialDirection"]) == 0:
             dX0 = []
-            for v in X.A1:
+            for v in X:
                 if abs(v) > 1.e-8:
                     dX0.append( numpy.random.normal(0.,abs(v)) )
                 else:
                     dX0.append( numpy.random.normal(0.,X.mean()) )
         else:
-            dX0 = numpy.asmatrix(numpy.ravel( self._parameters["InitialDirection"] ))
+            dX0 = self._parameters["InitialDirection"]
         #
-        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T
+        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.ravel( dX0 )
         #
         # Entete des resultats
         # --------------------
@@ -156,10 +156,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             dX          = amplitude * dX0
             NormedX     = numpy.linalg.norm( dX )
             #
-            TangentFXdX = numpy.asmatrix( Ht( (X,dX) ) )
-            AdjointFXY  = numpy.asmatrix( Ha( (X,Y)  ) )
+            TangentFXdX = numpy.ravel( Ht( (X,dX) ) )
+            AdjointFXY  = numpy.ravel( Ha( (X,Y)  ) )
             #
-            Residu = abs(float(numpy.dot( TangentFXdX.A1 , Y.A1 ) - numpy.dot( dX.A1 , AdjointFXY.A1 )))
+            Residu = abs(float(numpy.dot( TangentFXdX, Y ) - numpy.dot( dX, AdjointFXY )))
             #
             msg = "  %2i  %5.0e   %9.3e   %9.3e   %9.3e   |  %9.3e"%(i,amplitude,NormeX,NormeY,NormedX,Residu)
             msgs += "\n" + __marge + msg
index 5963ecb93f1f2c46e3b0f77a4b0df083f3e86dca..eb18c8fd5a99aea35e96be7a5540b88a56b9157d 100644 (file)
@@ -121,18 +121,18 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         Perturbations = [ 10**i for i in range(self._parameters["EpsilonMinimumExponent"],1) ]
         Perturbations.reverse()
         #
-        X       = numpy.asmatrix(numpy.ravel(    Xb   )).T
-        FX      = numpy.asmatrix(numpy.ravel( Hm( X ) )).T
+        X       = numpy.ravel(    Xb   ).reshape((-1,1))
+        FX      = numpy.ravel( Hm( X ) ).reshape((-1,1))
         NormeX  = numpy.linalg.norm( X )
         NormeFX = numpy.linalg.norm( FX )
         if self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( numpy.ravel(X) )
+            self.StoredVariables["CurrentState"].store( X )
         if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX) )
+            self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX )
         #
         if len(self._parameters["InitialDirection"]) == 0:
             dX0 = []
-            for v in X.A1:
+            for v in X:
                 if abs(v) > 1.e-8:
                     dX0.append( numpy.random.normal(0.,abs(v)) )
                 else:
@@ -140,12 +140,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         else:
             dX0 = numpy.ravel( self._parameters["InitialDirection"] )
         #
-        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T
+        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.ravel( dX0 ).reshape((-1,1))
         #
         if self._parameters["ResiduFormula"] in ["Taylor", "TaylorOnNorm"]:
             dX1      = float(self._parameters["AmplitudeOfTangentPerturbation"]) * dX0
             GradFxdX = Ht( (X, dX1) )
-            GradFxdX = numpy.asmatrix(numpy.ravel( GradFxdX )).T
+            GradFxdX = numpy.ravel( GradFxdX ).reshape((-1,1))
             GradFxdX = float(1./self._parameters["AmplitudeOfTangentPerturbation"]) * GradFxdX
         #
         # Entete des resultats
@@ -239,7 +239,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             dX      = amplitude * dX0
             #
             FX_plus_dX = Hm( X + dX )
-            FX_plus_dX = numpy.asmatrix(numpy.ravel( FX_plus_dX )).T
+            FX_plus_dX = numpy.ravel( FX_plus_dX ).reshape((-1,1))
             #
             if self._toStore("CurrentState"):
                 self.StoredVariables["CurrentState"].store( numpy.ravel(X + dX) )
index d4170e677bcddb5a86a834606edb22e553e58500..fda2bc0ed98527162da1983691d09b15b6b794c5 100644 (file)
@@ -112,8 +112,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         #
         # Calcul du point courant
         # -----------------------
-        Xn      = numpy.asmatrix(numpy.ravel( Xb )).T
-        FX      = numpy.asmatrix(numpy.ravel( Hm( Xn ) )).T
+        Xn      = numpy.ravel(     Xb   ).reshape((-1,1))
+        FX      = numpy.ravel( Hm( Xn ) ).reshape((-1,1))
         NormeX  = numpy.linalg.norm( Xn )
         NormeFX = numpy.linalg.norm( FX )
         if self._toStore("CurrentState"):
@@ -125,7 +125,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         # ---------------------------------------------
         if len(self._parameters["InitialDirection"]) == 0:
             dX0 = []
-            for v in Xn.A1:
+            for v in Xn:
                 if abs(v) > 1.e-8:
                     dX0.append( numpy.random.normal(0.,abs(v)) )
                 else:
@@ -133,14 +133,14 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         else:
             dX0 = numpy.ravel( self._parameters["InitialDirection"] )
         #
-        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T
+        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.ravel( dX0 ).reshape((-1,1))
         #
         # Calcul du gradient au point courant X pour l'increment dX
         # ---------------------------------------------------------
         if self._parameters["ResiduFormula"] in ["Taylor", "NominalTaylor", "NominalTaylorRMS"]:
             dX1      = float(self._parameters["AmplitudeOfTangentPerturbation"]) * dX0
             GradFxdX = Ht( (Xn, dX1) )
-            GradFxdX = numpy.asmatrix(numpy.ravel( GradFxdX )).T
+            GradFxdX = numpy.ravel( GradFxdX ).reshape((-1,1))
             GradFxdX = float(1./self._parameters["AmplitudeOfTangentPerturbation"]) * GradFxdX
         #
         # Entete des resultats
@@ -256,15 +256,15 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             #
             if self._parameters["ResiduFormula"] == "CenteredDL":
                 if self._toStore("CurrentState"):
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn + dX) )
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn - dX) )
+                    self.StoredVariables["CurrentState"].store( Xn + dX )
+                    self.StoredVariables["CurrentState"].store( Xn - dX )
                 #
-                FX_plus_dX  = numpy.asmatrix(numpy.ravel( Hm( Xn + dX ) )).T
-                FX_moins_dX = numpy.asmatrix(numpy.ravel( Hm( Xn - dX ) )).T
+                FX_plus_dX  = numpy.ravel( Hm( Xn + dX ) ).reshape((-1,1))
+                FX_moins_dX = numpy.ravel( Hm( Xn - dX ) ).reshape((-1,1))
                 #
                 if self._toStore("SimulatedObservationAtCurrentState"):
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_plus_dX) )
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_moins_dX) )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_plus_dX )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_moins_dX )
                 #
                 Residu = numpy.linalg.norm( FX_plus_dX + FX_moins_dX - 2 * FX ) / NormeFX
                 #
@@ -274,12 +274,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             #
             if self._parameters["ResiduFormula"] == "Taylor":
                 if self._toStore("CurrentState"):
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn + dX) )
+                    self.StoredVariables["CurrentState"].store( Xn + dX )
                 #
-                FX_plus_dX  = numpy.asmatrix(numpy.ravel( Hm( Xn + dX ) )).T
+                FX_plus_dX  = numpy.ravel( Hm( Xn + dX ) ).reshape((-1,1))
                 #
                 if self._toStore("SimulatedObservationAtCurrentState"):
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_plus_dX) )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_plus_dX )
                 #
                 Residu = numpy.linalg.norm( FX_plus_dX - FX - amplitude * GradFxdX ) / NormeFX
                 #
@@ -289,18 +289,18 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             #
             if self._parameters["ResiduFormula"] == "NominalTaylor":
                 if self._toStore("CurrentState"):
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn + dX) )
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn - dX) )
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(dX) )
+                    self.StoredVariables["CurrentState"].store( Xn + dX )
+                    self.StoredVariables["CurrentState"].store( Xn - dX )
+                    self.StoredVariables["CurrentState"].store( dX )
                 #
-                FX_plus_dX  = numpy.asmatrix(numpy.ravel( Hm( Xn + dX ) )).T
-                FX_moins_dX = numpy.asmatrix(numpy.ravel( Hm( Xn - dX ) )).T
-                FdX         = numpy.asmatrix(numpy.ravel( Hm( dX ) )).T
+                FX_plus_dX  = numpy.ravel( Hm( Xn + dX ) ).reshape((-1,1))
+                FX_moins_dX = numpy.ravel( Hm( Xn - dX ) ).reshape((-1,1))
+                FdX         = numpy.ravel( Hm( dX )      ).reshape((-1,1))
                 #
                 if self._toStore("SimulatedObservationAtCurrentState"):
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_plus_dX) )
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_moins_dX) )
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FdX) )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_plus_dX )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_moins_dX )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FdX )
                 #
                 Residu = max(
                     numpy.linalg.norm( FX_plus_dX  - amplitude * FdX ) / NormeFX,
@@ -313,18 +313,18 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             #
             if self._parameters["ResiduFormula"] == "NominalTaylorRMS":
                 if self._toStore("CurrentState"):
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn + dX) )
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn - dX) )
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(dX) )
+                    self.StoredVariables["CurrentState"].store( Xn + dX )
+                    self.StoredVariables["CurrentState"].store( Xn - dX )
+                    self.StoredVariables["CurrentState"].store( dX )
                 #
-                FX_plus_dX  = numpy.asmatrix(numpy.ravel( Hm( Xn + dX ) )).T
-                FX_moins_dX = numpy.asmatrix(numpy.ravel( Hm( Xn - dX ) )).T
-                FdX         = numpy.asmatrix(numpy.ravel( Hm( dX ) )).T
+                FX_plus_dX  = numpy.ravel( Hm( Xn + dX ) ).reshape((-1,1))
+                FX_moins_dX = numpy.ravel( Hm( Xn - dX ) ).reshape((-1,1))
+                FdX         = numpy.ravel( Hm( dX )      ).reshape((-1,1))
                 #
                 if self._toStore("SimulatedObservationAtCurrentState"):
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_plus_dX) )
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_moins_dX) )
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FdX) )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_plus_dX )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_moins_dX )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FdX )
                 #
                 Residu = max(
                     RMS( FX, FX_plus_dX   - amplitude * FdX ) / NormeFX,
index 556e484a7f11b7e1c96730a6f0cba9747d9665f7..69b5a545c255de58d8961588d08983d9db34577b 100644 (file)
@@ -79,7 +79,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 HXb = HO["AppliedInX"]["HXb"]
             else:
                 HXb = Ht @ Xb
-            HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+            HXb = numpy.ravel( HXb ).reshape((-1,1))
             if Y.size != HXb.size:
                 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
             if max(Y.shape) != max(HXb.shape):
index 356976134a250bb14b46805ecc28a2625bd6e3f1..cd0565f0774127280387a3d09cc32db778e2098e 100644 (file)
@@ -105,20 +105,20 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         #
         # Calcul du point courant
         # -----------------------
-        Xn      = numpy.asmatrix(numpy.ravel( Xb )).T
-        FX      = numpy.asmatrix(numpy.ravel( Hm( Xn ) )).T
+        Xn      = numpy.ravel( Xb ).reshape((-1,1))
+        FX      = numpy.ravel( Hm( Xn ) ).reshape((-1,1))
         NormeX  = numpy.linalg.norm( Xn )
         NormeFX = numpy.linalg.norm( FX )
         if self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( numpy.ravel(Xn) )
+            self.StoredVariables["CurrentState"].store( Xn )
         if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX) )
+            self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX )
         #
         # Fabrication de la direction de l'increment dX
         # ---------------------------------------------
         if len(self._parameters["InitialDirection"]) == 0:
             dX0 = []
-            for v in Xn.A1:
+            for v in Xn:
                 if abs(v) > 1.e-8:
                     dX0.append( numpy.random.normal(0.,abs(v)) )
                 else:
@@ -126,14 +126,14 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         else:
             dX0 = numpy.ravel( self._parameters["InitialDirection"] )
         #
-        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T
+        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.ravel( dX0 ).reshape((-1,1))
         #
         # Calcul du gradient au point courant X pour l'increment dX
         # qui est le tangent en X multiplie par dX
         # ---------------------------------------------------------
         dX1      = float(self._parameters["AmplitudeOfTangentPerturbation"]) * dX0
         GradFxdX = Ht( (Xn, dX1) )
-        GradFxdX = numpy.asmatrix(numpy.ravel( GradFxdX )).T
+        GradFxdX = numpy.ravel( GradFxdX ).reshape((-1,1))
         GradFxdX = float(1./self._parameters["AmplitudeOfTangentPerturbation"]) * GradFxdX
         NormeGX  = numpy.linalg.norm( GradFxdX )
         #
@@ -187,7 +187,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             dX      = amplitude * dX0
             #
             if self._parameters["ResiduFormula"] == "Taylor":
-                FX_plus_dX  = numpy.asmatrix(numpy.ravel( Hm( Xn + dX ) )).T
+                FX_plus_dX  = numpy.ravel( Hm( Xn + dX ) ).reshape((-1,1))
                 #
                 Residu = numpy.linalg.norm( FX_plus_dX - FX ) / (amplitude * NormeGX)
                 #