From 6721b1675c534768703b98144d143fec71a14e1d Mon Sep 17 00:00:00 2001
From: Jean-Philippe ARGAUD
Date: Tue, 1 Mar 2022 13:21:54 +0100
Subject: [PATCH] Minor documentation and code review corrections (26)

Safely remove some deprecated numpy.matrix
---
 src/daComposant/daAlgorithms/AdjointTest.py   | 22 +++----
 src/daComposant/daAlgorithms/GradientTest.py  | 16 ++---
 src/daComposant/daAlgorithms/LinearityTest.py | 64 +++++++++----------
 .../daAlgorithms/LocalSensitivityTest.py      |  2 +-
 src/daComposant/daAlgorithms/TangentTest.py   | 16 ++---
 5 files changed, 60 insertions(+), 60 deletions(-)

diff --git a/src/daComposant/daAlgorithms/AdjointTest.py b/src/daComposant/daAlgorithms/AdjointTest.py
index 7c7d1c5..c9bbcec 100644
--- a/src/daComposant/daAlgorithms/AdjointTest.py
+++ b/src/daComposant/daAlgorithms/AdjointTest.py
@@ -96,28 +96,28 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         Perturbations = [ 10**i for i in range(self._parameters["EpsilonMinimumExponent"],1) ]
         Perturbations.reverse()
         #
-        X = numpy.asmatrix(numpy.ravel( Xb )).T
+        X = numpy.ravel( Xb ).reshape((-1,1))
         NormeX = numpy.linalg.norm( X )
         if Y is None:
-            Y = numpy.asmatrix(numpy.ravel( Hm( X ) )).T
-        Y = numpy.asmatrix(numpy.ravel( Y )).T
+            Y = numpy.ravel( Hm( X ) ).reshape((-1,1))
+        Y = numpy.ravel( Y ).reshape((-1,1))
         NormeY = numpy.linalg.norm( Y )
         if self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( numpy.ravel(X) )
+            self.StoredVariables["CurrentState"].store( X )
         if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(Y) )
+            self.StoredVariables["SimulatedObservationAtCurrentState"].store( Y )
         #
         if len(self._parameters["InitialDirection"]) == 0:
             dX0 = []
-            for v in X.A1:
+            for v in X:
                 if abs(v) > 1.e-8:
                     dX0.append( numpy.random.normal(0.,abs(v)) )
                 else:
                     dX0.append( numpy.random.normal(0.,X.mean()) )
         else:
-            dX0 = numpy.asmatrix(numpy.ravel( self._parameters["InitialDirection"] ))
+            dX0 = self._parameters["InitialDirection"]
         #
-        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T
+        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.ravel( dX0 )
         #
         # Entete des resultats
         # --------------------
@@ -156,10 +156,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             dX = amplitude * dX0
             NormedX = numpy.linalg.norm( dX )
             #
-            TangentFXdX = numpy.asmatrix( Ht( (X,dX) ) )
-            AdjointFXY = numpy.asmatrix( Ha( (X,Y) ) )
+            TangentFXdX = numpy.ravel( Ht( (X,dX) ) )
+            AdjointFXY = numpy.ravel( Ha( (X,Y) ) )
             #
-            Residu = abs(float(numpy.dot( TangentFXdX.A1 , Y.A1 ) - numpy.dot( dX.A1 , AdjointFXY.A1 )))
+            Residu = abs(float(numpy.dot( TangentFXdX, Y ) - numpy.dot( dX, AdjointFXY )))
             #
             msg = " %2i %5.0e %9.3e %9.3e %9.3e | %9.3e"%(i,amplitude,NormeX,NormeY,NormedX,Residu)
             msgs += "\n" + __marge + msg
diff --git a/src/daComposant/daAlgorithms/GradientTest.py b/src/daComposant/daAlgorithms/GradientTest.py
index 5963ecb..eb18c8f 100644
--- a/src/daComposant/daAlgorithms/GradientTest.py
+++ b/src/daComposant/daAlgorithms/GradientTest.py
@@ -121,18 +121,18 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         Perturbations = [ 10**i for i in range(self._parameters["EpsilonMinimumExponent"],1) ]
         Perturbations.reverse()
         #
-        X = numpy.asmatrix(numpy.ravel( Xb )).T
-        FX = numpy.asmatrix(numpy.ravel( Hm( X ) )).T
+        X = numpy.ravel( Xb ).reshape((-1,1))
+        FX = numpy.ravel( Hm( X ) ).reshape((-1,1))
         NormeX = numpy.linalg.norm( X )
         NormeFX = numpy.linalg.norm( FX )
         if self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( numpy.ravel(X) )
+            self.StoredVariables["CurrentState"].store( X )
         if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX) )
+            self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX )
         #
         if len(self._parameters["InitialDirection"]) == 0:
             dX0 = []
-            for v in X.A1:
+            for v in X:
                 if abs(v) > 1.e-8:
                     dX0.append( numpy.random.normal(0.,abs(v)) )
                 else:
@@ -140,12 +140,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         else:
             dX0 = numpy.ravel( self._parameters["InitialDirection"] )
         #
-        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T
+        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.ravel( dX0 ).reshape((-1,1))
         #
         if self._parameters["ResiduFormula"] in ["Taylor", "TaylorOnNorm"]:
             dX1 = float(self._parameters["AmplitudeOfTangentPerturbation"]) * dX0
             GradFxdX = Ht( (X, dX1) )
-            GradFxdX = numpy.asmatrix(numpy.ravel( GradFxdX )).T
+            GradFxdX = numpy.ravel( GradFxdX ).reshape((-1,1))
             GradFxdX = float(1./self._parameters["AmplitudeOfTangentPerturbation"]) * GradFxdX
         #
         # Entete des resultats
@@ -239,7 +239,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             dX = amplitude * dX0
            #
             FX_plus_dX = Hm( X + dX )
-            FX_plus_dX = numpy.asmatrix(numpy.ravel( FX_plus_dX )).T
+            FX_plus_dX = numpy.ravel( FX_plus_dX ).reshape((-1,1))
             #
             if self._toStore("CurrentState"):
                 self.StoredVariables["CurrentState"].store( numpy.ravel(X + dX) )
diff --git a/src/daComposant/daAlgorithms/LinearityTest.py b/src/daComposant/daAlgorithms/LinearityTest.py
index d4170e6..fda2bc0 100644
--- a/src/daComposant/daAlgorithms/LinearityTest.py
+++ b/src/daComposant/daAlgorithms/LinearityTest.py
@@ -112,8 +112,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         #
         # Calcul du point courant
         # -----------------------
-        Xn = numpy.asmatrix(numpy.ravel( Xb )).T
-        FX = numpy.asmatrix(numpy.ravel( Hm( Xn ) )).T
+        Xn = numpy.ravel( Xb ).reshape((-1,1))
+        FX = numpy.ravel( Hm( Xn ) ).reshape((-1,1))
         NormeX = numpy.linalg.norm( Xn )
         NormeFX = numpy.linalg.norm( FX )
         if self._toStore("CurrentState"):
@@ -125,7 +125,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         # ---------------------------------------------
         if len(self._parameters["InitialDirection"]) == 0:
             dX0 = []
-            for v in Xn.A1:
+            for v in Xn:
                 if abs(v) > 1.e-8:
                     dX0.append( numpy.random.normal(0.,abs(v)) )
                 else:
@@ -133,14 +133,14 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         else:
             dX0 = numpy.ravel( self._parameters["InitialDirection"] )
         #
-        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T
+        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.ravel( dX0 ).reshape((-1,1))
         #
         # Calcul du gradient au point courant X pour l'increment dX
         # ---------------------------------------------------------
         if self._parameters["ResiduFormula"] in ["Taylor", "NominalTaylor", "NominalTaylorRMS"]:
             dX1 = float(self._parameters["AmplitudeOfTangentPerturbation"]) * dX0
             GradFxdX = Ht( (Xn, dX1) )
-            GradFxdX = numpy.asmatrix(numpy.ravel( GradFxdX )).T
+            GradFxdX = numpy.ravel( GradFxdX ).reshape((-1,1))
             GradFxdX = float(1./self._parameters["AmplitudeOfTangentPerturbation"]) * GradFxdX
         #
         # Entete des resultats
@@ -256,15 +256,15 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             #
             if self._parameters["ResiduFormula"] == "CenteredDL":
                 if self._toStore("CurrentState"):
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn + dX) )
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn - dX) )
+                    self.StoredVariables["CurrentState"].store( Xn + dX )
+                    self.StoredVariables["CurrentState"].store( Xn - dX )
                 #
-                FX_plus_dX = numpy.asmatrix(numpy.ravel( Hm( Xn + dX ) )).T
-                FX_moins_dX = numpy.asmatrix(numpy.ravel( Hm( Xn - dX ) )).T
+                FX_plus_dX = numpy.ravel( Hm( Xn + dX ) ).reshape((-1,1))
+                FX_moins_dX = numpy.ravel( Hm( Xn - dX ) ).reshape((-1,1))
                 #
                 if self._toStore("SimulatedObservationAtCurrentState"):
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_plus_dX) )
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_moins_dX) )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_plus_dX )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_moins_dX )
                 #
                 Residu = numpy.linalg.norm( FX_plus_dX + FX_moins_dX - 2 * FX ) / NormeFX
                 #
@@ -274,12 +274,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             #
             if self._parameters["ResiduFormula"] == "Taylor":
                 if self._toStore("CurrentState"):
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn + dX) )
+                    self.StoredVariables["CurrentState"].store( Xn + dX )
                 #
-                FX_plus_dX = numpy.asmatrix(numpy.ravel( Hm( Xn + dX ) )).T
+                FX_plus_dX = numpy.ravel( Hm( Xn + dX ) ).reshape((-1,1))
                 #
                 if self._toStore("SimulatedObservationAtCurrentState"):
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_plus_dX) )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_plus_dX )
                 #
                 Residu = numpy.linalg.norm( FX_plus_dX - FX - amplitude * GradFxdX ) / NormeFX
                 #
@@ -289,18 +289,18 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             #
             if self._parameters["ResiduFormula"] == "NominalTaylor":
                 if self._toStore("CurrentState"):
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn + dX) )
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn - dX) )
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(dX) )
+                    self.StoredVariables["CurrentState"].store( Xn + dX )
+                    self.StoredVariables["CurrentState"].store( Xn - dX )
+                    self.StoredVariables["CurrentState"].store( dX )
                 #
-                FX_plus_dX = numpy.asmatrix(numpy.ravel( Hm( Xn + dX ) )).T
-                FX_moins_dX = numpy.asmatrix(numpy.ravel( Hm( Xn - dX ) )).T
-                FdX = numpy.asmatrix(numpy.ravel( Hm( dX ) )).T
+                FX_plus_dX = numpy.ravel( Hm( Xn + dX ) ).reshape((-1,1))
+                FX_moins_dX = numpy.ravel( Hm( Xn - dX ) ).reshape((-1,1))
+                FdX = numpy.ravel( Hm( dX ) ).reshape((-1,1))
                 #
                 if self._toStore("SimulatedObservationAtCurrentState"):
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_plus_dX) )
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_moins_dX) )
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FdX) )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_plus_dX )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_moins_dX )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FdX )
                 #
                 Residu = max(
                     numpy.linalg.norm( FX_plus_dX - amplitude * FdX ) / NormeFX,
@@ -313,18 +313,18 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             #
             if self._parameters["ResiduFormula"] == "NominalTaylorRMS":
                 if self._toStore("CurrentState"):
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn + dX) )
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(Xn - dX) )
-                    self.StoredVariables["CurrentState"].store( numpy.ravel(dX) )
+                    self.StoredVariables["CurrentState"].store( Xn + dX )
+                    self.StoredVariables["CurrentState"].store( Xn - dX )
+                    self.StoredVariables["CurrentState"].store( dX )
                 #
-                FX_plus_dX = numpy.asmatrix(numpy.ravel( Hm( Xn + dX ) )).T
-                FX_moins_dX = numpy.asmatrix(numpy.ravel( Hm( Xn - dX ) )).T
-                FdX = numpy.asmatrix(numpy.ravel( Hm( dX ) )).T
+                FX_plus_dX = numpy.ravel( Hm( Xn + dX ) ).reshape((-1,1))
+                FX_moins_dX = numpy.ravel( Hm( Xn - dX ) ).reshape((-1,1))
+                FdX = numpy.ravel( Hm( dX ) ).reshape((-1,1))
                 #
                 if self._toStore("SimulatedObservationAtCurrentState"):
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_plus_dX) )
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX_moins_dX) )
-                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FdX) )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_plus_dX )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX_moins_dX )
+                    self.StoredVariables["SimulatedObservationAtCurrentState"].store( FdX )
                 #
                 Residu = max(
                     RMS( FX, FX_plus_dX - amplitude * FdX ) / NormeFX,
diff --git a/src/daComposant/daAlgorithms/LocalSensitivityTest.py b/src/daComposant/daAlgorithms/LocalSensitivityTest.py
index 556e484..69b5a54 100644
--- a/src/daComposant/daAlgorithms/LocalSensitivityTest.py
+++ b/src/daComposant/daAlgorithms/LocalSensitivityTest.py
@@ -79,7 +79,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
            HXb = HO["AppliedInX"]["HXb"]
         else:
             HXb = Ht @ Xb
-        HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+        HXb = numpy.ravel( HXb ).reshape((-1,1))
         if Y.size != HXb.size:
             raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
         if max(Y.shape) != max(HXb.shape):
diff --git a/src/daComposant/daAlgorithms/TangentTest.py b/src/daComposant/daAlgorithms/TangentTest.py
index 3569761..cd0565f 100644
--- a/src/daComposant/daAlgorithms/TangentTest.py
+++ b/src/daComposant/daAlgorithms/TangentTest.py
@@ -105,20 +105,20 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         #
         # Calcul du point courant
         # -----------------------
-        Xn = numpy.asmatrix(numpy.ravel( Xb )).T
-        FX = numpy.asmatrix(numpy.ravel( Hm( Xn ) )).T
+        Xn = numpy.ravel( Xb ).reshape((-1,1))
+        FX = numpy.ravel( Hm( Xn ) ).reshape((-1,1))
         NormeX = numpy.linalg.norm( Xn )
         NormeFX = numpy.linalg.norm( FX )
         if self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( numpy.ravel(Xn) )
+            self.StoredVariables["CurrentState"].store( Xn )
         if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(FX) )
+            self.StoredVariables["SimulatedObservationAtCurrentState"].store( FX )
         #
         # Fabrication de la direction de l'increment dX
         # ---------------------------------------------
         if len(self._parameters["InitialDirection"]) == 0:
             dX0 = []
-            for v in Xn.A1:
+            for v in Xn:
                 if abs(v) > 1.e-8:
                     dX0.append( numpy.random.normal(0.,abs(v)) )
                 else:
@@ -126,14 +126,14 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         else:
             dX0 = numpy.ravel( self._parameters["InitialDirection"] )
         #
-        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T
+        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.ravel( dX0 ).reshape((-1,1))
         #
         # Calcul du gradient au point courant X pour l'increment dX
         # qui est le tangent en X multiplie par dX
         # ---------------------------------------------------------
         dX1 = float(self._parameters["AmplitudeOfTangentPerturbation"]) * dX0
         GradFxdX = Ht( (Xn, dX1) )
-        GradFxdX = numpy.asmatrix(numpy.ravel( GradFxdX )).T
+        GradFxdX = numpy.ravel( GradFxdX ).reshape((-1,1))
         GradFxdX = float(1./self._parameters["AmplitudeOfTangentPerturbation"]) * GradFxdX
         NormeGX = numpy.linalg.norm( GradFxdX )
         #
@@ -187,7 +187,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             dX = amplitude * dX0
             #
             if self._parameters["ResiduFormula"] == "Taylor":
-                FX_plus_dX = numpy.asmatrix(numpy.ravel( Hm( Xn + dX ) )).T
+                FX_plus_dX = numpy.ravel( Hm( Xn + dX ) ).reshape((-1,1))
                 #
                 Residu = numpy.linalg.norm( FX_plus_dX - FX ) / (amplitude * NormeGX)
                 #
-- 
2.39.2
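
For reference, the mechanical substitution applied throughout this patch can be checked in isolation. The sketch below is illustrative only and is not part of the ADAO sources (the names x, old_column and new_column are invented for the example); it assumes any array-like input and shows that the new ndarray idiom reproduces the deprecated numpy.matrix one:

    import numpy

    x = [1.0, 2.0, 3.0]  # any array-like state or observation vector

    # Deprecated idiom removed by the patch: an (n,1) numpy.matrix column.
    old_column = numpy.asmatrix(numpy.ravel(x)).T

    # Replacement idiom introduced by the patch: an (n,1) ndarray column.
    new_column = numpy.ravel(x).reshape((-1, 1))

    # Same shape and same values; only the container type changes.
    assert old_column.shape == new_column.shape == (3, 1)
    assert numpy.allclose(numpy.asarray(old_column), new_column)

    # The flat view formerly obtained through the matrix-only .A1
    # attribute becomes a plain numpy.ravel(), as in the dot products
    # rewritten in AdjointTest.py.
    assert numpy.allclose(old_column.A1, numpy.ravel(new_column))

numpy.matrix has emitted a PendingDeprecationWarning since NumPy 1.15, and (n,1) ndarray columns keep matrix-style products available through the @ operator, which is what makes this one-for-one replacement safe.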