From c2860fa79f8579049b85f42f850ebe2cd13d2bc6 Mon Sep 17 00:00:00 2001 From: Jean-Philippe ARGAUD Date: Wed, 26 Sep 2012 12:46:36 +0200 Subject: [PATCH] Performance improvements by reducing logging and flatten --- resources/ADAOSchemaCatalog.xml | 4 +- src/daComposant/daAlgorithms/3DVAR.py | 46 ++++--------- src/daComposant/daAlgorithms/AdjointTest.py | 11 ++-- src/daComposant/daAlgorithms/Blue.py | 34 ++-------- src/daComposant/daAlgorithms/EnsembleBlue.py | 8 --- src/daComposant/daAlgorithms/GradientTest.py | 15 ++--- src/daComposant/daAlgorithms/KalmanFilter.py | 14 ---- .../daAlgorithms/LinearLeastSquares.py | 11 +--- .../daAlgorithms/NonLinearLeastSquares.py | 66 +++++-------------- .../daAlgorithms/ParticleSwarmOptimization.py | 46 +++++++------ .../daAlgorithms/QuantileRegression.py | 48 +++++++------- src/daComposant/daCore/AssimilationStudy.py | 4 +- .../daNumerics/ApproximatedDerivatives.py | 14 ++-- src/daComposant/daNumerics/mmqr.py | 6 +- .../daYacsIntegration/daOptimizerLoop.py | 22 ++++--- .../daSalome/test017_3DVAR_function_script.py | 1 - .../daSalome/test_aster_zzzz159a_functions.py | 1 - 17 files changed, 121 insertions(+), 230 deletions(-) diff --git a/resources/ADAOSchemaCatalog.xml b/resources/ADAOSchemaCatalog.xml index 3c34efe..273ef16 100644 --- a/resources/ADAOSchemaCatalog.xml +++ b/resources/ADAOSchemaCatalog.xml @@ -485,13 +485,13 @@ logging.debug(" switching to value : "+str(switch_value)) diff --git a/src/daComposant/daAlgorithms/3DVAR.py b/src/daComposant/daAlgorithms/3DVAR.py index 7d788a8..6cd3fee 100644 --- a/src/daComposant/daAlgorithms/3DVAR.py +++ b/src/daComposant/daAlgorithms/3DVAR.py @@ -88,9 +88,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): - """ - Calcul de l'estimateur 3D-VAR - """ logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) # @@ -116,12 +113,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Utilisation éventuelle d'un vecteur H(Xb) précalculé # ---------------------------------------------------- if H["AppliedToX"] is not None and H["AppliedToX"].has_key("HXb"): - logging.debug("%s Utilisation de HXb"%self._name) HXb = H["AppliedToX"]["HXb"] else: - logging.debug("%s Calcul de Hm(Xb)"%self._name) HXb = Hm( Xb ) - HXb = numpy.asmatrix(HXb).flatten().T + HXb = numpy.asmatrix(numpy.ravel( HXb )).T # # Calcul de l'innovation # ---------------------- @@ -130,7 +125,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): if max(Y.shape) != max(HXb.shape): raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape)) d = Y - HXb - logging.debug("%s Innovation d = %s"%(self._name, d)) # # Précalcul des inversions de B et R # ---------------------------------- @@ -151,16 +145,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Définition de la fonction-coût # ------------------------------ def CostFunction(x): - _X = numpy.asmatrix(x).flatten().T - logging.debug("%s CostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten())) + _X = numpy.asmatrix(numpy.ravel( x )).T _HX = Hm( _X ) - _HX = numpy.asmatrix(_HX).flatten().T + _HX = numpy.asmatrix(numpy.ravel( _HX )).T Jb = 0.5 * (_X - Xb).T * BI * (_X - Xb) Jo = 0.5 * (Y - _HX).T * RI * (Y - _HX) J = float( Jb ) + float( Jo ) - logging.debug("%s CostFunction Jb = %s"%(self._name, Jb)) - logging.debug("%s 
CostFunction Jo = %s"%(self._name, Jo)) - logging.debug("%s CostFunction J = %s"%(self._name, J)) if self._parameters["StoreInternalVariables"]: self.StoredVariables["CurrentState"].store( _X.A1 ) self.StoredVariables["CostFunctionJb"].store( Jb ) @@ -169,16 +159,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): return J # def GradientOfCostFunction(x): - _X = numpy.asmatrix(x).flatten().T - logging.debug("%s GradientOfCostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten())) + _X = numpy.asmatrix(numpy.ravel( x )).T _HX = Hm( _X ) - _HX = numpy.asmatrix(_HX).flatten().T + _HX = numpy.asmatrix(numpy.ravel( _HX )).T GradJb = BI * (_X - Xb) GradJo = - Ha( (_X, RI * (Y - _HX)) ) - GradJ = numpy.asmatrix( GradJb ).flatten().T + numpy.asmatrix( GradJo ).flatten().T - logging.debug("%s GradientOfCostFunction GradJb = %s"%(self._name, numpy.asmatrix( GradJb ).flatten())) - logging.debug("%s GradientOfCostFunction GradJo = %s"%(self._name, numpy.asmatrix( GradJo ).flatten())) - logging.debug("%s GradientOfCostFunction GradJ = %s"%(self._name, numpy.asmatrix( GradJ ).flatten())) + GradJ = numpy.asmatrix( numpy.ravel( GradJb ) + numpy.ravel( GradJo ) ).T return GradJ.A1 # # Point de démarrage de l'optimisation : Xini = Xb @@ -187,7 +173,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Xini = Xb.A1.tolist() else: Xini = list(Xb) - logging.debug("%s Point de démarrage Xini = %s"%(self._name, Xini)) # # Minimisation de la fonctionnelle # -------------------------------- @@ -261,16 +246,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): if self._parameters["StoreInternalVariables"]: Minimum = self.StoredVariables["CurrentState"].valueserie(step = StepMin) # - logging.debug("%s %s Step of min cost = %s"%(self._name, self._parameters["Minimizer"], StepMin)) - logging.debug("%s %s Minimum cost = %s"%(self._name, self._parameters["Minimizer"], MinJ)) - logging.debug("%s %s Minimum state = %s"%(self._name, self._parameters["Minimizer"], Minimum)) - logging.debug("%s %s Nb of F = %s"%(self._name, self._parameters["Minimizer"], nfeval)) - logging.debug("%s %s RetCode = %s"%(self._name, self._parameters["Minimizer"], rc)) - # # Obtention de l'analyse # ---------------------- - Xa = numpy.asmatrix(Minimum).flatten().T - logging.debug("%s Analyse Xa = %s"%(self._name, Xa)) + Xa = numpy.asmatrix(numpy.ravel( Minimum )).T # self.StoredVariables["Analysis"].store( Xa.A1 ) # @@ -283,7 +261,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): _ee = numpy.matrix(numpy.zeros(nb)).T _ee[i] = 1. 
_HmEE = Hm(_ee) - _HmEE = numpy.asmatrix(_HmEE).flatten().T + _HmEE = numpy.asmatrix(numpy.ravel( _HmEE )).T HessienneI.append( ( BI*_ee + Ha((Xa,RI*_HmEE)) ).A1 ) HessienneI = numpy.matrix( HessienneI ) A = HessienneI.I @@ -297,13 +275,13 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Calculs et/ou stockages supplémentaires # --------------------------------------- if "Innovation" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["Innovation"].store( numpy.asmatrix(d).flatten().A1 ) + self.StoredVariables["Innovation"].store( numpy.ravel(d) ) if "BMA" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["BMA"].store( numpy.asmatrix(Xb - Xa).flatten().A1 ) + self.StoredVariables["BMA"].store( numpy.ravel(Xb - Xa) ) if "OMA" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["OMA"].store( numpy.asmatrix(Y - Hm(Xa)).flatten().A1 ) + self.StoredVariables["OMA"].store( numpy.ravel(Y - Hm(Xa)) ) if "OMB" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["OMB"].store( numpy.asmatrix(d).flatten().A1 ) + self.StoredVariables["OMB"].store( numpy.ravel(d) ) if "SigmaObs2" in self._parameters["StoreSupplementaryCalculations"]: self.StoredVariables["SigmaObs2"].store( float( (d.T * (Y-Hm(Xa))) / R.trace() ) ) # diff --git a/src/daComposant/daAlgorithms/AdjointTest.py b/src/daComposant/daAlgorithms/AdjointTest.py index e1c0fe3..3a69c97 100644 --- a/src/daComposant/daAlgorithms/AdjointTest.py +++ b/src/daComposant/daAlgorithms/AdjointTest.py @@ -90,11 +90,11 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # # Calcul du point courant # ----------------------- - X = numpy.asmatrix(Xb).flatten().T + X = numpy.asmatrix(numpy.ravel( Xb )).T NormeX = numpy.linalg.norm( X ) if Y is None: - Y = numpy.asmatrix( Hm( X ) ).flatten().T - Y = numpy.asmatrix(Y).flatten().T + Y = numpy.asmatrix(numpy.ravel( Hm( X ) )).T + Y = numpy.asmatrix(numpy.ravel( Y )).T NormeY = numpy.linalg.norm( Y ) # # Fabrication de la direction de l'incrément dX @@ -107,7 +107,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): else: dX0.append( numpy.random.normal(0.,X.mean()) ) else: - dX0 = numpy.asmatrix(self._parameters["InitialDirection"]).flatten() + dX0 = numpy.asmatrix(numpy.ravel( self._parameters["InitialDirection"] )) # dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T # @@ -145,8 +145,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Boucle sur les perturbations # ---------------------------- for i,amplitude in enumerate(Perturbations): - logging.debug("%s Etape de calcul numéro %i, avec la perturbation %8.3e"%(self._name, i, amplitude)) - # dX = amplitude * dX0 NormedX = numpy.linalg.norm( dX ) # @@ -164,7 +162,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # # Sorties eventuelles # ------------------- - logging.debug("%s Résultats :\n%s"%(self._name, msgs)) print print "Results of adjoint stability check:" print msgs diff --git a/src/daComposant/daAlgorithms/Blue.py b/src/daComposant/daAlgorithms/Blue.py index 906989f..6fa42d3 100644 --- a/src/daComposant/daAlgorithms/Blue.py +++ b/src/daComposant/daAlgorithms/Blue.py @@ -39,9 +39,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): - """ - Calcul de l'estimateur BLUE (ou Kalman simple, ou Interpolation Optimale) - """ logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de 
%.1f Mo"%(self._name, m.getUsedMemory("M"))) # @@ -57,12 +54,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Utilisation éventuelle d'un vecteur H(Xb) précalculé # ---------------------------------------------------- if H["AppliedToX"] is not None and H["AppliedToX"].has_key("HXb"): - logging.debug("%s Utilisation de HXb"%self._name) HXb = H["AppliedToX"]["HXb"] else: - logging.debug("%s Calcul de Hm * Xb"%self._name) HXb = Hm * Xb - HXb = numpy.asmatrix(HXb).flatten().T + HXb = numpy.asmatrix(numpy.ravel( HXb )).T # # Précalcul des inversions de B et R # ---------------------------------- @@ -88,30 +83,16 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): if max(Y.shape) != max(HXb.shape): raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape)) d = Y - HXb - Jb = 0. - Jo = 0.5 * d.T * RI * d - J = float( Jb ) + float( Jo ) - logging.debug("%s Innovation d = %s"%(self._name, d)) - logging.debug("%s CostFunction Jb = %s"%(self._name, Jb)) - logging.debug("%s CostFunction Jo = %s"%(self._name, Jo)) - logging.debug("%s CostFunction J = %s"%(self._name, J)) - # - self.StoredVariables["CostFunctionJb"].store( Jb ) - self.StoredVariables["CostFunctionJo"].store( Jo ) - self.StoredVariables["CostFunctionJ" ].store( J ) # # Calcul de la matrice de gain et de l'analyse # -------------------------------------------- if Y.size <= Xb.size: if self._parameters["R_scalar"] is not None: R = self._parameters["R_scalar"] * numpy.eye(len(Y), dtype=numpy.float) - logging.debug("%s Calcul de K dans l'espace des observations"%self._name) K = B * Ha * (Hm * B * Ha + R).I else: - logging.debug("%s Calcul de K dans l'espace d'ébauche"%self._name) K = (Ha * RI * Hm + BI).I * Ha * RI Xa = Xb + K*d - logging.debug("%s Analyse Xa = %s"%(self._name, Xa)) # # Calcul de la fonction coût # -------------------------- @@ -119,11 +100,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Jb = 0.5 * (Xa - Xb).T * BI * (Xa - Xb) Jo = 0.5 * oma.T * RI * oma J = float( Jb ) + float( Jo ) - logging.debug("%s OMA = %s"%(self._name, oma)) - logging.debug("%s CostFunction Jb = %s"%(self._name, Jb)) - logging.debug("%s CostFunction Jo = %s"%(self._name, Jo)) - logging.debug("%s CostFunction J = %s"%(self._name, J)) - # self.StoredVariables["Analysis"].store( Xa.A1 ) self.StoredVariables["CostFunctionJb"].store( Jb ) self.StoredVariables["CostFunctionJo"].store( Jo ) @@ -143,13 +119,13 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Calculs et/ou stockages supplémentaires # --------------------------------------- if "Innovation" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["Innovation"].store( numpy.asmatrix(d).flatten().A1 ) + self.StoredVariables["Innovation"].store( numpy.ravel(d) ) if "BMA" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["BMA"].store( numpy.asmatrix(Xb - Xa).flatten().A1 ) + self.StoredVariables["BMA"].store( numpy.ravel(Xb - Xa) ) if "OMA" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["OMA"].store( numpy.asmatrix(oma).flatten().A1 ) + self.StoredVariables["OMA"].store( numpy.ravel(oma) ) if "OMB" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["OMB"].store( numpy.asmatrix(d).flatten().A1 ) + self.StoredVariables["OMB"].store( numpy.ravel(d) ) if "SigmaObs2" in self._parameters["StoreSupplementaryCalculations"]: self.StoredVariables["SigmaObs2"].store( float( (d.T * 
(Y-Hm*Xa)) / R.trace() ) ) if "SigmaBck2" in self._parameters["StoreSupplementaryCalculations"]: diff --git a/src/daComposant/daAlgorithms/EnsembleBlue.py b/src/daComposant/daAlgorithms/EnsembleBlue.py index 084943f..5b25f69 100644 --- a/src/daComposant/daAlgorithms/EnsembleBlue.py +++ b/src/daComposant/daAlgorithms/EnsembleBlue.py @@ -37,12 +37,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None ): - """ - Calcul d'une estimation BLUE d'ensemble : - - génération d'un ensemble d'observations, de même taille que le - nombre d'ébauches - - calcul de l'estimateur BLUE pour chaque membre de l'ensemble - """ logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) # @@ -91,10 +85,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): if Y.size <= Xb.valueserie(0).size: if self._parameters["R_scalar"] is not None: R = self._parameters["R_scalar"] * numpy.eye(len(Y), dtype=numpy.float) - logging.debug("%s Calcul de K dans l'espace des observations"%self._name) K = B * Ha * (Hm * B * Ha + R).I else: - logging.debug("%s Calcul de K dans l'espace d'ébauche"%self._name) K = (Ha * RI * Hm + BI).I * Ha * RI # # Calcul du BLUE pour chaque membre de l'ensemble diff --git a/src/daComposant/daAlgorithms/GradientTest.py b/src/daComposant/daAlgorithms/GradientTest.py index bff28d1..b969e33 100644 --- a/src/daComposant/daAlgorithms/GradientTest.py +++ b/src/daComposant/daAlgorithms/GradientTest.py @@ -108,9 +108,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # # Calcul du point courant # ----------------------- - X = numpy.asmatrix(Xb).flatten().T - FX = numpy.asmatrix( Hm( X ) ).flatten().T - FX = numpy.asmatrix(FX).flatten().T + X = numpy.asmatrix(numpy.ravel( Xb )).T + FX = numpy.asmatrix(numpy.ravel( Hm( X ) )).T + FX = numpy.asmatrix(numpy.ravel( FX )).T NormeX = numpy.linalg.norm( X ) NormeFX = numpy.linalg.norm( FX ) # @@ -124,7 +124,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): else: dX0.append( numpy.random.normal(0.,X.mean()) ) else: - dX0 = numpy.asmatrix(self._parameters["InitialDirection"]).flatten() + dX0 = numpy.ravel( self._parameters["InitialDirection"] ) # dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T # @@ -132,7 +132,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # --------------------------------------------------------- if self._parameters["ResiduFormula"] is "Taylor": GradFxdX = Ht( (X, dX0) ) - GradFxdX = numpy.asmatrix(GradFxdX).flatten().T + GradFxdX = numpy.asmatrix(numpy.ravel( GradFxdX )).T # # Entete des resultats # -------------------- @@ -181,12 +181,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Boucle sur les perturbations # ---------------------------- for i,amplitude in enumerate(Perturbations): - logging.debug("%s Etape de calcul numéro %i, avec la perturbation %8.3e"%(self._name, i, amplitude)) - # dX = amplitude * dX0 # FX_plus_dX = Hm( X + dX ) - FX_plus_dX = numpy.asmatrix(FX_plus_dX).flatten().T + FX_plus_dX = numpy.asmatrix(numpy.ravel( FX_plus_dX )).T # NormedX = numpy.linalg.norm( dX ) NormeFXdX = numpy.linalg.norm( FX_plus_dX ) @@ -224,7 +222,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # # Sorties eventuelles # ------------------- - logging.debug("%s Résultats :\n%s"%(self._name, msgs)) print print "Results of gradient stability check:" print msgs diff --git a/src/daComposant/daAlgorithms/KalmanFilter.py 
b/src/daComposant/daAlgorithms/KalmanFilter.py index 9bf49a0..c034e8c 100644 --- a/src/daComposant/daAlgorithms/KalmanFilter.py +++ b/src/daComposant/daAlgorithms/KalmanFilter.py @@ -36,14 +36,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): - """ - Calcul de l'estimateur du filtre de Kalman - - Remarque : les observations sont exploitées à partir du pas de temps 1, - et sont utilisées dans Yo comme rangées selon ces indices. Donc le pas 0 - n'est pas utilisé puisque la première étape de Kalman passe de 0 à 1 - avec l'observation du pas 1. - """ logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) # @@ -79,15 +71,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): self.StoredVariables["APosterioriCovariance"].store( Pn ) # for step in range(duration-1): - logging.debug("%s Etape de Kalman %i (i.e. %i->%i) sur un total de %i"%(self._name, step+1, step,step+1, duration-1)) - # - # Etape de prédiction - # ------------------- Xn_predicted = Mm * Xn Pn_predicted = Mm * Pn * Mt + Q # - # Etape de correction - # ------------------- d = Y.valueserie(step+1) - Hm * Xn_predicted K = Pn_predicted * Ha * (Hm * Pn_predicted * Ha + R).I Xn = Xn_predicted + K * d diff --git a/src/daComposant/daAlgorithms/LinearLeastSquares.py b/src/daComposant/daAlgorithms/LinearLeastSquares.py index 1a8ea25..fa5fccc 100644 --- a/src/daComposant/daAlgorithms/LinearLeastSquares.py +++ b/src/daComposant/daAlgorithms/LinearLeastSquares.py @@ -39,10 +39,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): - """ - Calcul de l'estimateur moindres carrés pondérés linéaires - (assimilation variationnelle sans ébauche) - """ logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) # @@ -66,7 +62,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # -------------------------------------------- K = (Ha * RI * Hm ).I * Ha * RI Xa = K * Y - logging.debug("%s Analyse Xa = %s"%(self._name, Xa)) # # Calcul de la fonction coût # -------------------------- @@ -74,10 +69,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Jb = 0. 
Jo = 0.5 * oma.T * RI * oma J = float( Jb ) + float( Jo ) - logging.debug("%s CostFunction Jb = %s"%(self._name, Jb)) - logging.debug("%s CostFunction Jo = %s"%(self._name, Jo)) - logging.debug("%s CostFunction J = %s"%(self._name, J)) - # self.StoredVariables["Analysis"].store( Xa.A1 ) self.StoredVariables["CostFunctionJb"].store( Jb ) self.StoredVariables["CostFunctionJo"].store( Jo ) @@ -86,7 +77,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Calculs et/ou stockages supplémentaires # --------------------------------------- if "OMA" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["OMA"].store( numpy.asmatrix(oma).flatten().A1 ) + self.StoredVariables["OMA"].store( numpy.ravel(oma) ) # logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) logging.debug("%s Terminé"%self._name) diff --git a/src/daComposant/daAlgorithms/NonLinearLeastSquares.py b/src/daComposant/daAlgorithms/NonLinearLeastSquares.py index cf975df..27cec60 100644 --- a/src/daComposant/daAlgorithms/NonLinearLeastSquares.py +++ b/src/daComposant/daAlgorithms/NonLinearLeastSquares.py @@ -88,10 +88,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): - """ - Calcul de l'estimateur moindres carrés pondérés non linéaires - (assimilation variationnelle sans ébauche) - """ + # logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) # @@ -117,12 +114,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Utilisation éventuelle d'un vecteur H(Xb) précalculé # ---------------------------------------------------- if H["AppliedToX"] is not None and H["AppliedToX"].has_key("HXb"): - logging.debug("%s Utilisation de HXb"%self._name) HXb = H["AppliedToX"]["HXb"] else: - logging.debug("%s Calcul de Hm(Xb)"%self._name) HXb = Hm( Xb ) - HXb = numpy.asmatrix(HXb).flatten().T + HXb = numpy.asmatrix(numpy.ravel( HXb )).T # # Calcul de l'innovation # ---------------------- @@ -131,7 +126,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): if max(Y.shape) != max(HXb.shape): raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape)) d = Y - HXb - logging.debug("%s Innovation d = %s"%(self._name, d)) # # Précalcul des inversions de B et R # ---------------------------------- @@ -157,16 +151,12 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Définition de la fonction-coût # ------------------------------ def CostFunction(x): - _X = numpy.asmatrix(x).flatten().T - logging.debug("%s CostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten())) + _X = numpy.asmatrix(numpy.ravel( x )).T _HX = Hm( _X ) - _HX = numpy.asmatrix(_HX).flatten().T + _HX = numpy.asmatrix(numpy.ravel( _HX )).T Jb = 0. 
Jo = 0.5 * (Y - _HX).T * RI * (Y - _HX) J = float( Jb ) + float( Jo ) - logging.debug("%s CostFunction Jb = %s"%(self._name, Jb)) - logging.debug("%s CostFunction Jo = %s"%(self._name, Jo)) - logging.debug("%s CostFunction J = %s"%(self._name, J)) if self._parameters["StoreInternalVariables"]: self.StoredVariables["CurrentState"].store( _X.A1 ) self.StoredVariables["CostFunctionJb"].store( Jb ) @@ -175,48 +165,36 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): return J # def GradientOfCostFunction(x): - _X = numpy.asmatrix(x).flatten().T - logging.debug("%s GradientOfCostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten())) + _X = numpy.asmatrix(numpy.ravel( x )).T _HX = Hm( _X ) - _HX = numpy.asmatrix(_HX).flatten().T + _HX = numpy.asmatrix(numpy.ravel( _HX )).T GradJb = 0. GradJo = - Ha( (_X, RI * (Y - _HX)) ) - GradJ = numpy.asmatrix( GradJb ).flatten().T + numpy.asmatrix( GradJo ).flatten().T - logging.debug("%s GradientOfCostFunction GradJb = %s"%(self._name, numpy.asmatrix( GradJb ).flatten())) - logging.debug("%s GradientOfCostFunction GradJo = %s"%(self._name, numpy.asmatrix( GradJo ).flatten())) - logging.debug("%s GradientOfCostFunction GradJ = %s"%(self._name, numpy.asmatrix( GradJ ).flatten())) + GradJ = numpy.asmatrix( numpy.ravel( GradJb ) + numpy.ravel( GradJo ) ).T return GradJ.A1 # def CostFunctionLM(x): - _X = numpy.asmatrix(x).flatten().T - logging.debug("%s CostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten())) + _X = numpy.asmatrix(numpy.ravel( x )).T _HX = Hm( _X ) - _HX = numpy.asmatrix(_HX).flatten().T + _HX = numpy.asmatrix(numpy.ravel( _HX )).T Jb = 0. Jo = 0.5 * (Y - _HX).T * RI * (Y - _HX) J = float( Jb ) + float( Jo ) - logging.debug("%s CostFunction Jb = %s"%(self._name, Jb)) - logging.debug("%s CostFunction Jo = %s"%(self._name, Jo)) - logging.debug("%s CostFunction J = %s"%(self._name, J)) if self._parameters["StoreInternalVariables"]: self.StoredVariables["CurrentState"].store( _X.A1 ) self.StoredVariables["CostFunctionJb"].store( Jb ) self.StoredVariables["CostFunctionJo"].store( Jo ) self.StoredVariables["CostFunctionJ" ].store( J ) # - return numpy.asmatrix( RdemiI*(Y - _HX) ).flatten().A1 + return numpy.ravel( RdemiI*(Y - _HX) ) # def GradientOfCostFunctionLM(x): - _X = numpy.asmatrix(x).flatten().T - logging.debug("%s GradientOfCostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten())) + _X = numpy.asmatrix(numpy.ravel( x )).T _HX = Hm( _X ) - _HX = numpy.asmatrix(_HX).flatten().T + _HX = numpy.asmatrix(numpy.ravel( _HX )).T GradJb = 0. 
GradJo = - Ha( (_X, RI * (Y - _HX)) ) - GradJ = numpy.asmatrix( GradJb ).flatten().T + numpy.asmatrix( GradJo ).flatten().T - logging.debug("%s GradientOfCostFunction GradJb = %s"%(self._name, numpy.asmatrix( GradJb ).flatten())) - logging.debug("%s GradientOfCostFunction GradJo = %s"%(self._name, numpy.asmatrix( GradJo ).flatten())) - logging.debug("%s GradientOfCostFunction GradJ = %s"%(self._name, numpy.asmatrix( GradJ ).flatten())) + GradJ = numpy.asmatrix( numpy.ravel( GradJb ) + numpy.ravel( GradJo ) ).T return - RdemiI*H["Tangent"].asMatrix( _X ) # # Point de démarrage de l'optimisation : Xini = Xb @@ -225,7 +203,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Xini = Xb.A1.tolist() else: Xini = list(Xb) - logging.debug("%s Point de démarrage Xini = %s"%(self._name, Xini)) # # Minimisation de la fonctionnelle # -------------------------------- @@ -311,29 +288,22 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): if self._parameters["StoreInternalVariables"]: Minimum = self.StoredVariables["CurrentState"].valueserie(step = StepMin) # - logging.debug("%s %s Step of min cost = %s"%(self._name, self._parameters["Minimizer"], StepMin)) - logging.debug("%s %s Minimum cost = %s"%(self._name, self._parameters["Minimizer"], MinJ)) - logging.debug("%s %s Minimum state = %s"%(self._name, self._parameters["Minimizer"], Minimum)) - logging.debug("%s %s Nb of F = %s"%(self._name, self._parameters["Minimizer"], nfeval)) - logging.debug("%s %s RetCode = %s"%(self._name, self._parameters["Minimizer"], rc)) - # # Obtention de l'analyse # ---------------------- - Xa = numpy.asmatrix(Minimum).flatten().T - logging.debug("%s Analyse Xa = %s"%(self._name, Xa)) + Xa = numpy.asmatrix(numpy.ravel( Minimum )).T # self.StoredVariables["Analysis"].store( Xa.A1 ) # # Calculs et/ou stockages supplémentaires # --------------------------------------- if "Innovation" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["Innovation"].store( numpy.asmatrix(d).flatten().A1 ) + self.StoredVariables["Innovation"].store( numpy.ravel(d) ) if "BMA" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["BMA"].store( numpy.asmatrix(Xb - Xa).flatten().A1 ) + self.StoredVariables["BMA"].store( numpy.ravel(Xb - Xa) ) if "OMA" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["OMA"].store( numpy.asmatrix(Y - Hm(Xa)).flatten().A1 ) + self.StoredVariables["OMA"].store( numpy.ravel(Y - Hm(Xa)) ) if "OMB" in self._parameters["StoreSupplementaryCalculations"]: - self.StoredVariables["OMB"].store( numpy.asmatrix(d).flatten().A1 ) + self.StoredVariables["OMB"].store( numpy.ravel(d) ) # logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) logging.debug("%s Terminé"%self._name) diff --git a/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py b/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py index 5da1eec..5628005 100644 --- a/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py +++ b/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py @@ -82,11 +82,15 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): typecast = bool, message = "Stockage des variables internes ou intermédiaires du calcul", ) + self.defineRequiredParameter( + name = "StoreSupplementaryCalculations", + default = [], + typecast = tuple, + message = "Liste de calculs supplémentaires à stocker et/ou effectuer", + listval = ["BMA", "OMA", "OMB", "Innovation"] + ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, 
Q=None, Parameters=None): - """ - Calcul de l'estimateur - """ logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) # @@ -130,10 +134,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Définition de la fonction-coût # ------------------------------ def CostFunction(x, QualityMeasure="AugmentedPonderatedLeastSquares"): - _X = numpy.asmatrix(x).flatten().T - logging.debug("%s CostFunction X = %s"%(self._name, _X.A1)) + _X = numpy.asmatrix(numpy.ravel( x )).T _HX = Hm( _X ) - _HX = numpy.asmatrix(_HX).flatten().T + _HX = numpy.asmatrix(numpy.ravel( _HX )).T # if QualityMeasure in ["AugmentedPonderatedLeastSquares","APLS","DA"]: if BI is None or RI is None: @@ -160,9 +163,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Jo = numpy.max( numpy.abs(Y - _HX) ) J = float( Jb ) + float( Jo ) # - logging.debug("%s CostFunction Jb = %s"%(self._name, Jb)) - logging.debug("%s CostFunction Jo = %s"%(self._name, Jo)) - logging.debug("%s CostFunction J = %s"%(self._name, J)) return J # # Point de démarrage de l'optimisation : Xini = Xb @@ -173,7 +173,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Xini = list(Xb) else: Xini = numpy.zeros(len(BoxBounds[:,0])) - logging.debug("%s Point de démarrage Xini = %s"%(self._name, Xini)) # # Initialisation des bornes # ------------------------- @@ -218,11 +217,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): PosInsect[j,i] = PosInsect[j,i]+VelocityInsect[j,i] quality = CostFunction(insect,self._parameters["QualityCriterion"]) if quality < qBestPosInsect[i]: - BestPosInsect[:,i] = numpy.asmatrix(insect).flatten().A1 + BestPosInsect[:,i] = numpy.ravel( insect ) if quality < qBest : - Best = numpy.asmatrix(insect).flatten().A1 + Best = numpy.ravel( insect ) qBest = quality - logging.debug("%s Iteration %i : qBest = %.5f, Best = %s"%(self._name, n+1,qBest,Best)) # if self._parameters["StoreInternalVariables"]: self.StoredVariables["CurrentState"].store( Best ) @@ -230,19 +228,25 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): self.StoredVariables["CostFunctionJo"].store( 0. 
) self.StoredVariables["CostFunctionJ" ].store( qBest ) # - logging.debug("%s %s Step of min cost = %s"%(self._name, self._parameters["QualityCriterion"], self._parameters["MaximumNumberOfSteps"])) - logging.debug("%s %s Minimum cost = %s"%(self._name, self._parameters["QualityCriterion"], qBest)) - logging.debug("%s %s Minimum state = %s"%(self._name, self._parameters["QualityCriterion"], Best)) - logging.debug("%s %s Nb of F = %s"%(self._name, self._parameters["QualityCriterion"], (self._parameters["MaximumNumberOfSteps"]+1)*self._parameters["NumberOfInsects"]+1)) - logging.debug("%s %s RetCode = %s"%(self._name, self._parameters["QualityCriterion"], 0)) - # # Obtention de l'analyse # ---------------------- - Xa = numpy.asmatrix(Best).flatten().T - logging.debug("%s Analyse Xa = %s"%(self._name, Xa)) + Xa = numpy.asmatrix(numpy.ravel( Best )).T # self.StoredVariables["Analysis"].store( Xa.A1 ) # + # Calculs et/ou stockages supplémentaires + # --------------------------------------- + if "Innovation" in self._parameters["StoreSupplementaryCalculations"] or "OMB" in self._parameters["StoreSupplementaryCalculations"]: + d = Y - Hm(Xb) + if "Innovation" in self._parameters["StoreSupplementaryCalculations"]: + self.StoredVariables["Innovation"].store( numpy.ravel(d) ) + if "BMA" in self._parameters["StoreSupplementaryCalculations"]: + self.StoredVariables["BMA"].store( numpy.ravel(Xb - Xa) ) + if "OMA" in self._parameters["StoreSupplementaryCalculations"]: + self.StoredVariables["OMA"].store( numpy.ravel(Y - Hm(Xa)) ) + if "OMB" in self._parameters["StoreSupplementaryCalculations"]: + self.StoredVariables["OMB"].store( numpy.ravel(d) ) + # logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) logging.debug("%s Terminé"%self._name) # diff --git a/src/daComposant/daAlgorithms/QuantileRegression.py b/src/daComposant/daAlgorithms/QuantileRegression.py index 3bea251..965c7bc 100644 --- a/src/daComposant/daAlgorithms/QuantileRegression.py +++ b/src/daComposant/daAlgorithms/QuantileRegression.py @@ -64,11 +64,15 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): typecast = bool, message = "Stockage des variables internes ou intermédiaires du calcul", ) + self.defineRequiredParameter( + name = "StoreSupplementaryCalculations", + default = [], + typecast = tuple, + message = "Liste de calculs supplémentaires à stocker et/ou effectuer", + listval = ["BMA", "OMA", "OMB", "Innovation"] + ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): - """ - Calcul des parametres definissant le quantile - """ logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) # @@ -83,12 +87,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Utilisation éventuelle d'un vecteur H(Xb) précalculé # ---------------------------------------------------- if H["AppliedToX"] is not None and H["AppliedToX"].has_key("HXb"): - logging.debug("%s Utilisation de HXb"%self._name) HXb = H["AppliedToX"]["HXb"] else: - logging.debug("%s Calcul de Hm(Xb)"%self._name) HXb = Hm( Xb ) - HXb = numpy.asmatrix(HXb).flatten().T + HXb = numpy.asmatrix(numpy.ravel( HXb )).T # # Calcul de l'innovation # ---------------------- @@ -97,21 +99,16 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): if max(Y.shape) != max(HXb.shape): raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape)) d = Y - HXb - 
logging.debug("%s Innovation d = %s"%(self._name, d)) # # Définition de la fonction-coût # ------------------------------ def CostFunction(x): - _X = numpy.asmatrix(x).flatten().T - logging.debug("%s CostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten())) + _X = numpy.asmatrix(numpy.ravel( x )).T _HX = Hm( _X ) - _HX = numpy.asmatrix(_HX).flatten().T + _HX = numpy.asmatrix(numpy.ravel( _HX )).T Jb = 0. Jo = 0. J = Jb + Jo - logging.debug("%s CostFunction Jb = %s"%(self._name, Jb)) - logging.debug("%s CostFunction Jo = %s"%(self._name, Jo)) - logging.debug("%s CostFunction J = %s"%(self._name, J)) if self._parameters["StoreInternalVariables"]: self.StoredVariables["CurrentState"].store( _X.A1 ) self.StoredVariables["CostFunctionJb"].store( Jb ) @@ -120,8 +117,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): return _HX # def GradientOfCostFunction(x): - _X = numpy.asmatrix(x).flatten().T - logging.debug("%s GradientOfCostFunction X = %s"%(self._name, _X.A1)) + _X = numpy.asmatrix(numpy.ravel( x )).T Hg = H["Tangent"].asMatrix( _X ) return Hg # @@ -131,7 +127,6 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Xini = Xb.A1.tolist() else: Xini = list(Xb) - logging.debug("%s Point de démarrage Xini = %s"%(self._name, Xini)) # # Minimisation de la fonctionnelle # -------------------------------- @@ -151,19 +146,22 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): else: raise ValueError("Error in Minimizer name: %s"%self._parameters["Minimizer"]) # - logging.debug("%s %s Step of min cost = %s"%(self._name, self._parameters["Minimizer"], nfeval)) - logging.debug("%s %s Minimum cost = %s"%(self._name, self._parameters["Minimizer"], J_optimal)) - logging.debug("%s %s Minimum state = %s"%(self._name, self._parameters["Minimizer"], Minimum)) - logging.debug("%s %s Nb of F = %s"%(self._name, self._parameters["Minimizer"], nfeval)) - logging.debug("%s %s RetCode = %s"%(self._name, self._parameters["Minimizer"], rc)) - # # Obtention de l'analyse # ---------------------- - Xa = numpy.asmatrix(Minimum).flatten().T - logging.debug("%s Analyse Xa = %s"%(self._name, Xa)) + Xa = numpy.asmatrix(numpy.ravel( Minimum )).T # self.StoredVariables["Analysis"].store( Xa.A1 ) - self.StoredVariables["Innovation"].store( d.A1 ) + # + # Calculs et/ou stockages supplémentaires + # --------------------------------------- + if "Innovation" in self._parameters["StoreSupplementaryCalculations"]: + self.StoredVariables["Innovation"].store( numpy.ravel(d) ) + if "BMA" in self._parameters["StoreSupplementaryCalculations"]: + self.StoredVariables["BMA"].store( numpy.ravel(Xb - Xa) ) + if "OMA" in self._parameters["StoreSupplementaryCalculations"]: + self.StoredVariables["OMA"].store( numpy.ravel(Y - Hm(Xa)) ) + if "OMB" in self._parameters["StoreSupplementaryCalculations"]: + self.StoredVariables["OMB"].store( numpy.ravel(d) ) # logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M"))) logging.debug("%s Terminé"%self._name) diff --git a/src/daComposant/daCore/AssimilationStudy.py b/src/daComposant/daCore/AssimilationStudy.py index 5047205..83302b8 100644 --- a/src/daComposant/daCore/AssimilationStudy.py +++ b/src/daComposant/daCore/AssimilationStudy.py @@ -304,6 +304,7 @@ class AssimilationStudy: self.__H["Direct"] = Operator( fromMatrix = matrice ) self.__H["Tangent"] = Operator( fromMatrix = matrice ) self.__H["Adjoint"] = Operator( fromMatrix = matrice.T ) + del matrice else: raise ValueError("Improperly defined observation operator, it requires at minima either a 
matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.") # @@ -387,6 +388,7 @@ class AssimilationStudy: self.__M["Direct"] = Operator( fromMatrix = matrice ) self.__M["Tangent"] = Operator( fromMatrix = matrice ) self.__M["Adjoint"] = Operator( fromMatrix = matrice.T ) + del matrice else: raise ValueError("Improperly defined evolution operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.") # @@ -634,7 +636,7 @@ class AssimilationStudy: asPersistentVector = self.__Xb.reshape((-1,min(__B_shape))) self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix) for member in asPersistentVector: - self.__Xb.store( numpy.matrix( numpy.asarray(member).flatten(), numpy.float ).T ) + self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T ) __Xb_shape = min(__B_shape) else: raise ValueError("Shape characteristic of B \"%s\" and Xb \"%s\" are incompatible"%(__B_shape,__Xb_shape)) diff --git a/src/daComposant/daNumerics/ApproximatedDerivatives.py b/src/daComposant/daNumerics/ApproximatedDerivatives.py index 140563e..f963b1b 100644 --- a/src/daComposant/daNumerics/ApproximatedDerivatives.py +++ b/src/daComposant/daNumerics/ApproximatedDerivatives.py @@ -50,7 +50,7 @@ class FDApproximation: if dX is None: self.__dX = None else: - self.__dX = numpy.asmatrix(dX).flatten().T + self.__dX = numpy.asmatrix(numpy.ravel( dX )).T # --------------------------------------------------------- def TangentMatrix(self, X ): @@ -82,13 +82,12 @@ class FDApproximation: logging.debug(" Incrément de............: %s*X"%float(self.__increment)) logging.debug(" Approximation centrée...: %s"%(self.__centeredDF)) # - _X = numpy.asmatrix(X).flatten().T + _X = numpy.asmatrix(numpy.ravel( X )).T # if self.__dX is None: _dX = self.__increment * _X else: - _dX = numpy.asmatrix(self.__dX).flatten().T - logging.debug(" Incrément non corrigé...: %s"%(_dX)) + _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T # if (_dX == 0.).any(): moyenne = _dX.mean() @@ -96,7 +95,6 @@ class FDApproximation: _dX = numpy.where( _dX == 0., float(self.__increment), _dX ) else: _dX = numpy.where( _dX == 0., moyenne, _dX ) - logging.debug(" Incrément dX............: %s"%(_dX)) # if self.__centeredDF: # @@ -137,7 +135,7 @@ class FDApproximation: # ------------------------------------------------------- Jacobienne = [] for i in range( len(_dX) ): - Jacobienne.append( numpy.asmatrix(( HX_plus_dX[i] - HX ) / _dX[i]).flatten().A1 ) + Jacobienne.append( numpy.ravel(( HX_plus_dX[i] - HX ) / _dX[i]) ) # Jacobienne = numpy.matrix( numpy.vstack( Jacobienne ) ).T logging.debug(" == Fin du calcul de la Jacobienne") @@ -159,7 +157,7 @@ class FDApproximation: # # Calcul de la valeur linéarisée de H en X appliqué à dX # ------------------------------------------------------ - _dX = numpy.asmatrix(dX).flatten().T + _dX = numpy.asmatrix(numpy.ravel( dX )).T HtX = numpy.dot(Jacobienne, _dX) return HtX.A1 @@ -178,7 +176,7 @@ class FDApproximation: # # Calcul de la valeur de l'adjoint en X appliqué à Y # -------------------------------------------------- - _Y = numpy.asmatrix(Y).flatten().T + _Y = numpy.asmatrix(numpy.ravel( Y )).T HaY = numpy.dot(JacobienneT, _Y) return HaY.A1 diff --git a/src/daComposant/daNumerics/mmqr.py b/src/daComposant/daNumerics/mmqr.py index fa8cb80..551d059 100644 --- a/src/daComposant/daNumerics/mmqr.py +++ b/src/daComposant/daNumerics/mmqr.py @@ -24,7 +24,7 @@ # Graphical Statistics, 9, 1, pp.60-77, 2000 import sys, math -from numpy import sum, array, 
matrix, dot, linalg, asarray, asmatrix +from numpy import sum, array, matrix, dot, linalg, asarray, asmatrix, ravel # ============================================================================== def mmqr( @@ -39,8 +39,8 @@ def mmqr( # # Recuperation des donnees et informations initiales # -------------------------------------------------- - variables = asmatrix(x0).A1 - mesures = asmatrix(y).flatten().T + variables = asmatrix(ravel( x0 )) + mesures = asmatrix(ravel( y )).T increment = sys.float_info[0] p = len(variables.flat) n = len(mesures.flat) diff --git a/src/daSalome/daYacsIntegration/daOptimizerLoop.py b/src/daSalome/daYacsIntegration/daOptimizerLoop.py index f5635b5..3c75dbf 100644 --- a/src/daSalome/daYacsIntegration/daOptimizerLoop.py +++ b/src/daSalome/daYacsIntegration/daOptimizerLoop.py @@ -21,7 +21,7 @@ import SALOMERuntime import pilot -import pickle +import pickle, cPickle import numpy import threading @@ -181,7 +181,7 @@ class OptimizerHooks: self.optim_algo.setError("sync == false not yet implemented") def Tangent(self, (X, dX), sync = 1): - #print "Call Tangent OptimizerHooks" + # print "Call Tangent OptimizerHooks" if sync == 1: # 1: Get a unique sample number self.optim_algo.counter_lock.acquire() @@ -216,7 +216,7 @@ class OptimizerHooks: self.optim_algo.setError("sync == false not yet implemented") def Adjoint(self, (X, Y), sync = 1): - #print "Call Adjoint OptimizerHooks" + # print "Call Adjoint OptimizerHooks" if sync == 1: # 1: Get a unique sample number self.optim_algo.counter_lock.acquire() @@ -282,17 +282,17 @@ class AssimilationAlgorithm_asynch(SALOMERuntime.OptimizerAlgASync): # get the daStudy #print "[Debug] Input is ", input str_da_study = input.getStringValue() - self.da_study = pickle.loads(str_da_study) + self.da_study = cPickle.loads(str_da_study) #print "[Debug] da_study is ", self.da_study self.da_study.initAlgorithm() self.ADD = self.da_study.getAssimilationStudy() def startToTakeDecision(self): - #print "Algorithme startToTakeDecision" + # print "Algorithme startToTakeDecision" # Check if ObservationOperator is already set if self.da_study.getObservationOperatorType("Direct") == "Function" or self.da_study.getObservationOperatorType("Tangent") == "Function" or self.da_study.getObservationOperatorType("Adjoint") == "Function" : - #print "Set Hooks" + # print "Set Hooks for ObservationOperator" # Use proxy function for YACS self.hooksOO = OptimizerHooks(self, switch_value=1) direct = tangent = adjoint = None @@ -305,11 +305,13 @@ class AssimilationAlgorithm_asynch(SALOMERuntime.OptimizerAlgASync): # Set ObservationOperator self.ADD.setObservationOperator(asFunction = {"Direct":direct, "Tangent":tangent, "Adjoint":adjoint}) + # else: + # print "Not setting Hooks for ObservationOperator" # Check if EvolutionModel is already set if self.da_study.getEvolutionModelType("Direct") == "Function" or self.da_study.getEvolutionModelType("Tangent") == "Function" or self.da_study.getEvolutionModelType("Adjoint") == "Function" : self.has_evolution_model = True - #print "Set Hooks" + # print "Set Hooks for EvolutionModel" # Use proxy function for YACS self.hooksEM = OptimizerHooks(self, switch_value=2) direct = tangent = adjoint = None @@ -322,6 +324,8 @@ class AssimilationAlgorithm_asynch(SALOMERuntime.OptimizerAlgASync): # Set EvolutionModel self.ADD.setEvolutionModel(asFunction = {"Direct":direct, "Tangent":tangent, "Adjoint":adjoint}) + # else: + # print "Not setting Hooks for EvolutionModel" # Set Observers for observer_name in 
self.da_study.observers_dict.keys(): @@ -376,7 +380,7 @@ class AssimilationAlgorithm_asynch(SALOMERuntime.OptimizerAlgASync): # Remove Data Observer, so you can ... var.removeDataObserver(self.obs) # Pickle then ... - var_str = pickle.dumps(var) + var_str = cPickle.dumps(var) # Add Again Data Observer if self.da_study.observers_dict[info]["scheduler"] != "": self.ADD.setDataObserver(info, HookFunction=self.obs, Scheduler = self.da_study.observers_dict[info]["scheduler"], HookParameters = info) @@ -426,7 +430,7 @@ class AssimilationAlgorithm_asynch(SALOMERuntime.OptimizerAlgASync): # Remove data observers cannot pickle assimilation study object for observer_name in self.da_study.observers_dict.keys(): self.ADD.removeDataObserver(observer_name, self.obs) - result = pickle.dumps(self.da_study) + result = pickle.dumps(self.da_study) # Careful : pickle is mandatory over cPickle ! return result # Obligatoire ??? diff --git a/src/tests/daSalome/test017_3DVAR_function_script.py b/src/tests/daSalome/test017_3DVAR_function_script.py index be23451..dfe0abc 100644 --- a/src/tests/daSalome/test017_3DVAR_function_script.py +++ b/src/tests/daSalome/test017_3DVAR_function_script.py @@ -19,7 +19,6 @@ # Author: André Ribes, andre.ribes@edf.fr, EDF R&D import numpy -import pickle print computation method = "" diff --git a/src/tests/daSalome/test_aster_zzzz159a_functions.py b/src/tests/daSalome/test_aster_zzzz159a_functions.py index 7517c26..323e3b1 100644 --- a/src/tests/daSalome/test_aster_zzzz159a_functions.py +++ b/src/tests/daSalome/test_aster_zzzz159a_functions.py @@ -19,7 +19,6 @@ # Author: André Ribes, andre.ribes@edf.fr, EDF R&D import numpy -import pickle import sys sys.path.insert(0, init_data['SOURCES_ROOT']) -- 2.39.2
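
Note on the pattern applied throughout the algorithm files above: chains of the form numpy.asmatrix(v).flatten().T (or .flatten().A1) are rewritten as numpy.asmatrix(numpy.ravel(v)).T or numpy.ravel(v), and per-iteration logging.debug calls whose messages are pre-formatted with "%" are removed from the cost functions and loops. The short standalone sketch below illustrates why both changes help; it assumes only NumPy and the standard logging module, its names are local to the sketch and not part of ADAO, and the actual savings depend on the sizes and memory layout of the vectors involved.

    # Illustrative sketch only: demonstrates the two costs this patch removes.
    import logging
    import timeit

    import numpy

    x = numpy.arange(10000.0).reshape(100, 100)

    # 1) Flattening: ndarray.flatten() always returns a copy, while
    #    numpy.ravel() returns a view when the data are already contiguous,
    #    which is cheaper in hot paths such as cost functions and gradients.
    assert numpy.array_equal(x.flatten(), numpy.ravel(x))
    t_flatten = timeit.timeit(lambda: x.flatten(), number=10000)
    t_ravel = timeit.timeit(lambda: numpy.ravel(x), number=10000)

    # 2) Logging: a message built with "%" before the call is formatted even
    #    when DEBUG is disabled; dropping such calls from inner loops (or
    #    guarding them as below, with lazy "%s"-style arguments) avoids that
    #    formatting overhead.
    logger = logging.getLogger("perf_sketch")
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("ravel: %.4fs, flatten: %.4fs", t_ravel, t_flatten)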