-#-*-coding:iso-8859-1-*-
+# -*- coding: utf-8 -*-
#
-# Copyright (C) 2008-2012 EDF R&D
+# Copyright (C) 2008-2021 EDF R&D
#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License.
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
import logging
-from daCore import BasicObjects, PlatformInfo
-m = PlatformInfo.SystemUsage()
-
-import numpy
-import scipy.optimize
-
-if logging.getLogger().level < 30:
- iprint = 1
- message = scipy.optimize.tnc.MSG_ALL
- disp = 1
-else:
- iprint = -1
- message = scipy.optimize.tnc.MSG_NONE
- disp = 0
+from daCore import BasicObjects
+import numpy, scipy.optimize, scipy.version
# ==============================================================================
class ElementaryAlgorithm(BasicObjects.Algorithm):
def __init__(self):
- BasicObjects.Algorithm.__init__(self)
- self._name = "NONLINEARLEASTSQUARES"
- logging.debug("%s Initialisation"%self._name)
+ BasicObjects.Algorithm.__init__(self, "NONLINEARLEASTSQUARES")
+ self.defineRequiredParameter(
+ name = "Minimizer",
+ default = "LBFGSB",
+ typecast = str,
+ message = "Minimiseur utilisé",
+            listval = ["LBFGSB", "TNC", "CG", "NCG", "BFGS", "LM"],
+ )
+ self.defineRequiredParameter(
+ name = "MaximumNumberOfSteps",
+ default = 15000,
+ typecast = int,
+ message = "Nombre maximal de pas d'optimisation",
+ minval = -1,
+ )
+ self.defineRequiredParameter(
+ name = "CostDecrementTolerance",
+ default = 1.e-7,
+ typecast = float,
+ message = "Diminution relative minimale du coût lors de l'arrêt",
+ minval = 0.,
+ )
+ self.defineRequiredParameter(
+ name = "ProjectedGradientTolerance",
+ default = -1,
+ typecast = float,
+ message = "Maximum des composantes du gradient projeté lors de l'arrêt",
+ minval = -1,
+ )
+ self.defineRequiredParameter(
+ name = "GradientNormTolerance",
+ default = 1.e-05,
+ typecast = float,
+ message = "Maximum des composantes du gradient lors de l'arrêt",
+ minval = 0.,
+ )
+ self.defineRequiredParameter(
+ name = "StoreInternalVariables",
+ default = False,
+ typecast = bool,
+ message = "Stockage des variables internes ou intermédiaires du calcul",
+ )
+ self.defineRequiredParameter(
+ name = "StoreSupplementaryCalculations",
+ default = [],
+ typecast = tuple,
+ message = "Liste de calculs supplémentaires à stocker et/ou effectuer",
+ listval = [
+ "Analysis",
+ "BMA",
+ "CostFunctionJ",
+ "CostFunctionJAtCurrentOptimum",
+ "CostFunctionJb",
+ "CostFunctionJbAtCurrentOptimum",
+ "CostFunctionJo",
+ "CostFunctionJoAtCurrentOptimum",
+ "CurrentIterationNumber",
+ "CurrentOptimum",
+ "CurrentState",
+ "IndexOfOptimum",
+ "Innovation",
+ "InnovationAtCurrentState",
+ "OMA",
+ "OMB",
+ "SimulatedObservationAtBackground",
+ "SimulatedObservationAtCurrentOptimum",
+ "SimulatedObservationAtCurrentState",
+ "SimulatedObservationAtOptimum",
+ ]
+ )
+ self.defineRequiredParameter( # Pas de type
+ name = "Bounds",
+ message = "Liste des valeurs de bornes",
+ )
+ self.defineRequiredParameter(
+ name = "InitializationPoint",
+ typecast = numpy.ravel,
+ message = "État initial imposé (par défaut, c'est l'ébauche si None)",
+ )
+ self.requireInputArguments(
+            mandatory = ("Xb", "Y", "HO", "R"),
+ )
+ self.setAttributes(tags=(
+ "Optimization",
+ "NonLinear",
+ "Variational",
+ ))
- def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
- """
- Calcul de l'estimateur moindres carrés pondérés non linéaires
- (assimilation variationnelle sans ébauche)
- """
- logging.debug("%s Lancement"%self._name)
- logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo")))
+ def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
+ self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
#
- # Opérateur d'observation
- # -----------------------
- Hm = H["Direct"].appliedTo
- Ht = H["Adjoint"].appliedInXTo
+ # Opérateurs
+ # ----------
+ Hm = HO["Direct"].appliedTo
+ Ha = HO["Adjoint"].appliedInXTo
#
- # Utilisation éventuelle d'un vecteur H(Xb) précalculé
+ # Utilisation éventuelle d'un vecteur H(Xb) précalculé
# ----------------------------------------------------
- if H["AppliedToX"] is not None and H["AppliedToX"].has_key("HXb"):
- logging.debug("%s Utilisation de HXb"%self._name)
- HXb = H["AppliedToX"]["HXb"]
+ if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+ HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
else:
- logging.debug("%s Calcul de Hm(Xb)"%self._name)
HXb = Hm( Xb )
- HXb = numpy.asmatrix(HXb).flatten().T
- #
- # Calcul de l'innovation
- # ----------------------
+ HXb = numpy.asmatrix(numpy.ravel( HXb )).T
if Y.size != HXb.size:
raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
if max(Y.shape) != max(HXb.shape):
raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
- d = Y - HXb
- logging.debug("%s Innovation d = %s"%(self._name, d))
#
- # Précalcul des inversions de B et R
+ # Précalcul des inversions de B et R
# ----------------------------------
- # if B is not None:
- # BI = B.I
- # elif Parameters["B_scalar"] is not None:
- # BI = 1.0 / Parameters["B_scalar"]
- #
- if R is not None:
- RI = R.I
- elif Parameters["R_scalar"] is not None:
- RI = 1.0 / Parameters["R_scalar"]
+ RI = R.getI()
+ if self._parameters["Minimizer"] == "LM":
+ RdemiI = R.choleskyI()
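+        # RI is R^(-1); RdemiI is assumed to be the inverse of a square root
+        # (Cholesky) factor of R, used only to build the "LM" residuals below.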
#
- # Définition de la fonction-coût
+ # Définition de la fonction-coût
# ------------------------------
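+        # A note on the functional minimised below: with no background term,
+        #   J(x) = Jo(x) = 1/2 * (Y - H(x))^T * R^(-1) * (Y - H(x))
+        # i.e. a variational cost function with Jb kept equal to 0.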
def CostFunction(x):
- _X = numpy.asmatrix(x).flatten().T
- logging.debug("%s CostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten()))
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ if self._parameters["StoreInternalVariables"] or \
+ self._toStore("CurrentState") or \
+ self._toStore("CurrentOptimum"):
+ self.StoredVariables["CurrentState"].store( _X )
_HX = Hm( _X )
- _HX = numpy.asmatrix(_HX).flatten().T
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _Innovation = Y - _HX
+ if self._toStore("SimulatedObservationAtCurrentState") or \
+ self._toStore("SimulatedObservationAtCurrentOptimum"):
+ self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+ if self._toStore("InnovationAtCurrentState"):
+ self.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ #
Jb = 0.
- Jo = 0.5 * (Y - _HX).T * RI * (Y - _HX)
- J = float( Jb ) + float( Jo )
- logging.debug("%s CostFunction Jb = %s"%(self._name, Jb))
- logging.debug("%s CostFunction Jo = %s"%(self._name, Jo))
- logging.debug("%s CostFunction J = %s"%(self._name, J))
- self.StoredVariables["CurrentState"].store( _X.A1 )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ #
+ self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["CostFunctionJ"]) )
self.StoredVariables["CostFunctionJb"].store( Jb )
self.StoredVariables["CostFunctionJo"].store( Jo )
self.StoredVariables["CostFunctionJ" ].store( J )
- return float( J )
+ if self._toStore("IndexOfOptimum") or \
+ self._toStore("CurrentOptimum") or \
+ self._toStore("CostFunctionJAtCurrentOptimum") or \
+ self._toStore("CostFunctionJbAtCurrentOptimum") or \
+ self._toStore("CostFunctionJoAtCurrentOptimum") or \
+ self._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( self.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if self._toStore("IndexOfOptimum"):
+ self.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if self._toStore("CurrentOptimum"):
+ self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["CurrentState"][IndexMin] )
+ if self._toStore("SimulatedObservationAtCurrentOptimum"):
+ self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+ if self._toStore("CostFunctionJbAtCurrentOptimum"):
+ self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJb"][IndexMin] )
+ if self._toStore("CostFunctionJoAtCurrentOptimum"):
+ self.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJo"][IndexMin] )
+ if self._toStore("CostFunctionJAtCurrentOptimum"):
+ self.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( self.StoredVariables["CostFunctionJ" ][IndexMin] )
+ return J
#
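+        # The gradient of Jo uses the adjoint of the observation operator:
+        #   grad J(x) = - H'(x)^T * R^(-1) * (Y - H(x))
+        # hence the call to Ha below.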
def GradientOfCostFunction(x):
- _X = numpy.asmatrix(x).flatten().T
- logging.debug("%s GradientOfCostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten()))
+ _X = numpy.asmatrix(numpy.ravel( x )).T
_HX = Hm( _X )
- _HX = numpy.asmatrix(_HX).flatten().T
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
GradJb = 0.
- GradJo = - Ht( (_X, RI * (Y - _HX)) )
- GradJ = numpy.asmatrix( GradJb ).flatten().T + numpy.asmatrix( GradJo ).flatten().T
- logging.debug("%s GradientOfCostFunction GradJb = %s"%(self._name, numpy.asmatrix( GradJb ).flatten()))
- logging.debug("%s GradientOfCostFunction GradJo = %s"%(self._name, numpy.asmatrix( GradJo ).flatten()))
- logging.debug("%s GradientOfCostFunction GradJ = %s"%(self._name, numpy.asmatrix( GradJ ).flatten()))
+ GradJo = - Ha( (_X, RI * (Y - _HX)) )
+ GradJ = numpy.asmatrix( numpy.ravel( GradJb ) + numpy.ravel( GradJo ) ).T
return GradJ.A1
#
- # Point de démarrage de l'optimisation : Xini = Xb
- # ------------------------------------
- if type(Xb) is type(numpy.matrix([])):
- Xini = Xb.A1.tolist()
- else:
- Xini = list(Xb)
- logging.debug("%s Point de démarrage Xini = %s"%(self._name, Xini))
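+        # For the "LM" minimizer, scipy.optimize.leastsq expects the residual
+        # vector and its Jacobian instead of the scalar cost: the residuals are
+        # taken here as R^(-1/2) * (Y - H(x)), whose sum of squares equals 2*Jo.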
+ def CostFunctionLM(x):
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _Innovation = Y - _HX
+ Jb = 0.
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ if self._parameters["StoreInternalVariables"] or \
+ self._toStore("CurrentState"):
+ self.StoredVariables["CurrentState"].store( _X )
+ self.StoredVariables["CostFunctionJb"].store( Jb )
+ self.StoredVariables["CostFunctionJo"].store( Jo )
+ self.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ return numpy.ravel( RdemiI*_Innovation )
#
- # Paramètres de pilotage
- # ----------------------
- # Potentiels : "Bounds", "Minimizer", "MaximumNumberOfSteps", "ProjectedGradientTolerance", "GradientNormTolerance", "InnerMinimizer"
- if Parameters.has_key("Bounds") and (type(Parameters["Bounds"]) is type([]) or type(Parameters["Bounds"]) is type(())) and (len(Parameters["Bounds"]) > 0):
- Bounds = Parameters["Bounds"]
- else:
- Bounds = None
- MinimizerList = ["LBFGSB","TNC", "CG", "NCG", "BFGS"]
- if Parameters.has_key("Minimizer") and (Parameters["Minimizer"] in MinimizerList):
- Minimizer = str( Parameters["Minimizer"] )
- else:
- Minimizer = "LBFGSB"
- logging.warning("%s Unknown or undefined minimizer, replaced by the default one \"%s\""%(self._name,Minimizer))
- logging.debug("%s Minimiseur utilisé = %s"%(self._name, Minimizer))
- if Parameters.has_key("MaximumNumberOfSteps") and (Parameters["MaximumNumberOfSteps"] > -1):
- maxiter = int( Parameters["MaximumNumberOfSteps"] )
- else:
- maxiter = 15000
- logging.debug("%s Nombre maximal de pas d'optimisation = %s"%(self._name, str(maxiter)))
- if Parameters.has_key("CostDecrementTolerance") and (Parameters["CostDecrementTolerance"] > 0):
- ftol = float(Parameters["CostDecrementTolerance"])
- factr = ftol * 1.e14
- else:
- ftol = 1.e-7
- factr = ftol * 1.e14
- logging.debug("%s Diminution relative minimale du cout lors de l'arret = %s"%(self._name, str(1./factr)))
- if Parameters.has_key("ProjectedGradientTolerance") and (Parameters["ProjectedGradientTolerance"] > -1):
- pgtol = float(Parameters["ProjectedGradientTolerance"])
- else:
- pgtol = -1
- logging.debug("%s Maximum des composantes du gradient projete lors de l'arret = %s"%(self._name, str(pgtol)))
- if Parameters.has_key("GradientNormTolerance") and (Parameters["GradientNormTolerance"] > -1):
- gtol = float(Parameters["GradientNormTolerance"])
- else:
- gtol = 1.e-05
- logging.debug("%s Maximum des composantes du gradient lors de l'arret = %s"%(self._name, str(gtol)))
- InnerMinimizerList = ["CG", "NCG", "BFGS"]
- if Parameters.has_key("InnerMinimizer") and (Parameters["InnerMinimizer"] in InnerMinimizerList):
- InnerMinimizer = str( Parameters["InnerMinimizer"] )
- else:
- InnerMinimizer = "BFGS"
- logging.debug("%s Minimiseur interne utilisé = %s"%(self._name, InnerMinimizer))
+        def GradientOfCostFunctionLM(x):
+            _X = numpy.asmatrix(numpy.ravel( x )).T
+            return - RdemiI*HO["Tangent"].asMatrix( _X )
+ #
+ # Point de démarrage de l'optimisation : Xini = Xb
+ # ------------------------------------
+ Xini = self._parameters["InitializationPoint"]
#
# Minimisation de la fonctionnelle
# --------------------------------
- if Minimizer == "LBFGSB":
- Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
+ nbPreviousSteps = self.StoredVariables["CostFunctionJ"].stepnumber()
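+        # nbPreviousSteps is captured by closure in CostFunction above, so that
+        # IndexOfOptimum is searched only among the states of the current run.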
+ #
+ if self._parameters["Minimizer"] == "LBFGSB":
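+            # For the scipy releases selected by the (lexicographic) string
+            # comparison below, a locally bundled lbfgsbhlt module is used in
+            # place of scipy.optimize; it is presumably a patched copy of the
+            # L-BFGS-B wrapper (assumption, not verified here).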
+ if "0.19" <= scipy.version.version <= "1.1.0":
+ import lbfgsbhlt as optimiseur
+ else:
+ import scipy.optimize as optimiseur
+ Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
func = CostFunction,
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = Bounds,
- maxfun = maxiter-1,
- factr = factr,
- pgtol = pgtol,
- iprint = iprint,
+ bounds = self._parameters["Bounds"],
+ maxfun = self._parameters["MaximumNumberOfSteps"]-1,
+ factr = self._parameters["CostDecrementTolerance"]*1.e14,
+ pgtol = self._parameters["ProjectedGradientTolerance"],
+ iprint = self._parameters["optiprint"],
)
nfeval = Informations['funcalls']
rc = Informations['warnflag']
- elif Minimizer == "TNC":
+ elif self._parameters["Minimizer"] == "TNC":
Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
func = CostFunction,
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = Bounds,
- maxfun = maxiter,
- pgtol = pgtol,
- ftol = ftol,
- messages = message,
+ bounds = self._parameters["Bounds"],
+ maxfun = self._parameters["MaximumNumberOfSteps"],
+ pgtol = self._parameters["ProjectedGradientTolerance"],
+ ftol = self._parameters["CostDecrementTolerance"],
+ messages = self._parameters["optmessages"],
)
- elif Minimizer == "CG":
+ elif self._parameters["Minimizer"] == "CG":
Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
f = CostFunction,
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- maxiter = maxiter,
- gtol = gtol,
- disp = disp,
+ maxiter = self._parameters["MaximumNumberOfSteps"],
+ gtol = self._parameters["GradientNormTolerance"],
+ disp = self._parameters["optdisp"],
full_output = True,
)
- elif Minimizer == "NCG":
+ elif self._parameters["Minimizer"] == "NCG":
Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
f = CostFunction,
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- maxiter = maxiter,
- avextol = ftol,
- disp = disp,
+ maxiter = self._parameters["MaximumNumberOfSteps"],
+ avextol = self._parameters["CostDecrementTolerance"],
+ disp = self._parameters["optdisp"],
full_output = True,
)
- elif Minimizer == "BFGS":
+ elif self._parameters["Minimizer"] == "BFGS":
Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
f = CostFunction,
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- maxiter = maxiter,
- gtol = gtol,
- disp = disp,
+ maxiter = self._parameters["MaximumNumberOfSteps"],
+ gtol = self._parameters["GradientNormTolerance"],
+ disp = self._parameters["optdisp"],
full_output = True,
)
+ elif self._parameters["Minimizer"] == "LM":
+ Minimum, cov_x, infodict, mesg, rc = scipy.optimize.leastsq(
+ func = CostFunctionLM,
+ x0 = Xini,
+ Dfun = GradientOfCostFunctionLM,
+ args = (),
+ ftol = self._parameters["CostDecrementTolerance"],
+ maxfev = self._parameters["MaximumNumberOfSteps"],
+ gtol = self._parameters["GradientNormTolerance"],
+ full_output = True,
+ )
+ nfeval = infodict['nfev']
else:
- raise ValueError("Error in Minimizer name: %s"%Minimizer)
+ raise ValueError("Error in Minimizer name: %s"%self._parameters["Minimizer"])
+ #
+ IndexMin = numpy.argmin( self.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ MinJ = self.StoredVariables["CostFunctionJ"][IndexMin]
#
# Correction pour pallier a un bug de TNC sur le retour du Minimum
# ----------------------------------------------------------------
- StepMin = numpy.argmin( self.StoredVariables["CostFunctionJ"].valueserie() )
- MinJ = self.StoredVariables["CostFunctionJ"].valueserie(step = StepMin)
- Minimum = self.StoredVariables["CurrentState"].valueserie(step = StepMin)
- #
- logging.debug("%s %s Step of min cost = %s"%(self._name, Minimizer, StepMin))
- logging.debug("%s %s Minimum cost = %s"%(self._name, Minimizer, MinJ))
- logging.debug("%s %s Minimum state = %s"%(self._name, Minimizer, Minimum))
- logging.debug("%s %s Nb of F = %s"%(self._name, Minimizer, nfeval))
- logging.debug("%s %s RetCode = %s"%(self._name, Minimizer, rc))
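+        # Some minimizers (TNC in particular, as noted above) may return a point
+        # that differs from the best evaluated state, so the optimum is re-read
+        # from the stored current states when they are available.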
+ if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
+ Minimum = self.StoredVariables["CurrentState"][IndexMin]
#
# Obtention de l'analyse
# ----------------------
- Xa = numpy.asmatrix(Minimum).T
- logging.debug("%s Analyse Xa = %s"%(self._name, Xa))
+ Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
#
self.StoredVariables["Analysis"].store( Xa.A1 )
- self.StoredVariables["Innovation"].store( d.A1 )
#
- logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("MB")))
- logging.debug("%s Terminé"%self._name)
+ if self._toStore("OMA") or \
+ self._toStore("SimulatedObservationAtOptimum"):
+ if self._toStore("SimulatedObservationAtCurrentState"):
+ HXa = self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+ elif self._toStore("SimulatedObservationAtCurrentOptimum"):
+ HXa = self.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+ else:
+ HXa = Hm( Xa )
+ #
+ #
+ # Calculs et/ou stockages supplémentaires
+ # ---------------------------------------
+ if self._toStore("Innovation") or \
+ self._toStore("OMB"):
+ d = Y - HXb
+ if self._toStore("Innovation"):
+ self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ if self._toStore("BMA"):
+ self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+ if self._toStore("OMA"):
+ self.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+ if self._toStore("OMB"):
+ self.StoredVariables["OMB"].store( numpy.ravel(d) )
+ if self._toStore("SimulatedObservationAtBackground"):
+ self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ if self._toStore("SimulatedObservationAtOptimum"):
+ self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
#
+ self._post_run(HO)
return 0
# ==============================================================================
if __name__ == "__main__":
- print '\n AUTODIAGNOSTIC \n'
+ print('\n AUTODIAGNOSTIC\n')