)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- if logging.getLogger().level < logging.WARNING:
- self.__iprint, self.__disp = 1, 1
- self.__message = scipy.optimize.tnc.MSG_ALL
- else:
- self.__iprint, self.__disp = -1, 0
- self.__message = scipy.optimize.tnc.MSG_NONE
- #
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
- if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0):
- Bounds = self._parameters["Bounds"]
- logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
- else:
- Bounds = None
+ self._pre_run(Parameters)
#
# Workaround for a TNC bug affecting the returned Minimum
if self._parameters["Minimizer"] == "TNC":
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = Bounds,
+ bounds = self._parameters["Bounds"],
maxfun = self._parameters["MaximumNumberOfSteps"]-1,
factr = self._parameters["CostDecrementTolerance"]*1.e14,
pgtol = self._parameters["ProjectedGradientTolerance"],
- iprint = self.__iprint,
+ iprint = self._parameters["optiprint"],
)
nfeval = Informations['funcalls']
rc = Informations['warnflag']
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = Bounds,
+ bounds = self._parameters["Bounds"],
maxfun = self._parameters["MaximumNumberOfSteps"],
pgtol = self._parameters["ProjectedGradientTolerance"],
ftol = self._parameters["CostDecrementTolerance"],
- messages = self.__message,
+ messages = self._parameters["optmessages"],
)
elif self._parameters["Minimizer"] == "CG":
Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
args = (),
maxiter = self._parameters["MaximumNumberOfSteps"],
gtol = self._parameters["GradientNormTolerance"],
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
full_output = True,
)
elif self._parameters["Minimizer"] == "NCG":
args = (),
maxiter = self._parameters["MaximumNumberOfSteps"],
avextol = self._parameters["CostDecrementTolerance"],
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
full_output = True,
)
elif self._parameters["Minimizer"] == "BFGS":
args = (),
maxiter = self._parameters["MaximumNumberOfSteps"],
gtol = self._parameters["GradientNormTolerance"],
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
full_output = True,
)
else:
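Review note: each branch of the dispatch above forwards the same tolerance parameters to a different scipy.optimize routine. As a reference for the parameter mapping, here is a minimal standalone sketch of the same pattern using the modern scipy.optimize.minimize front end instead of the legacy fmin_* functions; the function and dictionary names are illustrative, not ADAO's actual API.

    # Sketch of the minimizer dispatch, assuming only numpy and scipy.
    # Parameter names mirror ADAO's; the mapping to scipy options is illustrative.
    import numpy
    import scipy.optimize

    def dispatch_minimizer(cost, grad, x0, parameters):
        name = parameters["Minimizer"]
        if name == "LBFGSB":
            res = scipy.optimize.minimize(
                cost, x0, jac=grad, method="L-BFGS-B",
                bounds=parameters["Bounds"],
                options={"maxiter": parameters["MaximumNumberOfSteps"],
                         "ftol": parameters["CostDecrementTolerance"],
                         "gtol": parameters["ProjectedGradientTolerance"]})
        elif name in ("CG", "BFGS"):
            res = scipy.optimize.minimize(
                cost, x0, jac=grad, method=name,
                options={"maxiter": parameters["MaximumNumberOfSteps"],
                         "gtol": parameters["GradientNormTolerance"]})
        else:
            raise ValueError("Unknown minimizer %s" % name)
        return res.x

    x = dispatch_minimizer(lambda x: float(numpy.sum(x**2)),
                           lambda x: 2.*x,
                           numpy.ones(3),
                           {"Minimizer": "CG",
                            "MaximumNumberOfSteps": 100,
                            "GradientNormTolerance": 1.e-5})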
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- if logging.getLogger().level < logging.WARNING:
- self.__iprint, self.__disp = 1, 1
- self.__message = scipy.optimize.tnc.MSG_ALL
- else:
- self.__iprint, self.__disp = -1, 0
- self.__message = scipy.optimize.tnc.MSG_NONE
- #
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
- if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0):
- Bounds = self._parameters["Bounds"]
- logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
- else:
- Bounds = None
+ self._pre_run(Parameters)
#
# Workaround for a TNC bug affecting the returned Minimum
if self._parameters["Minimizer"] == "TNC":
elif self._parameters["EstimationOf"] == "Parameters":
pass
#
- if Bounds is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
- _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(Bounds)[:,0])),axis=1)
- _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(Bounds)[:,1])),axis=1)
+ if self._parameters["Bounds"] is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
+ _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(self._parameters["Bounds"])[:,0])),axis=1)
+ _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(self._parameters["Bounds"])[:,1])),axis=1)
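Review note: the two max/min lines above project the state column-wise onto the box bounds. On plain numpy arrays the same "EstimateProjection" constraint can be written with numpy.clip; a small self-contained sketch (the hstack form in the patch exists because the code works on numpy.matrix columns):

    # Clip a state vector into [lower, upper] box bounds; equivalent to the
    # max/min + hstack idiom used in the patch.
    import numpy

    bounds = numpy.array([[0., 1.], [-1., 1.], [2., 5.]])  # one (low, high) pair per component
    x = numpy.array([1.5, -3.0, 3.0])

    x_projected = numpy.clip(x, bounds[:, 0], bounds[:, 1])
    # -> array([ 1., -1.,  3.])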
#
# Innovation step: difference with the observations
if self._parameters["EstimationOf"] == "State":
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = Bounds,
+ bounds = self._parameters["Bounds"],
maxfun = self._parameters["MaximumNumberOfSteps"]-1,
factr = self._parameters["CostDecrementTolerance"]*1.e14,
pgtol = self._parameters["ProjectedGradientTolerance"],
- iprint = self.__iprint,
+ iprint = self._parameters["optiprint"],
)
nfeval = Informations['funcalls']
rc = Informations['warnflag']
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = Bounds,
+ bounds = self._parameters["Bounds"],
maxfun = self._parameters["MaximumNumberOfSteps"],
pgtol = self._parameters["ProjectedGradientTolerance"],
ftol = self._parameters["CostDecrementTolerance"],
- messages = self.__message,
+ messages = self._parameters["optmessages"],
)
elif self._parameters["Minimizer"] == "CG":
Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
args = (),
maxiter = self._parameters["MaximumNumberOfSteps"],
gtol = self._parameters["GradientNormTolerance"],
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
full_output = True,
)
elif self._parameters["Minimizer"] == "NCG":
args = (),
maxiter = self._parameters["MaximumNumberOfSteps"],
avextol = self._parameters["CostDecrementTolerance"],
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
full_output = True,
)
elif self._parameters["Minimizer"] == "BFGS":
args = (),
maxiter = self._parameters["MaximumNumberOfSteps"],
gtol = self._parameters["GradientNormTolerance"],
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
full_output = True,
)
else:
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- #
- self.setParameters(Parameters)
+ self._pre_run(Parameters)
#
Hm = HO["Direct"].appliedTo
Ht = HO["Tangent"].appliedInXTo
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
+ self._pre_run(Parameters)
#
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
- # Operators
- # ---------
Hm = HO["Tangent"].asMatrix(Xb)
Hm = Hm.reshape(Y.size,Xb.size) # ADAO & check shape
Ha = HO["Adjoint"].asMatrix(Xb)
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- if logging.getLogger().level < logging.WARNING:
- self.__disp = 1
- else:
- self.__disp = 0
- #
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
+ self._pre_run(Parameters)
#
if not PlatformInfo.has_nlopt and not self._parameters["Minimizer"] in ["COBYLA", "POWELL", "SIMPLEX"]:
self._parameters["Minimizer"] = "SIMPLEX"
- if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0):
- Bounds = self._parameters["Bounds"]
- logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
- else:
- Bounds = None
#
# Operators
# ---------
xtol = self._parameters["StateVariationTolerance"],
ftol = self._parameters["CostDecrementTolerance"],
full_output = True,
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
)
elif self._parameters["Minimizer"] == "COBYLA" and not PlatformInfo.has_nlopt:
def make_constraints(bounds):
upper = lambda x: b - x[i]
constraints = constraints + [lower] + [upper]
return constraints
- if Bounds is None:
+ if self._parameters["Bounds"] is None:
raise ValueError("Bounds have to be given for all axes as a list of lower/upper pairs!")
Minimum = scipy.optimize.fmin_cobyla(
func = CostFunction,
x0 = Xini,
- cons = make_constraints( Bounds ),
+ cons = make_constraints( self._parameters["Bounds"] ),
args = (self._parameters["QualityCriterion"],),
consargs = (), # To avoid extra-args
maxfun = self._parameters["MaximumNumberOfFunctionEvaluations"],
rhobeg = 1.0,
rhoend = self._parameters["StateVariationTolerance"],
catol = 2.*self._parameters["StateVariationTolerance"],
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
)
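Review note: the full body of make_constraints is not shown in this hunk; fmin_cobyla expects a list of functions that must stay >= 0 at the solution, one lower and one upper inequality per component. A plausible reconstruction follows; note the default-argument binding (i=i, a=a, b=b), without which every lambda created in the loop would capture the loop's final values.

    # Plausible reconstruction of make_constraints for scipy.optimize.fmin_cobyla.
    def make_constraints(bounds):
        constraints = []
        for i, (a, b) in enumerate(bounds):
            lower = lambda x, i=i, a=a: x[i] - a   # enforces x[i] >= a
            upper = lambda x, i=i, b=b: b - x[i]   # enforces x[i] <= b
            constraints = constraints + [lower] + [upper]
        return constraints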
elif self._parameters["Minimizer"] == "COBYLA" and PlatformInfo.has_nlopt:
import nlopt
# DFO, so no gradient
return CostFunction(_Xx, self._parameters["QualityCriterion"])
opt.set_min_objective(_f)
- if Bounds is not None:
- lub = numpy.array(Bounds).reshape((Xini.size,2))
+ if self._parameters["Bounds"] is not None:
+ lub = numpy.array(self._parameters["Bounds"]).reshape((Xini.size,2))
lb = lub[:,0]
ub = lub[:,1]
- if self.__disp:
+ if self._parameters["optdisp"]:
print "%s: upper bounds %s"%(opt.get_algorithm_name(),ub)
print "%s: lower bounds %s"%(opt.get_algorithm_name(),lb)
opt.set_upper_bounds(ub)
opt.set_xtol_rel(2.*self._parameters["StateVariationTolerance"])
opt.set_maxeval(self._parameters["MaximumNumberOfFunctionEvaluations"])
Minimum = opt.optimize( Xini )
- if self.__disp:
+ if self._parameters["optdisp"]:
print "%s: optimal state: %s"%(opt.get_algorithm_name(),Minimum)
print "%s: minimum of J: %s"%(opt.get_algorithm_name(),opt.last_optimum_value())
print "%s: return code: %i"%(opt.get_algorithm_name(),opt.last_optimize_result())
xtol = self._parameters["StateVariationTolerance"],
ftol = self._parameters["CostDecrementTolerance"],
full_output = True,
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
)
elif self._parameters["Minimizer"] == "SIMPLEX" and PlatformInfo.has_nlopt:
import nlopt
# DFO, so no gradient
return CostFunction(_Xx, self._parameters["QualityCriterion"])
opt.set_min_objective(_f)
- if Bounds is not None:
- lub = numpy.array(Bounds).reshape((Xini.size,2))
+ if self._parameters["Bounds"] is not None:
+ lub = numpy.array(self._parameters["Bounds"]).reshape((Xini.size,2))
lb = lub[:,0]
ub = lub[:,1]
- if self.__disp:
+ if self._parameters["optdisp"]:
print "%s: upper bounds %s"%(opt.get_algorithm_name(),ub)
print "%s: lower bounds %s"%(opt.get_algorithm_name(),lb)
opt.set_upper_bounds(ub)
opt.set_xtol_rel(2.*self._parameters["StateVariationTolerance"])
opt.set_maxeval(self._parameters["MaximumNumberOfFunctionEvaluations"])
Minimum = opt.optimize( Xini )
- if self.__disp:
+ if self._parameters["optdisp"]:
print "%s: optimal state: %s"%(opt.get_algorithm_name(),Minimum)
print "%s: minimum of J: %s"%(opt.get_algorithm_name(),opt.last_optimum_value())
print "%s: return code: %i"%(opt.get_algorithm_name(),opt.last_optimize_result())
# DFO, so no gradient
return CostFunction(_Xx, self._parameters["QualityCriterion"])
opt.set_min_objective(_f)
- if Bounds is not None:
- lub = numpy.array(Bounds).reshape((Xini.size,2))
+ if self._parameters["Bounds"] is not None:
+ lub = numpy.array(self._parameters["Bounds"]).reshape((Xini.size,2))
lb = lub[:,0]
ub = lub[:,1]
- if self.__disp:
+ if self._parameters["optdisp"]:
print "%s: upper bounds %s"%(opt.get_algorithm_name(),ub)
print "%s: lower bounds %s"%(opt.get_algorithm_name(),lb)
opt.set_upper_bounds(ub)
opt.set_xtol_rel(2.*self._parameters["StateVariationTolerance"])
opt.set_maxeval(self._parameters["MaximumNumberOfFunctionEvaluations"])
Minimum = opt.optimize( Xini )
- if self.__disp:
+ if self._parameters["optdisp"]:
print "%s: optimal state: %s"%(opt.get_algorithm_name(),Minimum)
print "%s: minimum of J: %s"%(opt.get_algorithm_name(),opt.last_optimum_value())
print "%s: return code: %i"%(opt.get_algorithm_name(),opt.last_optimize_result())
# DFO, so no gradient
return CostFunction(_Xx, self._parameters["QualityCriterion"])
opt.set_min_objective(_f)
- if Bounds is not None:
- lub = numpy.array(Bounds).reshape((Xini.size,2))
+ if self._parameters["Bounds"] is not None:
+ lub = numpy.array(self._parameters["Bounds"]).reshape((Xini.size,2))
lb = lub[:,0]
ub = lub[:,1]
- if self.__disp:
+ if self._parameters["optdisp"]:
print "%s: upper bounds %s"%(opt.get_algorithm_name(),ub)
print "%s: lower bounds %s"%(opt.get_algorithm_name(),lb)
opt.set_upper_bounds(ub)
opt.set_xtol_rel(2.*self._parameters["StateVariationTolerance"])
opt.set_maxeval(self._parameters["MaximumNumberOfFunctionEvaluations"])
Minimum = opt.optimize( Xini )
- if self.__disp:
+ if self._parameters["optdisp"]:
print "%s: optimal state: %s"%(opt.get_algorithm_name(),Minimum)
print "%s: minimum of J: %s"%(opt.get_algorithm_name(),opt.last_optimum_value())
print "%s: return code: %i"%(opt.get_algorithm_name(),opt.last_optimize_result())
# DFO, so no gradient
return CostFunction(_Xx, self._parameters["QualityCriterion"])
opt.set_min_objective(_f)
- if Bounds is not None:
- lub = numpy.array(Bounds).reshape((Xini.size,2))
+ if self._parameters["Bounds"] is not None:
+ lub = numpy.array(self._parameters["Bounds"]).reshape((Xini.size,2))
lb = lub[:,0]
ub = lub[:,1]
- if self.__disp:
+ if self._parameters["optdisp"]:
print "%s: upper bounds %s"%(opt.get_algorithm_name(),ub)
print "%s: lower bounds %s"%(opt.get_algorithm_name(),lb)
opt.set_upper_bounds(ub)
opt.set_xtol_rel(2.*self._parameters["StateVariationTolerance"])
opt.set_maxeval(self._parameters["MaximumNumberOfFunctionEvaluations"])
Minimum = opt.optimize( Xini )
- if self.__disp:
+ if self._parameters["optdisp"]:
print "%s: optimal state: %s"%(opt.get_algorithm_name(),Minimum)
print "%s: minimum of J: %s"%(opt.get_algorithm_name(),opt.last_optimum_value())
print "%s: return code: %i"%(opt.get_algorithm_name(),opt.last_optimize_result())
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- #
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
+ self._pre_run(Parameters)
#
# Precompute the inverses of B and R
# ----------------------------------
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
+ self._pre_run(Parameters)
#
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
- # Observation operator
- # --------------------
Hm = HO["Tangent"].asMatrix(Xb)
Hm = Hm.reshape(Y.size,Xb.size) # ADAO & check shape
Ha = HO["Adjoint"].asMatrix(Xb)
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
+ self._pre_run(Parameters)
#
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
- if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0):
- Bounds = self._parameters["Bounds"]
- logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
- else:
- Bounds = None
if self._parameters["EstimationOf"] == "Parameters":
self._parameters["StoreInternalVariables"] = True
#
Xn_predicted = Xn
Pn_predicted = Pn
#
- if Bounds is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
- Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(Bounds)[:,0])),axis=1)
- Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(Bounds)[:,1])),axis=1)
+ if self._parameters["Bounds"] is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
+ Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(self._parameters["Bounds"])[:,0])),axis=1)
+ Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(self._parameters["Bounds"])[:,1])),axis=1)
#
if self._parameters["EstimationOf"] == "State":
d = Ynpu - numpy.asmatrix(numpy.ravel( H( (Xn_predicted, None) ) )).T
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- #
- self.setParameters(Parameters)
+ self._pre_run(Parameters)
#
Hm = HO["Direct"].appliedTo
#
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- #
- self.setParameters(Parameters)
+ self._pre_run(Parameters)
#
Hm = HO["Direct"].appliedTo
if self._parameters["ResiduFormula"] in ["Taylor", "TaylorOnNorm"]:
#
# Loop over the perturbations
# ---------------------------
- Normalisation= -1
NormesdX = []
NormesFXdX = []
NormesdFX = []
Residu = NormedFXGdX / (amplitude*amplitude)
elif self._parameters["ResiduFormula"] == "Norm":
Residu = NormedFXsAm
- if Normalisation < 0 : Normalisation = Residu
#
msg = " %2i %5.0e %9.3e %9.3e %9.3e %9.3e %9.3e | %9.3e | %9.3e %4.0f"%(i,amplitude,NormeX,NormeFX,NormeFXdX,NormedX,NormedFX,NormedFXsdX,Residu,math.log10(max(1.e-99,Residu)))
msgs += "\n" + __marge + msg
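Review note: the residual printed above implements a Taylor test: if F is twice differentiable, ||F(X + a*dX) - F(X) - a*F'(X)dX|| must decrease like a**2 as the amplitude a shrinks, so Residu/a**2 stays roughly constant when the implemented gradient is consistent with F. A minimal sketch of that check on a toy function (all names below are illustrative):

    # Minimal Taylor-remainder check of a hand-coded Jacobian.
    import numpy

    def F(x):  return numpy.array([numpy.sin(x[0]), x[0]*x[1]])
    def dF(x): return numpy.array([[numpy.cos(x[0]), 0.], [x[1], x[0]]])

    X  = numpy.array([0.3, 0.7])
    dX = numpy.array([1.0, -1.0])
    for amplitude in [1.e-1, 1.e-2, 1.e-3]:
        R = numpy.linalg.norm(F(X + amplitude*dX) - F(X)
                              - amplitude*numpy.dot(dF(X), dX))
        print("a = %7.1e   R/a^2 = %9.3e" % (amplitude, R/amplitude**2))
    # R/a**2 is nearly constant, matching the "Taylor" residual of the algorithm.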
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- #
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
+ self._pre_run(Parameters)
#
if self._parameters["EstimationOf"] == "Parameters":
self._parameters["StoreInternalVariables"] = True
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
+ self._pre_run(Parameters)
#
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
- # Observation operator
- # --------------------
Hm = HO["Tangent"].asMatrix(None)
Hm = Hm.reshape(Y.size,-1) # ADAO & check shape
Ha = HO["Adjoint"].asMatrix(None)
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- #
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
+ self._pre_run(Parameters)
#
def RMS(V1, V2):
import math
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- if logging.getLogger().level < logging.WARNING:
- self.__iprint, self.__disp = 1, 1
- self.__message = scipy.optimize.tnc.MSG_ALL
- else:
- self.__iprint, self.__disp = -1, 0
- self.__message = scipy.optimize.tnc.MSG_NONE
- #
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
- if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0):
- Bounds = self._parameters["Bounds"]
- logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
- else:
- Bounds = None
+ self._pre_run(Parameters)
#
# Workaround for a TNC bug affecting the returned Minimum
if self._parameters["Minimizer"] == "TNC":
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = Bounds,
+ bounds = self._parameters["Bounds"],
maxfun = self._parameters["MaximumNumberOfSteps"]-1,
factr = self._parameters["CostDecrementTolerance"]*1.e14,
pgtol = self._parameters["ProjectedGradientTolerance"],
- iprint = self.__iprint,
+ iprint = self._parameters["optiprint"],
)
nfeval = Informations['funcalls']
rc = Informations['warnflag']
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = Bounds,
+ bounds = self._parameters["Bounds"],
maxfun = self._parameters["MaximumNumberOfSteps"],
pgtol = self._parameters["ProjectedGradientTolerance"],
ftol = self._parameters["CostDecrementTolerance"],
- messages = self.__message,
+ messages = self._parameters["optmessages"],
)
elif self._parameters["Minimizer"] == "CG":
Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
args = (),
maxiter = self._parameters["MaximumNumberOfSteps"],
gtol = self._parameters["GradientNormTolerance"],
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
full_output = True,
)
elif self._parameters["Minimizer"] == "NCG":
args = (),
maxiter = self._parameters["MaximumNumberOfSteps"],
avextol = self._parameters["CostDecrementTolerance"],
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
full_output = True,
)
elif self._parameters["Minimizer"] == "BFGS":
args = (),
maxiter = self._parameters["MaximumNumberOfSteps"],
gtol = self._parameters["GradientNormTolerance"],
- disp = self.__disp,
+ disp = self._parameters["optdisp"],
full_output = True,
)
elif self._parameters["Minimizer"] == "LM":
BasicObjects.Algorithm.__init__(self, "OBSERVERTEST")
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
+ self._pre_run(Parameters)
print "Results of observer check on all potential variables or commands,"
print " only activated on selected ones by explicit association."
print
#
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
__Xa = 1.+numpy.arange(3.)
__Xb = numpy.zeros(3)
__YY = 1.+numpy.arange(5.)
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- #
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
+ self._pre_run(Parameters)
#
if self._parameters.has_key("BoxBounds") and (type(self._parameters["BoxBounds"]) is type([]) or type(self._parameters["BoxBounds"]) is type(())) and (len(self._parameters["BoxBounds"]) > 0):
BoxBounds = self._parameters["BoxBounds"]
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
+ self._pre_run(Parameters)
#
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
- if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0):
- Bounds = self._parameters["Bounds"]
- logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
- else:
- Bounds = None
- #
- # Observation operator
- # --------------------
Hm = HO["Direct"].appliedTo
#
# Optional use of a precomputed H(Xb) vector
func = CostFunction,
x0 = Xini,
fprime = GradientOfCostFunction,
- bounds = Bounds,
+ bounds = self._parameters["Bounds"],
quantile = self._parameters["Quantile"],
maxfun = self._parameters["MaximumNumberOfSteps"],
toler = self._parameters["CostDecrementTolerance"],
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
- #
- self.setParameters(Parameters)
+ self._pre_run(Parameters)
#
Hm = HO["Direct"].appliedTo
#
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
+ self._pre_run(Parameters)
#
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
- # Operators
- # ---------
Hm = HO["Direct"].appliedTo
Ht = HO["Tangent"].appliedInXTo
#
)
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
- self._pre_run()
+ self._pre_run(Parameters)
#
- # Control parameters
- # ------------------
- self.setParameters(Parameters)
- #
- if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0):
- Bounds = self._parameters["Bounds"]
- logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
- else:
- Bounds = None
if self._parameters["EstimationOf"] == "Parameters":
self._parameters["StoreInternalVariables"] = True
#
Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
nbSpts = 2*Xn.size+1
#
- if Bounds is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
+ if self._parameters["Bounds"] is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
for point in range(nbSpts):
- Xnp[:,point] = numpy.max(numpy.hstack((Xnp[:,point],numpy.asmatrix(Bounds)[:,0])),axis=1)
- Xnp[:,point] = numpy.min(numpy.hstack((Xnp[:,point],numpy.asmatrix(Bounds)[:,1])),axis=1)
+ Xnp[:,point] = numpy.max(numpy.hstack((Xnp[:,point],numpy.asmatrix(self._parameters["Bounds"])[:,0])),axis=1)
+ Xnp[:,point] = numpy.min(numpy.hstack((Xnp[:,point],numpy.asmatrix(self._parameters["Bounds"])[:,1])),axis=1)
#
XEtnnp = []
for point in range(nbSpts):
if Cm is not None and Un is not None: # Caution: if Cm also appears in M, it is counted twice!
Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
XEtnnpi = XEtnnpi + Cm * Un
- if Bounds is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
- XEtnnpi = numpy.max(numpy.hstack((XEtnnpi,numpy.asmatrix(Bounds)[:,0])),axis=1)
- XEtnnpi = numpy.min(numpy.hstack((XEtnnpi,numpy.asmatrix(Bounds)[:,1])),axis=1)
+ if self._parameters["Bounds"] is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
+ XEtnnpi = numpy.max(numpy.hstack((XEtnnpi,numpy.asmatrix(self._parameters["Bounds"])[:,0])),axis=1)
+ XEtnnpi = numpy.min(numpy.hstack((XEtnnpi,numpy.asmatrix(self._parameters["Bounds"])[:,1])),axis=1)
elif self._parameters["EstimationOf"] == "Parameters":
# --- > By convention, M = Id, Q = 0
XEtnnpi = Xnp[:,point]
#
Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
#
- if Bounds is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
- Xncm = numpy.max(numpy.hstack((Xncm,numpy.asmatrix(Bounds)[:,0])),axis=1)
- Xncm = numpy.min(numpy.hstack((Xncm,numpy.asmatrix(Bounds)[:,1])),axis=1)
+ if self._parameters["Bounds"] is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
+ Xncm = numpy.max(numpy.hstack((Xncm,numpy.asmatrix(self._parameters["Bounds"])[:,0])),axis=1)
+ Xncm = numpy.min(numpy.hstack((Xncm,numpy.asmatrix(self._parameters["Bounds"])[:,1])),axis=1)
#
if self._parameters["EstimationOf"] == "State": Pnm = Q
elif self._parameters["EstimationOf"] == "Parameters": Pnm = 0.
for point in range(nbSpts):
Pnm += Wc[point] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
#
- if self._parameters["EstimationOf"] == "Parameters" and Bounds is not None:
+ if self._parameters["EstimationOf"] == "Parameters" and self._parameters["Bounds"] is not None:
Pnmdemi = self._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
else:
Pnmdemi = numpy.linalg.cholesky(Pnm)
#
Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
#
- if Bounds is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
+ if self._parameters["Bounds"] is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
for point in range(nbSpts):
- Xnnp[:,point] = numpy.max(numpy.hstack((Xnnp[:,point],numpy.asmatrix(Bounds)[:,0])),axis=1)
- Xnnp[:,point] = numpy.min(numpy.hstack((Xnnp[:,point],numpy.asmatrix(Bounds)[:,1])),axis=1)
+ Xnnp[:,point] = numpy.max(numpy.hstack((Xnnp[:,point],numpy.asmatrix(self._parameters["Bounds"])[:,0])),axis=1)
+ Xnnp[:,point] = numpy.min(numpy.hstack((Xnnp[:,point],numpy.asmatrix(self._parameters["Bounds"])[:,1])),axis=1)
#
Ynnp = []
for point in range(nbSpts):
Xn = Xncm + Kn * d
Pn = Pnm - Kn * Pyyn * Kn.T
#
- if Bounds is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
- Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(Bounds)[:,0])),axis=1)
- Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(Bounds)[:,1])),axis=1)
+ if self._parameters["Bounds"] is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
+ Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(self._parameters["Bounds"])[:,0])),axis=1)
+ Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(self._parameters["Bounds"])[:,1])),axis=1)
#
self.StoredVariables["Analysis"].store( Xn.A1 )
if "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
- def _pre_run(self):
+ def _pre_run(self, Parameters ):
"Pré-calcul"
logging.debug("%s Lancement", self._name)
logging.debug("%s Taille mémoire utilisée de %.1f Mio", self._name, self._m.getUsedMemory("Mio"))
+ #
+ # Update self._parameters with the user-supplied Parameters
+ self.__setParameters(Parameters)
+ #
+ # Corrections and additions
+ if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0):
+ logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
+ else:
+ self._parameters["Bounds"] = None
+ #
+ if logging.getLogger().level < logging.WARNING:
+ self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
+ if PlatformInfo.has_scipy:
+ import scipy.optimize
+ self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
+ else:
+ self._parameters["optmessages"] = 15 # numeric value of scipy.optimize.tnc.MSG_ALL
+ else:
+ self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
+ if PlatformInfo.has_scipy:
+ import scipy.optimize
+ self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
+ else:
+ self._parameters["optmessages"] = 0 # numeric value of scipy.optimize.tnc.MSG_NONE (15 would enable all messages)
+ #
return 0
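Review note: this hunk is the heart of the patch: the parameter merging, the Bounds normalization and the verbosity flags that every algorithm used to duplicate in run() now live once in the base class, and each run() starts with a single self._pre_run(Parameters) call. A minimal sketch of the pattern, with illustrative names rather than ADAO's full class:

    # Minimal sketch of the refactoring pattern applied throughout this patch.
    import logging

    class Algorithm(object):
        def __init__(self):
            self._name = self.__class__.__name__
            self._parameters = {}

        def _pre_run(self, Parameters):
            self.__setParameters(Parameters)
            # Normalize optional entries once, so run() can use plain indexing.
            bounds = self._parameters.get("Bounds")
            if not (isinstance(bounds, (list, tuple)) and len(bounds) > 0):
                self._parameters["Bounds"] = None
            verbose = logging.getLogger().level < logging.WARNING
            self._parameters["optdisp"] = 1 if verbose else 0
            return 0

        def __setParameters(self, fromDico={}):
            self._parameters.update(fromDico or {})

    class MyAlgorithm(Algorithm):
        def run(self, Parameters=None):
            self._pre_run(Parameters)      # replaces the old per-run boilerplate
            return self._parameters["Bounds"], self._parameters["optdisp"]

    print(MyAlgorithm().run({"Bounds": [(0., 1.)]}))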
def _post_run(self,_oH=None):
raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(__val, name, listval))
return __val
- def setParameters(self, fromDico={}):
+ def __setParameters(self, fromDico={}):
"""
Stores the received parameters in the internal dictionary.
"""