if LBounds is not None: # "EstimateProjection" by default
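# Project Xr componentwise onto [LBounds[:,0], LBounds[:,1]] (clip below, then above)
# before re-evaluating the direct operator; this is equivalent to
# numpy.clip(numpy.ravel(Xr), numpy.ravel(LBounds[:,0]), numpy.ravel(LBounds[:,1])).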
Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
- Yr = Hm( Xr )
+ Yr = numpy.asarray(Hm( Xr ))
else:
raise ValueError("Quantile simulations can only be Linear or NonLinear.")
#
BI = B.getI()
RI = R.getI()
#
- Xini = selfA._parameters["InitializationPoint"]
- #
- HXb = numpy.ravel( Hm( Xb ) ).reshape((-1,1))
+ HXb = numpy.asarray(Hm( Xb )).reshape((-1,1))
Innovation = Y - HXb
#
# Outer Loop
iOuter = 0
J = 1./mpr
DeltaJ = 1./mpr
- Xr = Xini.reshape((-1,1))
+ Xr = numpy.asarray(selfA._parameters["InitializationPoint"]).reshape((-1,1))
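# Outer loop: the observation operator is presumably relinearized (Ht) around the
# current iterate Xr; each inner loop below minimizes a quadratic cost in the increment dx.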
while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
#
# Inner Loop
# Definition of the cost function
# -------------------------------
def CostFunction(dx):
- _dX = numpy.ravel( dx ).reshape((-1,1))
+ _dX = numpy.asarray(dx).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentState"].store( Xb + _dX )
- _HdX = Ht @ _dX
- _HdX = numpy.ravel( _HdX ).reshape((-1,1))
+ _HdX = (Ht @ _dX).reshape((-1,1))
_dInnovation = Innovation - _HdX
if selfA._toStore("SimulatedObservationAtCurrentState") or \
selfA._toStore("SimulatedObservationAtCurrentOptimum"):
#
def GradientOfCostFunction(dx):
_dX = numpy.ravel( dx )
- _HdX = Ht @ _dX
- _HdX = numpy.ravel( _HdX ).reshape((-1,1))
+ _HdX = (Ht @ _dX).reshape((-1,1))
_dInnovation = Innovation - _HdX
GradJb = BI @ _dX
GradJo = - Ht.T @ (RI * _dInnovation)
import scipy.optimize as optimiseur
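# The inner-loop control variable is the increment dx, added to Xb in the cost function,
# so the minimization starts from a zero vector of size Xb.size.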
Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
func = CostFunction,
- x0 = numpy.zeros(Xini.size),
+ x0 = numpy.zeros(Xb.size),
fprime = GradientOfCostFunction,
args = (),
bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
elif selfA._parameters["Minimizer"] == "TNC":
Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
func = CostFunction,
- x0 = numpy.zeros(Xini.size),
+ x0 = numpy.zeros(Xb.size),
fprime = GradientOfCostFunction,
args = (),
bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
elif selfA._parameters["Minimizer"] == "CG":
Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
f = CostFunction,
- x0 = numpy.zeros(Xini.size),
+ x0 = numpy.zeros(Xb.size),
fprime = GradientOfCostFunction,
args = (),
maxiter = selfA._parameters["MaximumNumberOfSteps"],
elif selfA._parameters["Minimizer"] == "NCG":
Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
f = CostFunction,
- x0 = numpy.zeros(Xini.size),
+ x0 = numpy.zeros(Xb.size),
fprime = GradientOfCostFunction,
args = (),
maxiter = selfA._parameters["MaximumNumberOfSteps"],
elif selfA._parameters["Minimizer"] == "BFGS":
Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
f = CostFunction,
- x0 = numpy.zeros(Xini.size),
+ x0 = numpy.zeros(Xb.size),
fprime = GradientOfCostFunction,
args = (),
maxiter = selfA._parameters["MaximumNumberOfSteps"],
Hm = HO["Direct"].appliedTo
#
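# Reuse a precomputed H(Xb) if one is supplied through HO["AppliedInX"], otherwise apply the direct operator to Xb.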
if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
- HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+ HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
else:
- HXb = Hm( Xb )
+ HXb = numpy.asarray(Hm( Xb ))
HXb = numpy.ravel( HXb ).reshape((-1,1))
if Y.size != HXb.size:
raise ValueError("The size %i of observations Y and the size %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
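# HBHTpR is presumably the innovation covariance H B H^T + R (with BHT holding B H^T), as used by the dual (PSAS-like) formulation below.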
HBHTpR = R + Ht * BHT
Innovation = Y - HXb
#
- Xini = numpy.zeros(Xb.shape)
+ Xini = numpy.zeros(Y.size)
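# The dual control variable w lives in observation space, hence a zero start point of size Y.size and no recentred state bounds passed to the minimizer.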
#
# Definition of the cost function
# -------------------------------
def CostFunction(w):
- _W = w.reshape((-1,1))
+ _W = numpy.asarray(w).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
return J
#
def GradientOfCostFunction(w):
- _W = w.reshape((-1,1))
+ _W = numpy.asarray(w).reshape((-1,1))
GradJb = HBHTpR @ _W
GradJo = - Innovation
GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
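# i.e. GradJ = HBHTpR @ w - Innovation, the gradient of the quadratic dual cost 1/2 * w^T HBHTpR w - w^T Innovation.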
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
pgtol = selfA._parameters["ProjectedGradientTolerance"],
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
- bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
maxfun = selfA._parameters["MaximumNumberOfSteps"],
pgtol = selfA._parameters["ProjectedGradientTolerance"],
ftol = selfA._parameters["CostDecrementTolerance"],
Ha = HO["Adjoint"].appliedInXTo
#
if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
- HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+ HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
else:
- HXb = Hm( Xb )
+ HXb = numpy.asarray(Hm( Xb ))
HXb = HXb.reshape((-1,1))
if Y.size != HXb.size:
raise ValueError("The size %i of observations Y and the size %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
# Definition of the cost function
# -------------------------------
def CostFunction(x):
- _X = numpy.ravel( x ).reshape((-1,1))
+ _X = numpy.asarray(x).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentState"].store( _X )
- _HX = Hm( _X ).reshape((-1,1))
+ _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
_Innovation = Y - _HX
if selfA._toStore("SimulatedObservationAtCurrentState") or \
selfA._toStore("SimulatedObservationAtCurrentOptimum"):
return J
#
def GradientOfCostFunction(x):
- _X = x.reshape((-1,1))
- _HX = Hm( _X ).reshape((-1,1))
+ _X = numpy.asarray(x).reshape((-1,1))
+ _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
GradJb = BI * (_X - Xb)
GradJo = - Ha( (_X, RI * (Y - _HX)) )
GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
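# i.e. GradJ = B^{-1} (X - Xb) - H^T R^{-1} (Y - H(X)), the adjoint Ha providing the action of H^T.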
selfA.DirectCalculation = [None,] # Step 0 is not observed
selfA.DirectInnovation = [None,] # Step 0 is not observed
def CostFunction(x):
- _X = numpy.ravel( x ).reshape((-1,1))
+ _X = numpy.asarray(x).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
return J
#
def GradientOfCostFunction(x):
- _X = numpy.ravel( x ).reshape((-1,1))
+ _X = numpy.asarray(x).reshape((-1,1))
GradJb = BI * (_X - Xb)
GradJo = 0.
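# The observation part of the gradient is accumulated backward in time, from step duration-1 down to step 1.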
for step in range(duration-1,0,-1):
BT = B.getT()
RI = R.getI()
#
- Xini = numpy.zeros(Xb.shape)
+ Xini = numpy.zeros(Xb.size)
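# The control variable v has the dimension of the state; the state itself is recovered as X = Xb + B v in the cost function below.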
#
# Definition of the cost function
# -------------------------------
def CostFunction(v):
- _V = numpy.ravel( v ).reshape((-1,1))
- _X = Xb + B * _V
+ _V = numpy.asarray(v).reshape((-1,1))
+ _X = Xb + (B @ _V).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentState"].store( _X )
- _HX = Hm( _X )
- _HX = numpy.ravel( _HX ).reshape((-1,1))
+ _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
_Innovation = Y - _HX
if selfA._toStore("SimulatedObservationAtCurrentState") or \
selfA._toStore("SimulatedObservationAtCurrentOptimum"):
return J
#
def GradientOfCostFunction(v):
- _V = v.reshape((-1,1))
+ _V = numpy.asarray(v).reshape((-1,1))
_X = Xb + (B @ _V).reshape((-1,1))
- _HX = Hm( _X ).reshape((-1,1))
+ _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
GradJb = BT * _V
GradJo = - Ha( (_X, RI * (Y - _HX)) )
GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )