elif __Object is not None:
self.__is_object = True
self.__C = __Object
- for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
+ for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
if not hasattr(self.__C,at):
raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
if hasattr(self.__C,"shape"):
def asfullmatrix(self, msize=None):
"Matrice pleine"
if self.ismatrix():
- return self.__C
+ return numpy.asarray(self.__C)
elif self.isvector():
- return numpy.matrix( numpy.diag(self.__C), float )
+ return numpy.asarray( numpy.diag(self.__C), float )
elif self.isscalar():
if msize is None:
raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
else:
- return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
+ return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
return self.__C.asfullmatrix()
"x.__neg__() <==> -x"
return - self.__C
+ def __matmul__(self, other):
+ "x.__mul__(y) <==> x@y"
+ if self.ismatrix() and isinstance(other, (int, float)):
+ return numpy.asarray(self.__C) * other
+ elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
+ if numpy.ravel(other).size == self.shape[1]: # Vector
+ return numpy.ravel(self.__C @ numpy.ravel(other))
+ elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
+ return numpy.asarray(self.__C) @ numpy.asarray(other)
+ else:
+ raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
+ elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
+ if numpy.ravel(other).size == self.shape[1]: # Vector
+ return numpy.ravel(self.__C) * numpy.ravel(other)
+ elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
+ return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
+ else:
+ raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
+ elif self.isscalar() and isinstance(other,numpy.matrix):
+ return numpy.asarray(self.__C * other)
+ elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
+ if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
+ return self.__C * numpy.ravel(other)
+ else:
+ return self.__C * numpy.asarray(other)
+ elif self.isobject():
+ return self.__C.__matmul__(other)
+ else:
+ raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
+
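The new `__matmul__` dispatches on how the covariance is stored: full matrix, diagonal kept as a 1D vector, or scalar multiple of the identity. Below is a minimal standalone sketch of those three semantics in plain NumPy, not the Covariance class itself; all names are illustrative.

import numpy

p = 3
x = numpy.ones(p)                    # state-sized vector
C_full = 2.0 * numpy.eye(p)          # "matrix" case: dense covariance
C_diag = numpy.array([1., 2., 3.])   # "vector" case: diagonal stored as 1D
C_scal = 2.0                         # "scalar" case: multiple of the identity

# Vector case: elementwise product equals the full diagonal-matrix product,
# without ever building diag(C).
assert numpy.allclose(C_diag * x, numpy.diag(C_diag) @ x)
# Scalar case: acts as a multiple of the identity.
assert numpy.allclose(C_scal * x, (C_scal * numpy.eye(p)) @ x)
# Matrix case: the ordinary dense product, raveled when the operand is a vector.
assert numpy.allclose(numpy.ravel(C_full @ x), 2.0 * numpy.ones(p))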
def __mul__(self, other):
"x.__mul__(y) <==> x*y"
if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
else:
raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
+ def __rmatmul__(self, other):
+ "x.__rmul__(y) <==> y@x"
+ if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
+ return other * self.__C
+ elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
+ if numpy.ravel(other).size == self.shape[1]: # Vector
+ return numpy.asmatrix(numpy.ravel(other)) * self.__C
+ elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
+ return numpy.asmatrix(other) * self.__C
+ else:
+ raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
+ elif self.isvector() and isinstance(other,numpy.matrix):
+ if numpy.ravel(other).size == self.shape[0]: # Vector
+ return numpy.asmatrix(numpy.ravel(other) * self.__C)
+ elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
+ return numpy.asmatrix(numpy.array(other) * self.__C)
+ else:
+ raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
+ elif self.isscalar() and isinstance(other,numpy.matrix):
+ return other * self.__C
+ elif self.isobject():
+ return self.__C.__rmatmul__(other)
+ else:
+ raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
+
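Likewise, a quick plain-NumPy illustration of the left product y @ C that `__rmatmul__` supplies for the diagonal ("vector") case: right-multiplying by diag(C) is just columnwise scaling (names illustrative).

import numpy

C_diag = numpy.array([1., 2., 3.])                 # diagonal stored as 1D
Y = numpy.arange(6., dtype=float).reshape((2, 3))
# Right-multiplying by diag(C) scales the columns elementwise.
assert numpy.allclose(Y * C_diag, Y @ numpy.diag(C_diag))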
def __rmul__(self, other):
"x.__rmul__(y) <==> y*x"
if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
GradJb = BI * (_X - Xb)
GradJo = 0.
for step in range(duration-1,0,-1):
- # Etape de récupération du dernier stockage de l'évolution
+ # Étape de récupération du dernier stockage de l'évolution
_Xn = selfA.DirectCalculation.pop()
- # Etape de récupération du dernier stockage de l'innovation
+ # Étape de récupération du dernier stockage de l'innovation
_YmHMX = selfA.DirectInnovation.pop()
# Calcul des adjoints
Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
- # Calcul du gradient par etat adjoint
- GradJo = GradJo + Ha * RI * _YmHMX # Equivaut pour Ha lineaire à : Ha( (_Xn, RI * _YmHMX) )
- GradJo = Ma * GradJo # Equivaut pour Ma lineaire à : Ma( (_Xn, GradJo) )
+ # Calcul du gradient par état adjoint
+ GradJo = GradJo + Ha * RI * _YmHMX # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
+ GradJo = Ma * GradJo # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
return GradJ
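The backward loop accumulates the observation term of the gradient through the adjoint recursion GradJo <- Ma (GradJo + Ha RI d). A minimal linear-case sketch with explicit matrices standing in for the adjoint operators (all names hypothetical):

import numpy

n, p, T = 4, 2, 5
rng = numpy.random.default_rng(0)
M  = rng.standard_normal((n, n))     # linear evolution model
H  = rng.standard_normal((p, n))     # linear observation operator
RI = numpy.eye(p)                    # inverse observation-error covariance
innovations = [rng.standard_normal((p, 1)) for _ in range(T)]  # stored Y - H(M(X))

GradJo = numpy.zeros((n, 1))
for d in reversed(innovations):      # backward in time, as in the loop above
    GradJo = M.T @ (GradJo + H.T @ (RI @ d))   # Ma == M.T, Ha == H.T here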
#
#
for step in range(duration-1):
if hasattr(Y,"store"):
- Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
else:
- Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
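The reshape change is cosmetic: for a raveled length-__p vector, -1 and 1 both yield a (p, 1) column, and the explicit 1 states the intent. A quick check:

import numpy

p = 4
y = numpy.arange(p, dtype=float)
assert numpy.ravel(y).reshape((p, -1)).shape == (p, 1)
assert numpy.ravel(y).reshape((p, 1)).shape == (p, 1)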
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
returnSerieAsArrayMatrix = True )
#
# Mean of forecast and observation of forecast
- Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
- Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+ Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
#
#--------------------------
if VariantM == "KalmanFilterFormula05":
PfHT, HPfHT = 0., 0.
for i in range(__m):
- Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
- Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
+ Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
+ Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
PfHT += Exfi * Eyfi.T
HPfHT += Eyfi * Eyfi.T
PfHT = (1./(__m-1)) * PfHT
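The KalmanFilterFormula05 loop builds the sample cross-covariances Pf H^T and H Pf H^T member by member; over anomaly matrices the same quantity has a vectorized form, sketched standalone below (names illustrative):

import numpy

n, p, m = 4, 3, 6
rng = numpy.random.default_rng(1)
Xf = rng.standard_normal((n, m))                 # forecast ensemble
Yf = rng.standard_normal((p, m))                 # observed ensemble
Ex = Xf - Xf.mean(axis=1, keepdims=True)         # state anomalies
Ey = Yf - Yf.mean(axis=1, keepdims=True)         # observation anomalies
PfHT = sum(Ex[:, i:i+1] @ Ey[:, i:i+1].T for i in range(m)) / (m - 1)
assert numpy.allclose(PfHT, Ex @ Ey.T / (m - 1)) # vectorized equivalent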
#--------------------------
elif VariantM == "KalmanFilterFormula16":
EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
- EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+ EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
#
EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
#--------------------------
#
if selfA._parameters["StoreInternalVariables"] \
if selfA._toStore("ForecastState"):
selfA.StoredVariables["ForecastState"].store( EMX )
if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
+ selfA.StoredVariables["BMA"].store( EMX - Xa )
if selfA._toStore("InnovationAtCurrentState"):
selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
if selfA._toStore("SimulatedObservationAtCurrentState") \
#
for step in range(duration-1):
if hasattr(Y,"store"):
- Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
else:
- Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
returnSerieAsArrayMatrix = True )
#
# Mean of forecast and observation of forecast
- Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
- Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+ Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
#
# Anomalies
EaX = EnsembleOfAnomalies( Xn_predicted )
mU = numpy.eye(__m)
#
EaX = EaX / numpy.sqrt(__m-1)
- Xn = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
+ Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
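A small shape sketch of the deterministic ETKF update used here, with placeholder values for the transform pieces (illustrative only):

import numpy

n, m = 5, 4
rng = numpy.random.default_rng(2)
EaX   = rng.standard_normal((n, m)) / numpy.sqrt(m - 1)  # anomalies
Xfm   = numpy.zeros((n, 1))                              # forecast mean
vw    = numpy.zeros(m)                                   # weight vector
Tdemi = numpy.eye(m)                                     # transform square root
mU    = numpy.eye(m)                                     # orthogonal matrix
Xn = Xfm + EaX @ (vw.reshape((m, 1)) + numpy.sqrt(m - 1) * Tdemi @ mU)
assert Xn.shape == (n, m)    # the (m,1) weights broadcast against (m,m)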
#--------------------------
elif VariantM == "Variational":
HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
def CostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
_Jo = 0.5 * _A.T @ (RI * _A)
_Jb = 0.5 * (__m-1) * w.T @ w
_J = _Jo + _Jb
return float(_J)
def GradientOfCostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
_GradJo = - EaHX.T @ (RI * _A)
_GradJb = (__m-1) * w.reshape((__m,1))
_GradJ = _GradJo + _GradJb
elif VariantM == "FiniteSize11": # Jauge Boc2011
HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
def CostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
_Jo = 0.5 * _A.T @ (RI * _A)
_Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
_J = _Jo + _Jb
return float(_J)
def GradientOfCostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
_GradJo = - EaHX.T @ (RI * _A)
_GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
_GradJ = _GradJo + _GradJb
Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
#
- Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
+ Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
#--------------------------
elif VariantM == "FiniteSize15": # Jauge Boc2015
HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
def CostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
_Jo = 0.5 * _A.T @ (RI * _A)
_Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
_J = _Jo + _Jb
return float(_J)
def GradientOfCostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
_GradJo = - EaHX.T @ (RI * _A)
_GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
_GradJ = _GradJo + _GradJb
Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
#
- Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
+ Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
#--------------------------
elif VariantM == "FiniteSize16": # Jauge Boc2016
HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
def CostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
_Jo = 0.5 * _A.T @ (RI * _A)
_Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
_J = _Jo + _Jb
return float(_J)
def GradientOfCostFunction(w):
- _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
+ _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
_GradJo = - EaHX.T @ (RI * _A)
_GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
_GradJ = _GradJo + _GradJb
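Each variant pairs its gauge-term cost with an analytic gradient; as a sanity sketch, the Boc2016 pair can be checked against a central finite difference on random data (standalone; S and d stand in for EaHX and the innovation Ynpu - HXfm):

import math
import numpy

p, m = 3, 4
rng = numpy.random.default_rng(3)
S  = rng.standard_normal((p, m))    # stands in for EaHX
d  = rng.standard_normal((p, 1))    # stands in for Ynpu - HXfm
RI = numpy.eye(p)                   # inverse observation-error covariance

def J(w):
    A = d - S @ w                   # w has shape (m, 1)
    return 0.5 * numpy.vdot(A, RI @ A) \
         + 0.5 * (m + 1) * math.log(1 + 1/m + numpy.vdot(w, w) / (m - 1))

def gradJ(w):
    A = d - S @ w
    return - S.T @ (RI @ A) \
           + ((m + 1) / (m - 1)) * w / (1 + 1/m + numpy.vdot(w, w) / (m - 1))

w0, h = rng.standard_normal((m, 1)), 1e-6
e0 = numpy.zeros((m, 1)); e0[0, 0] = 1.0
fd = (J(w0 + h * e0) - J(w0 - h * e0)) / (2 * h)    # central difference
assert abs(fd - gradJ(w0)[0, 0]) < 1e-4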
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
#--------------------------
#
if selfA._parameters["StoreInternalVariables"] \
if selfA._toStore("BMA"):
selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
+ selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
if selfA._toStore("SimulatedObservationAtCurrentState") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
#
for step in range(duration-1):
if hasattr(Y,"store"):
- Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
else:
- Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
Ta = numpy.eye(__m)
vw = numpy.zeros(__m)
while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
- vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
+ vx1 = (Xfm + EaX @ vw).reshape((__n,1))
#
if BnotT:
E1 = vx1 + _epsilon * EaX
HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
- vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+ vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
#
if BnotT:
EaY = (HE2 - vy2) / _epsilon
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
#--------------------------
#
if selfA._parameters["StoreInternalVariables"] \
if selfA._toStore("ForecastState"):
selfA.StoredVariables["ForecastState"].store( EMX )
if selfA._toStore("BMA"):
- selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
+ selfA.StoredVariables["BMA"].store( EMX - Xa )
if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
+ selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
if selfA._toStore("SimulatedObservationAtCurrentState") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
#
for step in range(duration-1):
if hasattr(Y,"store"):
- Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
+ Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
else:
- Ynpu = numpy.ravel( Y ).reshape((__p,-1))
+ Ynpu = numpy.ravel( Y ).reshape((__p,1))
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
Ta = numpy.eye(__m)
vw = numpy.zeros(__m)
while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
- vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
+ vx1 = (Xfm + EaX @ vw).reshape((__n,1))
#
if BnotT:
E1 = vx1 + _epsilon * EaX
elif selfA._parameters["EstimationOf"] == "Parameters":
# --- > Par principe, M = Id
E2 = Xn
- vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
- vy1 = H((vx2, Un)).reshape((__p,-1))
+ vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ vy1 = H((vx2, Un)).reshape((__p,1))
#
HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
- vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
+ vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
#
if BnotT:
EaY = (HE2 - vy2) / _epsilon
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
+ Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
#--------------------------
#
if selfA._parameters["StoreInternalVariables"] \
if selfA._toStore("BMA"):
selfA.StoredVariables["BMA"].store( E2 - Xa )
if selfA._toStore("InnovationAtCurrentState"):
- selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
+ selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
if selfA._toStore("SimulatedObservationAtCurrentState") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )