1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2017 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Définit les outils généraux élémentaires.

    Ce module est destiné à être appelée par AssimilationStudy.
"""
28 __author__ = "Jean-Philippe ARGAUD"
31 import os, sys, logging, copy
33 from daCore import Persistence
34 from daCore import PlatformInfo
35 from daCore import Templates
37 # ==============================================================================
class CacheManager(object):
    """
    General management class for a cache of operator computations.
    """
    # NOTE(review): several original lines are missing from this chunk (the
    # "def __init__(self," header among them); the visible fragments are kept
    # verbatim and annotated rather than reconstructed.
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy    = -1,
        # NOTE(review): "lenght" is a historical misspelling kept throughout
        # this module for API compatibility.
        """
        The tolerance characteristics can be modified at creation time.
        """
        self.__tolerBP  = float(toleranceInRedundancy)  # norm tolerance used to declare two points identical
        self.__lenghtOR = int(lenghtOfRedundancy)       # max number of cached entries (-1: auto-sized on first store)
        self.__initlnOR = self.__lenghtOR               # initial length, remembered for later restoration
        self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
        # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)

    def wasCalculatedIn(self, xValue ): #, info="" ):
        "Check whether a computation matching the given value already exists in the cache."
        # Walk the cache from the most recent entry backwards, bounded by the
        # maximum redundancy length.
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if xValue.size != self.__listOPCV[i][0].size:
                # NOTE(review): the statement guarded by this size test
                # (presumably a "continue") is on a line missing from this chunk.
                # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
            # Duplicate test: distance to the cached point below the tolerance
            # scaled by the cached point norm (tuple entry 2).
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __HxV = self.__listOPCV[i][1]  # cached operator result for this point
                # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
        # NOTE(review): the return statement(s) of this method are missing
        # from this chunk.

    def storeValueInX(self, xValue, HxValue ):
        "Store a computation (point, result, norm) matching the given value."
        if self.__lenghtOR < 0:
            # Auto-size the cache on first use from the vector size.
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        while len(self.__listOPCV) > self.__lenghtOR:
            # Evict the oldest entries to respect the maximum cache length.
            # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            # NOTE(review): the cached result element (HxValue) of this tuple
            # is on a line missing from this chunk.
            numpy.linalg.norm(xValue),
        # NOTE(review): the closing parentheses of the append call, and the
        # headers of the following enable/disable helper methods, are missing
        # from this chunk; the two assignments below are their visible bodies.
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = self.__initlnOR
97 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator, defined either by a Python
    function or by a matrix.
    """
    # NOTE(review): the class-level attributes used below (NbCallsAsMatrix,
    # NbCallsAsMethod, NbCallsOfCached and the shared cache "CM") are declared
    # on lines missing from this chunk of the file.

    def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True):
        """
        Build such an object by providing, through one of the two keywords,
        either a Python function or a matrix.
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : whether to avoid redundant computations
        """
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        if fromMethod is not None:
            self.__Method = fromMethod
            # NOTE(review): a line (presumably "self.__Matrix = None") is
            # missing here -- confirm against the full source.
            self.__Type   = "Method"
        elif fromMatrix is not None:
            # NOTE(review): a line (presumably "self.__Method = None") is
            # missing here.  Also note numpy.matrix and the numpy.float alias
            # are deprecated/removed in modern NumPy.
            self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
            self.__Type   = "Matrix"
        # NOTE(review): the trailing branch of this constructor is missing
        # from this chunk.

    def disableAvoidingRedundancy(self):
        "Disable the shared redundancy cache."
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # NOTE(review): intermediate lines are missing here; presumably this
        # method enables the cache when __AvoidRC is set, the visible call
        # below being the fallback branch -- confirm upstream.
        Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None):
        """
        Return the result of applying the operator to xValue. This method
        merely applies the operator; its argument must already be of the
        proper type.
        - xValue : argument suitable for applying the operator
        """
        if HValue is not None:
            HxValue = numpy.asmatrix( numpy.ravel( HValue ) ).T
            # NOTE(review): a guard line (presumably "if self.__AvoidRC:") is
            # missing before the cache store below.
            Operator.CM.storeValueInX(xValue,HxValue)
        # NOTE(review): the "else:" opening the computation path, and the
        # cache-enabled guard around the lookup below, are on missing lines.
            __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xValue)
            # (alternative branch -- its header line is missing)
            __alreadyCalculated = False
            if __alreadyCalculated:
                self.__addOneCacheCall()
                # NOTE(review): the assignment of the cached result __HxV to
                # HxValue is on a line missing from this chunk.
            if self.__Matrix is not None:
                # Linear case: direct matrix-vector product.
                self.__addOneMatrixCall()
                HxValue = self.__Matrix * xValue
            # ("else:" line missing)
                self.__addOneMethodCall()
                HxValue = self.__Method( xValue )
            Operator.CM.storeValueInX(xValue,HxValue)
        # NOTE(review): the "return HxValue" statement is missing from this chunk.

    def appliedControledFormTo(self, paire ):
        """
        Return the result of applying the operator to a pair
        (xValue, uValue). This method merely applies the operator; its
        argument must already be of the proper type. If uValue is None, the
        operator is assumed to apply to xValue only.
        - xValue : X argument suitable for applying the operator
        - uValue : U argument suitable for applying the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xValue, uValue = paire
        if self.__Matrix is not None:
            # Linear case: the control has no effect on the matrix product.
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        elif uValue is not None:
            # Controlled case: the method receives the (X, U) pair.
            self.__addOneMethodCall()
            return self.__Method( (xValue, uValue) )
        # NOTE(review): the final "else:" line is missing from this chunk;
        # the two statements below are its body (X-only application).
            self.__addOneMethodCall()
            return self.__Method( xValue )

    def appliedInXTo(self, paire ):
        """
        Return the result of applying the operator to xValue, the operator
        being valid at xNominal. This method merely applies the operator; its
        argument must already be of the proper type. If the operator is
        linear (a matrix), it is valid at any nominal point.
        Arguments: a list containing
        - xNominal : the point at which the operator is built before being
          applied
        - xValue   : argument suitable for applying the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xNominal, xValue = paire
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        # NOTE(review): the "else:" line is missing from this chunk.
            self.__addOneMethodCall()
            return self.__Method( (xNominal, xValue) )

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue"):
        """
        Return the operator in matrix form.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            # NOTE(review): the "return self.__Matrix" statement is missing
            # from this chunk.
        elif ValueForMethodForm is not "UnknownVoidValue": # Do not use "None"
            # NOTE(review): "is not" performs an identity comparison against a
            # string literal; this relies on CPython string interning and
            # raises SyntaxWarning on Python >= 3.8 -- should be "!=".
            # Kept as-is in this documentation-only pass.
            self.__addOneMethodCall()
            return numpy.matrix( self.__Method( (ValueForMethodForm, None) ) )
        # ("else:" line missing)
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")

    # NOTE(review): the header of the following method (presumably
    # "def shape(self):") is missing from this chunk.
        """
        Return the shape (numpy form) if the operator is available as a
        matrix.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        # ("else:" line missing)
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the operator evaluation counts, as a tuple
        (local total, local matrix, local method, local cached,
         global total, global matrix, global method, global cached),
        or a single entry of it when "which" is given.
        """
        # NOTE(review): the "__nbcalls = (" opening line is missing from this
        # chunk; the entries below are the tuple contents.
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Record one matrix-form evaluation."
        self.__NbCallsAsMatrix   += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self):
        "Record one function-form evaluation."
        self.__NbCallsAsMethod   += 1 # Local count
        Operator.NbCallsAsMethod += 1 # Global count

    def __addOneCacheCall(self):
        "Record one cache hit."
        self.__NbCallsOfCached   += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
278 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator
    (Direct, Tangent Linear, Adjoint).
    """
    # NOTE(review): the "def __init__(self," header and several of its
    # keyword arguments (asMatrix, asScript, appliedInX, avoidRC, scheduledBy,
    # toBeChecked, ...) are on lines missing from this chunk; the visible
    # fragments below are kept verbatim.
                 name             = "GenericFullOperator",
                 asOneFunction    = None, # Function
                 asThreeFunctions = None, # Dictionary of functions
                 asDict           = None, # Parameters
        self.__name  = str(name)
        self.__check = bool(toBeChecked)
        # NOTE(review): initialisation of self.__FO and __Parameters is on
        # lines missing from this chunk.
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
            # Map public parameter names onto the internal "with*" keys used
            # by the finite-difference approximation below.
            if "DifferentialIncrement" in asDict:
                __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
            if "CenteredFiniteDifference" in asDict:
                __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
            if "EnableMultiProcessing" in asDict:
                __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
            if "NumberOfProcesses" in asDict:
                __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
        if asScript is not None:
            # Script case: operator pieces are read from a user script.
            __Matrix, __Function = None, None
            # NOTE(review): the conditional header selecting the matrix case
            # is on a line missing from this chunk.
                __Matrix = ImportFromScript(asScript).getvalue( self.__name )
            # (missing "elif" branch header for the one-function case)
                __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                # NOTE(review): the "__Function = {" opening line and the
                # closing "}" of this literal are on missing lines.
                    "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
        # (missing "else:" header for the non-script case)
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                    # ("else:" line missing)
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                # ("else:" line missing)
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                    ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                    ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                    (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    # Full Tangent/Adjoint pair provided explicitly.
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                    ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    # Only a Direct function: derivatives will be approximated.
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                # ("else:" line missing)
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Fall back to the Tangent as the Direct operator.
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        # NOTE(review): some lines are missing here (presumably the matrix
        # keyword handling) before the commented-out Python-2 check below.
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            # Single value: wrap it under the conventional "HXb" key.
            __appliedInX = {"HXb":appliedInX}
        # ("else:" line, presumably "__appliedInX = None", missing)
        if scheduledBy is not None:
            self.__T = scheduledBy
        if isinstance(__Function, dict) and \
            ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
            ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Approximated-derivatives case: fill in the defaults expected by
            # FDApproximation before building the three operators.
            if "withCenteredDF"            not in __Function: __Function["withCenteredDF"]            = False
            if "withIncrement"             not in __Function: __Function["withIncrement"]             = 0.01
            if "withdX"                    not in __Function: __Function["withdX"]                    = None
            if "withAvoidingRedundancy"    not in __Function: __Function["withAvoidingRedundancy"]    = True
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy"    not in __Function: __Function["withLenghtOfRedundancy"]    = -1
            if "withmpEnabled"             not in __Function: __Function["withmpEnabled"]             = False
            if "withmpWorkers"             not in __Function: __Function["withmpWorkers"]             = None
            from daNumerics.ApproximatedDerivatives import FDApproximation
            FDA = FDApproximation(
                Function              = __Function["Direct"],
                centeredDF            = __Function["withCenteredDF"],
                increment             = __Function["withIncrement"],
                dX                    = __Function["withdX"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy    = __Function["withLenghtOfRedundancy"],
                mpEnabled             = __Function["withmpEnabled"],
                mpWorkers             = __Function["withmpWorkers"],
            # NOTE(review): the closing parenthesis of this call is on a line
            # missing from this chunk.
            self.__FO["Direct"]  = Operator( fromMethod = FDA.DirectOperator,  avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC )
        elif isinstance(__Function, dict) and \
            ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
            (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # Explicit three-function case.
            self.__FO["Direct"]  = Operator( fromMethod = __Function["Direct"],  avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC )
        elif asMatrix is not None:
            # Linear case: the same matrix serves Direct/Tangent, and its
            # transpose serves the Adjoint.
            __matrice = numpy.matrix( asMatrix, numpy.float )
            self.__FO["Direct"]  = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC )
        # ("else:" line missing)
            raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            if type(__appliedInX) is not dict:
                raise ValueError("Error: observation operator defined by \"AppliedInX\" need a dictionary as argument.")
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # Case of a true matrix: flatten then store as a column vector.
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Case of a vector stored as a 2-dimensional array.
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                # ("else:" line missing)
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
        # ("else:" line missing)
            self.__FO["AppliedInX"] = None

    # NOTE(review): the headers of the accessor methods below (presumably
    # getO, __repr__ and __str__) are on lines missing from this chunk.
        "x.__repr__() <==> repr(x)"
        # NOTE(review): __V is not defined in the visible part of this class;
        # confirm against the full source.
        return repr(self.__V)

        "x.__str__() <==> str(x)"
443 # ==============================================================================
class Algorithm(object):
    """
    General interface class for an algorithm.

    It provides a framework for writing an elementary assimilation algorithm
    class, supplying a container (dictionary) of initialised persistent
    variables, together with access methods for these stored variables.

    An elementary algorithm class must implement the "run" method.
    """
    def __init__(self, name):
        """
        The present initialisation builds storage variables that are
        available in a generic way in the elementary algorithms. These
        storage variables are then kept in a dictionary internal to the
        object, accessed through the "get" method.

        The planned variables are:
        - CostFunctionJ  : global cost function, sum of the two parts below
        - CostFunctionJb : background part of the cost function
        - CostFunctionJo : observation part of the cost function
        - GradientOfCostFunctionJ  : gradient of the global cost function
        - GradientOfCostFunctionJb : gradient of its background part
        - GradientOfCostFunctionJo : gradient of its observation part
        - CurrentState : current state during iterations
        - Analysis : the analysis Xa
        - SimulatedObservationAtBackground   : observed state H(Xb) at background
        - SimulatedObservationAtCurrentState : observed state H(X) at current state
        - SimulatedObservationAtOptimum      : observed state H(Xa) at optimum
        - Innovation : the innovation: d = Y - H(X)
        - InnovationAtCurrentState : innovation at current state: dn = Y - H(Xn)
        - SigmaObs2 : optimal-correction indicator for observation errors
        - SigmaBck2 : optimal-correction indicator for background errors
        - MahalanobisConsistency : covariance consistency indicator
        - OMA : Observation minus Analysis   : Y - Xa
        - OMB : Observation minus Background : Y - Xb
        - AMB : Analysis minus Background    : Xa - Xb
        - APosterioriCovariance : A matrix
        - APosterioriVariances  : variances of the A matrix
        - APosterioriStandardDeviations : standard deviations of the A matrix
        - APosterioriCorrelations : correlations of the A matrix
        - Residu : for verification algorithms
        Additional variables to store can be added in the initialisation of
        the elementary algorithm that inherits from this class.
        """
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__required_parameters = {}
        self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
        #
        self.StoredVariables = {}
        self.StoredVariables["CostFunctionJ"]                        = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"]                       = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"]                       = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"]        = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"]       = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"]       = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["GradientOfCostFunctionJ"]              = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"]             = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"]             = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["CurrentState"]                         = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["Analysis"]                             = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["IndexOfOptimum"]                       = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["CurrentOptimum"]                       = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["SimulatedObservationAtBackground"]     = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentState"]   = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"]        = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["Innovation"]                           = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentState"]             = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["SigmaObs2"]                            = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SigmaBck2"]                            = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["MahalanobisConsistency"]               = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"]                                  = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"]                                  = Persistence.OneVector(name = "OMB")
        self.StoredVariables["BMA"]                                  = Persistence.OneVector(name = "BMA")
        self.StoredVariables["APosterioriCovariance"]                = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriVariances"]                 = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["APosterioriStandardDeviations"]        = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriCorrelations"]              = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["SimulationQuantiles"]                  = Persistence.OneMatrix(name = "SimulationQuantiles")
        self.StoredVariables["Residu"]                               = Persistence.OneScalar(name = "Residu")

    def _pre_run(self, Parameters, R=None, B=None, Q=None ):
        "Common pre-processing performed before the algorithm-specific calculation."
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        # Update self._parameters with Parameters
        self.__setParameters(Parameters)
        # Corrections and complements
        def __test_cvalue( argument, variable, argname):
            # Check each covariance matrix argument against the declared
            # mandatory/optional input requirements.
            # NOTE(review): the "if argument is None:" guard line is missing
            # from this chunk before the branches below.
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
            # ("else:" line missing)
                logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
        # ("else:" line of the outer guard missing)
            logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( Q, "Q", "Evolution" )
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
        # ("else:" line missing)
            self._parameters["Bounds"] = None
        #
        if logging.getLogger().level < logging.WARNING:
            # Verbose logging: enable optimiser printing and messages.
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
            # ("else:" line missing)
                self._parameters["optmessages"] = 15
        # ("else:" line missing)
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
            # ("else:" line missing)
                self._parameters["optmessages"] = 15
        # NOTE(review): the trailing line(s) of this method (presumably
        # "return 0") are missing from this chunk.

    def _post_run(self,_oH=None):
        "Common post-processing performed after the calculation."
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            # Derive requested quantities from each stored A matrix.
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Correlation matrix: D^{-1/2} A D^{-1/2} with D = diag(A).
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        # NOTE(review): a guard line (presumably "if _oH is not None:") is
        # missing before the two debug lines below, which dereference _oH.
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Terminé", self._name)
        # NOTE(review): the trailing "return" line is missing from this chunk.

    def get(self, key=None):
        """
        Return one of the stored variables identified by the key, or the
        dictionary of all available variables when no key is given. The
        variables are returned directly as objects, so the individual access
        methods are those of the persistence classes.
        """
        # NOTE(review): the "if key is not None:" guard around the two
        # returns below is on a line missing from this chunk.
            return self.StoredVariables[key]
        # ("else:" line missing)
            return self.StoredVariables

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.StoredVariables

    # NOTE(review): the "def keys(self):" header of the following method is
    # missing from this chunk.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()

    # NOTE(review): the "def pop(self, k, d):" header of the following method
    # is missing from this chunk.
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.pop(k, d)
        # (intermediate "else:" branch lines missing)
            raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"

    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Must implement the elementary assimilation calculation in its most
        natural mathematical form.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")

    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
        """
        Define, within the algorithm, required parameters and their default
        characteristics.
        """
        # NOTE(review): the "if name is None:" guard line is missing from
        # this chunk.
            raise ValueError("A name is mandatory to define a required parameter.")
        #
        self.__required_parameters[name] = {
            # NOTE(review): the "default" entry line is missing here.
            "typecast" : typecast,
            # NOTE(review): the remaining entries (minval, maxval, listval,
            # message) and the closing brace are on missing lines.
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))

    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of required parameter names, or directly the
        dictionary of required parameters.
        """
        # NOTE(review): the "if noDetails:" guard line is missing from this
        # chunk.
            return sorted(self.__required_parameters.keys())
        # ("else:" line missing)
            return self.__required_parameters

    def setParameterValue(self, name=None, value=None):
        """
        Return the value of a required parameter in a controlled way
        (typecast, bound checks and allowed-list checks).
        """
        default  = self.__required_parameters[name]["default"]
        typecast = self.__required_parameters[name]["typecast"]
        minval   = self.__required_parameters[name]["minval"]
        maxval   = self.__required_parameters[name]["maxval"]
        listval  = self.__required_parameters[name]["listval"]
        #
        if value is None and default is None:
            # NOTE(review): the body of this branch (presumably
            # "__val = None") is on a line missing from this chunk.
        elif value is None and default is not None:
            if typecast is None: __val = default
            else:                __val = typecast( default )
        # ("else:" line missing)
            if typecast is None: __val = value
            else:                __val = typecast( value )
        #
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
        if listval is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                # NOTE(review): the per-element loop and membership test
                # ("for v in __val: if v not in listval:") are on missing lines.
                    raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
            elif __val not in listval:
                raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
        # NOTE(review): the trailing "return __val" line is missing from this
        # chunk.

    def requireInputArguments(self, mandatory=(), optional=()):
        """
        Impose required (mandatory and optional) input arguments.
        """
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )

    def __setParameters(self, fromDico={}):
        """
        Store the received parameters into the internal dictionary.
        """
        # NOTE(review): the mutable default argument {} is shared across
        # calls; it is only read here, but worth confirming upstream.
        self._parameters.update( fromDico )
        for k in self.__required_parameters.keys():
            if k in fromDico.keys():
                self._parameters[k] = self.setParameterValue(k,fromDico[k])
            # ("else:" line missing)
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
720 # ==============================================================================
class Diagnostic(object):
    """
    General interface class for a diagnostic.

    This template is used as follows: it serves as a "pattern" class together
    with one of the persistence classes, such as "OneScalar".

    An elementary diagnostic class must implement its two methods: the
    "_formula" method, to write explicitly and properly the formula for the
    mathematical computation of the diagnostic (non-public internal method),
    and "calculate", to activate the former after having checked and prepared
    the data, and to store the results at each step (external activation
    method).
    """
    def __init__(self, name = "", parameters = {}):
        # NOTE(review): the mutable default {} is copied below via dict(),
        # so it is not shared between instances.
        self.name = str(name)
        self.parameters = dict( parameters )

    def _formula(self, *args):
        """
        Must implement the elementary diagnostic operation in its most
        natural mathematical form.
        """
        raise NotImplementedError("Diagnostic mathematical formula has not been implemented!")

    def calculate(self, *args):
        """
        Activate the computation formula with correctly ordered arguments.
        """
        raise NotImplementedError("Diagnostic activation method has not been implemented!")
754 # ==============================================================================
class DiagnosticAndParameters(object):
    """
    General interface class for a diagnostic and its parameters.
    """
    # NOTE(review): the "def __init__(self," header and part of its keyword
    # arguments (asDiagnostic, asIdentifier, asScript, ...) are on lines
    # missing from this chunk; the visible fragments are kept verbatim.
                 name            = "GenericDiagnostic",
                 asExistingDiags = None,
        self.__name = str(name)
        # NOTE(review): initialisation of self.__D/__I/__P/__U/__B is on
        # lines missing from this chunk.
        self.__E = tuple(asExistingDiags)
        self.__TheDiag = None
        #
        if asScript is not None:
            # Script case: every piece of the diagnostic definition is read
            # from the user script.
            __Diag = ImportFromScript(asScript).getvalue( "Diagnostic" )
            __Iden = ImportFromScript(asScript).getvalue( "Identifier" )
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
            __Unit = ImportFromScript(asScript).getvalue( "Unit" )
            __Base = ImportFromScript(asScript).getvalue( "BaseType" )
        # ("else:" line missing; the keyword-argument case follows)
            __Diag = asDiagnostic
            __Iden = asIdentifier
            # NOTE(review): the assignments of __Dict/__Unit/__Base from the
            # keyword arguments are on missing lines.
        #
        if __Diag is not None:
            self.__D = str(__Diag)
        if __Iden is not None:
            self.__I = str(__Iden)
        # ("else:" line missing)
            self.__I = str(__Diag)
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Unit is None or __Unit == "None":
            # NOTE(review): the body of this branch (presumably resetting
            # self.__U) is on a line missing from this chunk.
        if __Base is None or __Base == "None":
            # NOTE(review): the body of this branch (presumably resetting
            # self.__B) is on a line missing from this chunk.
        #
        self.__setDiagnostic( self.__D, self.__I, self.__U, self.__B, self.__P, self.__E )

    # NOTE(review): the "def get(self):" header of the following accessor is
    # missing from this chunk.
        return self.__TheDiag

    def __setDiagnostic(self, __choice = None, __name = "", __unit = "", __basetype = None, __parameters = {}, __existings = () ):
        """
        Select and instantiate a diagnostic to perform.
        """
        # NOTE(review): the "if __choice is None:" guard line is missing from
        # this chunk.
            raise ValueError("Error: diagnostic choice has to be given")
        __daDirectory = "daDiagnostics"
        #
        # Explicitly search for the complete file
        # ---------------------------------------
        # NOTE(review): the "__module_path = None" initialisation line is
        # missing from this chunk.
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, __daDirectory, str(__choice)+'.py')):
                __module_path = os.path.abspath(os.path.join(directory, __daDirectory))
        if __module_path is None:
            raise ImportError("No diagnostic module named \"%s\" was found in a \"%s\" subdirectory\n The search path is %s"%(__choice, __daDirectory, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------
        # NOTE(review): the "try:" line opening this block is missing from
        # this chunk.
            # Temporarily prepend the module path so the plain __import__ works.
            __sys_path_tmp = sys.path ; sys.path.insert(0,__module_path)
            self.__diagnosticFile = __import__(str(__choice), globals(), locals(), [])
            sys.path = __sys_path_tmp ; del __sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(__choice,e))
        #
        # Instantiate an object of the file's elementary type
        # ---------------------------------------------------
        if __name in __existings:
            raise ValueError("A default input with the same name \"%s\" already exists."%str(__name))
        # ("else:" line missing)
            self.__TheDiag = self.__diagnosticFile.ElementaryDiagnostic(
                # NOTE(review): the name/unit entries of this call are on
                # missing lines.
                basetype   = __basetype,
                parameters = __parameters )
850 # ==============================================================================
# NOTE(review): sampled extract -- embedded source line numbers jump, so
# statements are missing throughout this class.  Code left byte-identical.
#
# Purpose: generic action-interface class binding an assimilation algorithm
# (dynamically imported from a "daAlgorithms" subdirectory) to its parameter
# dictionary, and driving its execution either as a pure-Python scheme or
# through a SALOME/YACS XML scheme.
851 class AlgorithmAndParameters(object):
853 Classe générale d'interface d'action pour l'algorithme et ses paramètres
856 name = "GenericAlgorithm",
863 self.__name = str(name)
867 self.__algorithm = {}
868 self.__algorithmFile = None
869 self.__algorithmName = None
871 self.updateParameters( asDict, asScript )
# Script input takes precedence for the algorithm name, as elsewhere.
873 if asScript is not None:
874 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
878 if __Algo is not None:
879 self.__A = str(__Algo)
880 self.__P.update( {"Algorithm":self.__A} )
882 self.__setAlgorithm( self.__A )
884 def updateParameters(self,
888 "Update of the parameters"
889 if asScript is not None:
890 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
894 if __Dict is not None:
895 self.__P.update( dict(__Dict) )
897 def executePythonScheme(self, asDictAO = None):
898 "Runs the assimilation computation"
# Clear the operator evaluation cache before a fresh run.
899 Operator.CM.clearCache()
901 if not isinstance(asDictAO, dict):
902 raise ValueError("The objects for algorithm calculation has to be given as a dictionnary, and is not")
# Each entry may be a wrapper exposing getO() or already the raw object;
# unwrap when possible.  Xb comes from either "Background" or "CheckingPoint".
903 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
904 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
905 else: self.__Xb = None
906 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
907 else: self.__Y = asDictAO["Observation"]
908 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
909 else: self.__U = asDictAO["ControlInput"]
910 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
911 else: self.__HO = asDictAO["ObservationOperator"]
912 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
913 else: self.__EM = asDictAO["EvolutionModel"]
914 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
915 else: self.__CM = asDictAO["ControlModel"]
916 self.__B = asDictAO["BackgroundError"]
917 self.__R = asDictAO["ObservationError"]
918 self.__Q = asDictAO["EvolutionError"]
# Validate all cross-shape constraints before launching the algorithm.
920 self.__shape_validate()
922 self.__algorithm.run(
932 Parameters = self.__P,
936 def executeYACSScheme(self, FileName=None):
937 "Runs the assimilation computation"
938 if FileName is None or not os.path.exists(FileName):
939 raise ValueError("a existing DIC Python file name has to be given for YACS execution.\n")
# NOTE(review): os.environ.has_key() is Python 2 only -- removed in
# Python 3.  Should read: if "ADAO_ROOT_DIR" not in os.environ:
940 if not os.environ.has_key("ADAO_ROOT_DIR"):
941 raise ImportError("Unable to get ADAO_ROOT_DIR environnement variable. Please launch SALOME to add ADAO_ROOT_DIR to your environnement.\n")
# Convert the DIC Python input file into a YACS XML scheme using the
# external creator script shipped with ADAO.
943 __converterExe = os.path.join(os.environ["ADAO_ROOT_DIR"], "bin/salome", "AdaoYacsSchemaCreator.py")
944 __inputFile = os.path.abspath(FileName)
945 __outputFile = __inputFile[:__inputFile.rfind(".")] + '.xml'
947 __args = ["python", __converterExe, __inputFile, __outputFile]
# NOTE(review): "subprocess" is not imported in the visible file header --
# presumably imported in a missing line; verify.
949 __p = subprocess.Popen(__args)
950 (__stdoutdata, __stderrdata) = __p.communicate()
951 if not os.path.exists(__outputFile):
952 __msg = "An error occured during the execution of the ADAO YACS Schema\n"
953 __msg += "Creator applied on the input file:\n"
954 __msg += " %s\n"%__outputFile
955 __msg += "If SALOME GUI is launched by command line, see errors\n"
956 __msg += "details in your terminal.\n"
957 raise ValueError(__msg)
# Load and execute the generated scheme within the SALOME/YACS runtime.
# NOTE(review): SALOMERuntime, pilot and loader are SALOME modules whose
# imports are in missing lines of this extract.
963 SALOMERuntime.RuntimeSALOME_setRuntime()
965 r = pilot.getRuntime()
966 xmlLoader = loader.YACSLoader()
967 xmlLoader.registerProcCataLoader()
969 catalogAd = r.loadCatalog("proc", __outputFile)
972 r.addCatalog(catalogAd)
975 p = xmlLoader.load(__outputFile)
976 except IOError as ex:
977 print("IO exception: %s"%(ex,))
979 logger = p.getLogger("parser")
980 if not logger.isEmpty():
981 print("The imported file has errors :")
982 print(logger.getStr())
984 print("Le schéma n'est pas valide et ne peut pas être exécuté")
986 print(p.getErrorReport())
988 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
989 p.checkConsistency(info)
990 if info.areWarningsOrErrors():
991 print("Le schéma n'est pas cohérent et ne peut pas être exécuté")
992 print(info.getGlobalRepr())
994 e = pilot.ExecutorSwig()
996 if p.getEffectiveState() != pilot.DONE:
997 print(p.getErrorReport())
999 raise ValueError("execution error of YACS scheme")
# Dict-like read access: algorithm stored variables first, then parameters.
1003 def get(self, key = None):
1004 "Checks the existence of a variable or parameter key"
1005 if key in self.__algorithm:
1006 return self.__algorithm.get( key )
1007 elif key in self.__P:
1008 return self.__P[key]
1012 def pop(self, k, d):
1013 "Required for pickling"
1014 return self.__algorithm.pop(k, d)
1016 def getAlgorithmRequiredParameters(self, noDetails=True):
1017 "Returns the list of parameters required by the algorithm"
1018 return self.__algorithm.getRequiredParameters(noDetails)
# Attach an observer __O (with hook parameters __I, scheduler __S) to the
# algorithm stored variable named __V.
1020 def setObserver(self, __V, __O, __I, __S):
1021 if self.__algorithm is None \
1022 or isinstance(self.__algorithm, dict) \
1023 or not hasattr(self.__algorithm,"StoredVariables"):
1024 raise ValueError("No observer can be build before choosing an algorithm.")
1025 if __V not in self.__algorithm:
1026 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1028 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1031 HookParameters = __I,
1034 def removeObserver(self, __V, __O, __A = False):
1035 if self.__algorithm is None \
1036 or isinstance(self.__algorithm, dict) \
1037 or not hasattr(self.__algorithm,"StoredVariables"):
1038 raise ValueError("No observer can be removed before choosing an algorithm.")
1039 if __V not in self.__algorithm:
1040 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1042 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1047 def hasObserver(self, __V):
1048 if self.__algorithm is None \
1049 or isinstance(self.__algorithm, dict) \
1050 or not hasattr(self.__algorithm,"StoredVariables"):
1052 if __V not in self.__algorithm:
1054 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1057 return list(self.__algorithm.keys()) + list(self.__P.keys())
1059 def __contains__(self, key=None):
1060 "D.__contains__(k) -> True if D has a key k, else False"
1061 return key in self.__algorithm or key in self.__P
1064 "x.__repr__() <==> repr(x)"
1065 return repr(self.__A)+", "+repr(self.__P)
1068 "x.__str__() <==> str(x)"
1069 return str(self.__A)+", "+str(self.__P)
# Dynamic algorithm loader, mirroring DiagnosticAndParameters.__setDiagnostic:
# locate "<dir>/daAlgorithms/<choice>.py" on sys.path, import it, and
# instantiate its ElementaryAlgorithm class.  The choice is one-shot.
1071 def __setAlgorithm(self, choice = None ):
1073 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1074 d'assimilation. L'argument est un champ caractère se rapportant au nom
1075 d'un fichier contenu dans "../daAlgorithms" et réalisant l'opération
1076 d'assimilation sur les arguments fixes.
1079 raise ValueError("Error: algorithm choice has to be given")
1080 if self.__algorithmName is not None:
1081 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1082 daDirectory = "daAlgorithms"
1084 # Explicitly search for the complete module file
1085 # ------------------------------------------
1087 for directory in sys.path:
1088 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1089 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1090 if module_path is None:
1091 raise ImportError("No algorithm module named \"%s\" was found in a \"%s\" subdirectory\n The search path is %s"%(choice, daDirectory, sys.path))
1093 # Import the complete file as a module
1094 # ------------------------------------------
# Same temporary sys.path manipulation as in the diagnostics loader;
# not exception-safe either (no finally-clause visible).
1096 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1097 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1098 self.__algorithmName = str(choice)
1099 sys.path = sys_path_tmp ; del sys_path_tmp
1100 except ImportError as e:
1101 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1103 # Instantiate an object of the file's elementary type
1104 # -------------------------------------------------
1105 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# Cross-checks every operand shape (Xb, Y, U, B, R, Q, HO, EM, CM) for
# mutual consistency before running.  Objects may expose "shape" as either
# a tuple attribute or a zero-argument callable; both forms are accepted.
1108 def __shape_validate(self):
1110 Validation de la correspondance correcte des tailles des variables et
1111 des matrices s'il y en a.
1113 if self.__Xb is None: __Xb_shape = (0,)
1114 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1115 elif hasattr(self.__Xb,"shape"):
1116 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1117 else: __Xb_shape = self.__Xb.shape()
1118 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1120 if self.__Y is None: __Y_shape = (0,)
1121 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1122 elif hasattr(self.__Y,"shape"):
1123 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1124 else: __Y_shape = self.__Y.shape()
1125 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1127 if self.__U is None: __U_shape = (0,)
1128 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1129 elif hasattr(self.__U,"shape"):
1130 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1131 else: __U_shape = self.__U.shape()
1132 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1134 if self.__B is None: __B_shape = (0,0)
1135 elif hasattr(self.__B,"shape"):
1136 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1137 else: __B_shape = self.__B.shape()
1138 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1140 if self.__R is None: __R_shape = (0,0)
1141 elif hasattr(self.__R,"shape"):
1142 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1143 else: __R_shape = self.__R.shape()
1144 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1146 if self.__Q is None: __Q_shape = (0,0)
1147 elif hasattr(self.__Q,"shape"):
1148 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1149 else: __Q_shape = self.__Q.shape()
1150 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
# Operators are stored as dicts of operator forms keyed by e.g. "Direct".
1152 if len(self.__HO) == 0: __HO_shape = (0,0)
1153 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1154 elif hasattr(self.__HO["Direct"],"shape"):
1155 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1156 else: __HO_shape = self.__HO["Direct"].shape()
1157 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1159 if len(self.__EM) == 0: __EM_shape = (0,0)
1160 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1161 elif hasattr(self.__EM["Direct"],"shape"):
1162 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1163 else: __EM_shape = self.__EM["Direct"].shape()
1164 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1166 if len(self.__CM) == 0: __CM_shape = (0,0)
1167 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1168 elif hasattr(self.__CM["Direct"],"shape"):
1169 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1170 else: __CM_shape = self.__CM["Direct"].shape()
1171 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1173 # Check the consistency conditions
1174 # ---------------------------
# Vectors must be one-dimensional (or degenerate 2-D with one unit axis).
1175 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1176 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1177 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1178 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
# Covariance matrices must be square.
1180 if not( min(__B_shape) == max(__B_shape) ):
1181 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1182 if not( min(__R_shape) == max(__R_shape) ):
1183 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1184 if not( min(__Q_shape) == max(__Q_shape) ):
1185 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1186 if not( min(__EM_shape) == max(__EM_shape) ):
1187 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
# Operator/operand compatibility: H maps state space to observation space.
1189 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and not( __HO_shape[1] == max(__Xb_shape) ):
1190 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1191 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and not( __HO_shape[0] == max(__Y_shape) ):
1192 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1193 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1194 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1195 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1196 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# Special case: for EnsembleBlue the background is a series of vectors, so
# reshape it into a persistent vector collection instead of rejecting it.
1198 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1199 if self.__algorithmName in ["EnsembleBlue",]:
1200 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
# NOTE(review): numpy.float is a deprecated alias removed in NumPy 1.24+
# (use float or numpy.float64); "numpy" itself is imported in a line
# missing from this extract.
1201 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1202 for member in asPersistentVector:
1203 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
# NOTE(review): this rebinds __Xb_shape to an *int* (min of the tuple),
# while every later use applies max(__Xb_shape) as if it were a tuple --
# looks like it should be (min(__B_shape),); verify against upstream.
1204 __Xb_shape = min(__B_shape)
1206 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1208 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1209 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1211 if self.__EM is not None and len(self.__EM) > 0 and not(type(self.__EM) is type({})) and not( __EM_shape[1] == max(__Xb_shape) ):
1212 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1214 if self.__CM is not None and len(self.__CM) > 0 and not(type(self.__CM) is type({})) and not( __CM_shape[1] == max(__U_shape) ):
1215 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
# Optional "Bounds" parameter must provide one bound pair per state component.
1217 if ("Bounds" in self.__P) \
1218 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1219 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1220 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1221 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1225 # ==============================================================================
# NOTE(review): sampled extract -- lines are missing inside this class
# (e.g. the "def __init__(self," line).  Code left byte-identical.
#
# Purpose: generic observer interface.  Normalizes the observed variable
# name(s) and info string(s) into parallel tuples, builds the observer
# callable from (in priority order) a literal string, a registered template,
# a script file, or a ready-made object, then registers it on the algorithm.
1226 class DataObserver(object):
1228 Classe générale d'interface de type observer
1231 name = "GenericObserver",
1243 self.__name = str(name)
# Normalize onVariable into a tuple of names and withInfo into a matching
# tuple of info strings (defaulting to the variable names themselves).
1248 if onVariable is None:
1249 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1250 elif type(onVariable) in (tuple, list):
1251 self.__V = tuple(map( str, onVariable ))
1252 if withInfo is None:
# A single info string is replicated for every observed variable.
1255 self.__I = (str(withInfo),)*len(self.__V)
1256 elif isinstance(onVariable, str):
1257 self.__V = (onVariable,)
1258 if withInfo is None:
1259 self.__I = (onVariable,)
1261 self.__I = (str(withInfo),)
1263 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# Resolve the observer function source: string > template > script.
1265 if asString is not None:
1266 __FunctionText = asString
1267 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1268 __FunctionText = Templates.ObserverTemplates[asTemplate]
1269 elif asScript is not None:
1270 __FunctionText = ImportFromScript(asScript).getstring()
1273 __Function = ObserverF(__FunctionText)
# A ready-made observer object short-circuits the text-built function.
1275 if asObsObject is not None:
1276 self.__O = asObsObject
1278 self.__O = __Function.getfunc()
# Register the observer on every requested variable of the algorithm.
1280 for k in range(len(self.__V)):
1283 if ename not in withAlgo:
1284 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1286 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1289 "x.__repr__() <==> repr(x)"
1290 return repr(self.__V)+"\n"+repr(self.__O)
1293 "x.__str__() <==> str(x)"
1294 return str(self.__V)+"\n"+str(self.__O)
1296 # ==============================================================================
# NOTE(review): sampled extract -- lines are missing inside this class.
# Code left byte-identical; only comments are added.
#
# Purpose: generic state-vector interface.  Accepts either a single vector
# (stored as a numpy column matrix) or a series of vectors (stored as a
# Persistence.OneVector), optionally read from a script, and exposes uniform
# shape/size metadata plus is_vector()/is_series() type queries.
1297 class State(object):
1299 Classe générale d'interface de type état
1302 name = "GenericVector",
1304 asPersistentVector = None,
1307 toBeChecked = False,
1310 Permet de définir un vecteur :
1311 - asVector : entrée des données, comme un vecteur compatible avec le
1312 constructeur de numpy.matrix, ou "True" si entrée par script.
1313 - asPersistentVector : entrée des données, comme une série de vecteurs
1314 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1315 type Persistence, ou "True" si entrée par script.
1316 - asScript : si un script valide est donné contenant une variable
1317 nommée "name", la variable est de type "asVector" (par défaut) ou
1318 "asPersistentVector" selon que l'une de ces variables est placée à
1321 self.__name = str(name)
1322 self.__check = bool(toBeChecked)
1326 self.__is_vector = False
1327 self.__is_series = False
# Script input: the flags asVector/asPersistentVector choose which kind
# of object is read from the script under the state's name.
1329 if asScript is not None:
1330 __Vector, __Series = None, None
1331 if asPersistentVector:
1332 __Series = ImportFromScript(asScript).getvalue( self.__name )
1334 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1336 __Vector, __Series = asVector, asPersistentVector
1338 if __Vector is not None:
1339 self.__is_vector = True
# Flatten any input into a column matrix of floats.
# NOTE(review): numpy.float is a deprecated alias removed in NumPy 1.24+.
1340 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1341 self.shape = self.__V.shape
1342 self.size = self.__V.size
1343 elif __Series is not None:
1344 self.__is_series = True
# A raw sequence of vectors is wrapped into a persistent OneVector;
# a Persistence object presumably passes through (branch lines missing).
1345 if type(__Series) in (tuple, list, numpy.ndarray, numpy.matrix):
1346 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1347 for member in __Series:
1348 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
# NOTE(review): debug leftover -- redundant local "import sys" plus an
# unconditional stdout flush; candidate for removal upstream.
1349 import sys ; sys.stdout.flush()
# "shape" may be a tuple attribute or a callable, as elsewhere in the file.
1352 if type(self.__V.shape) in (tuple, list):
1353 self.shape = self.__V.shape
1355 self.shape = self.__V.shape()
# Normalize 1-D shapes to explicit column form (n, 1).
1356 if len(self.shape) == 1:
1357 self.shape = (self.shape[0],1)
1358 self.size = self.shape[0] * self.shape[1]
1360 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1362 if scheduledBy is not None:
1363 self.__T = scheduledBy
# Returns the stored object, optionally together with its scheduler.
1365 def getO(self, withScheduler=False):
1367 return self.__V, self.__T
1368 elif self.__T is None:
1374 "Internal type check"
1375 return self.__is_vector
1378 "Internal type check"
1379 return self.__is_series
1382 "x.__repr__() <==> repr(x)"
1383 return repr(self.__V)
1386 "x.__str__() <==> str(x)"
1387 return str(self.__V)
1389 # ==============================================================================
# NOTE(review): sampled extract -- lines are missing inside this class
# (method "def" lines, try/except lines, some else-branches).  Code left
# byte-identical; only comments are added.
#
# Purpose: generic covariance-matrix interface with four internal storages:
#   scalar  -- a single variance multiplying an implicit identity,
#   vector  -- a diagonal of variances (implicit diagonal matrix),
#   matrix  -- a full numpy.matrix,
#   object  -- a user object exposing a matrix-like duck-typed API.
# All linear-algebra operations below dispatch on these four cases.
# NOTE(review): built on the legacy numpy.matrix class (and its .T/.I
# properties), which NumPy has deprecated in favor of ndarray.
1390 class Covariance(object):
1392 Classe générale d'interface de type covariance
1395 name = "GenericCovariance",
1396 asCovariance = None,
1397 asEyeByScalar = None,
1398 asEyeByVector = None,
1401 toBeChecked = False,
1404 Permet de définir une covariance :
1405 - asCovariance : entrée des données, comme une matrice compatible avec
1406 le constructeur de numpy.matrix
1407 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1408 multiplicatif d'une matrice de corrélation identité, aucune matrice
1409 n'étant donc explicitement à donner
1410 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1411 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1412 n'étant donc explicitement à donner
1413 - asCovObject : entrée des données comme un objet python, qui a les
1414 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1415 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1416 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1417 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1418 pleine doit être vérifié
1420 self.__name = str(name)
1421 self.__check = bool(toBeChecked)
1424 self.__is_scalar = False
1425 self.__is_vector = False
1426 self.__is_matrix = False
1427 self.__is_object = False
# Script input: which variable kind is read depends on flags whose tests
# are in lines missing from this extract.
1429 if asScript is not None:
1430 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1432 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1434 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1436 __Object = ImportFromScript(asScript).getvalue( self.__name )
1438 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1440 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Priority of storages: scalar > vector > matrix > object.
1442 if __Scalar is not None:
1443 if numpy.matrix(__Scalar).size != 1:
1444 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1445 self.__is_scalar = True
# abs() forces a non-negative variance; positivity is checked later.
1446 self.__C = numpy.abs( float(__Scalar) )
1449 elif __Vector is not None:
1450 self.__is_vector = True
1451 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
# Shape/size advertise the implicit full matrix, not the stored diagonal.
1452 self.shape = (self.__C.size,self.__C.size)
1453 self.size = self.__C.size**2
1454 elif __Matrix is not None:
1455 self.__is_matrix = True
1456 self.__C = numpy.matrix( __Matrix, float )
1457 self.shape = self.__C.shape
1458 self.size = self.__C.size
1459 elif __Object is not None:
1460 self.__is_object = True
# Duck-type validation of the mandatory matrix-like API.
1462 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1463 if not hasattr(self.__C,at):
1464 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1465 if hasattr(self.__C,"shape"):
1466 self.shape = self.__C.shape
1469 if hasattr(self.__C,"size"):
1470 self.size = self.__C.size
1475 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Sanity checks: squareness, positivity, and (when asked or when logging
# verbosely) an SPD check through an attempted Cholesky factorization.
1479 def __validate(self):
1481 if self.ismatrix() and min(self.shape) != max(self.shape):
1482 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1483 if self.isobject() and min(self.shape) != max(self.shape):
1484 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1485 if self.isscalar() and self.__C <= 0:
1486 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1487 if self.isvector() and (self.__C <= 0).any():
1488 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1489 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
# Cholesky succeeds iff the matrix is SPD (try/except lines missing here).
1491 L = numpy.linalg.cholesky( self.__C )
1493 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1494 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1496 L = self.__C.cholesky()
1498 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1501 "Internal type check"
1502 return self.__is_scalar
1505 "Internal type check"
1506 return self.__is_vector
1509 "Internal type check"
1510 return self.__is_matrix
1513 "Internal type check"
1514 return self.__is_object
# Inverse: each storage kind maps to the Covariance of its inverse.
1519 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1520 elif self.isvector():
1521 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1522 elif self.isscalar():
1523 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1524 elif self.isobject():
1525 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1527 return None # Indispensable
# Transpose: diagonal and scalar storages are symmetric by construction.
1532 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1533 elif self.isvector():
1534 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1535 elif self.isscalar():
1536 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1537 elif self.isobject():
1538 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1541 "Cholesky decomposition"
1543 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1544 elif self.isvector():
# For a diagonal storage the Cholesky factor is the elementwise sqrt.
1545 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1546 elif self.isscalar():
1547 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1548 elif self.isobject() and hasattr(self.__C,"cholesky"):
1549 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1551 def choleskyI(self):
1552 "Inverse of the Cholesky decomposition"
1554 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1555 elif self.isvector():
1556 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1557 elif self.isscalar():
1558 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1559 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1560 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
# msize is required only for the scalar storage, where the matrix size is
# otherwise unknown.
1562 def diag(self, msize=None):
1563 "Diagonal of the matrix"
1565 return numpy.diag(self.__C)
1566 elif self.isvector():
1568 elif self.isscalar():
1570 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1572 return self.__C * numpy.ones(int(msize))
1573 elif self.isobject():
1574 return self.__C.diag()
# Materializes the full dense matrix for every storage kind.
1576 def asfullmatrix(self, msize=None):
1580 elif self.isvector():
1581 return numpy.matrix( numpy.diag(self.__C), float )
1582 elif self.isscalar():
1584 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1586 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1587 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1588 return self.__C.asfullmatrix()
1590 def trace(self, msize=None):
1591 "Trace of the matrix"
1593 return numpy.trace(self.__C)
1594 elif self.isvector():
1595 return float(numpy.sum(self.__C))
1596 elif self.isscalar():
1598 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1600 return self.__C * int(msize)
1601 elif self.isobject():
1602 return self.__C.trace()
1608 "x.__repr__() <==> repr(x)"
1609 return repr(self.__C)
1612 "x.__str__() <==> str(x)"
1613 return str(self.__C)
1615 def __add__(self, other):
1616 "x.__add__(y) <==> x+y"
1617 if self.ismatrix() or self.isobject():
1618 return self.__C + numpy.asmatrix(other)
1619 elif self.isvector() or self.isscalar():
# Add the (implicit) diagonal onto other's diagonal via a strided view.
# NOTE(review): numpy.asarray(other) can be a *view* of the caller's
# array, in which case the += mutates the caller's operand in place --
# confirm whether callers rely on this.
1620 _A = numpy.asarray(other)
1621 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1622 return numpy.asmatrix(_A)
1624 def __radd__(self, other):
1625 "x.__radd__(y) <==> y+x"
1626 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1628 def __sub__(self, other):
1629 "x.__sub__(y) <==> x-y"
1630 if self.ismatrix() or self.isobject():
1631 return self.__C - numpy.asmatrix(other)
1632 elif self.isvector() or self.isscalar():
# Same strided-diagonal trick as __add__, with the same aliasing caveat.
1633 _A = numpy.asarray(other)
1634 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1635 return numpy.asmatrix(_A)
1637 def __rsub__(self, other):
1638 "x.__rsub__(y) <==> y-x"
1639 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1642 "x.__neg__() <==> -x"
# Right-multiplication C*y: dispatch on storage kind, then decide whether
# the operand is a vector (flattened, returned as column) or a conformable
# matrix.  Note shape[1] is the implicit full-matrix column count.
1645 def __mul__(self, other):
1646 "x.__mul__(y) <==> x*y"
1647 if self.ismatrix() and isinstance(other,numpy.matrix):
1648 return self.__C * other
1649 elif self.ismatrix() and (isinstance(other,numpy.ndarray) \
1650 or isinstance(other,list) \
1651 or isinstance(other,tuple)):
1652 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1653 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1654 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1655 return self.__C * numpy.asmatrix(other)
1657 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1658 elif self.isvector() and (isinstance(other,numpy.matrix) \
1659 or isinstance(other,numpy.ndarray) \
1660 or isinstance(other,list) \
1661 or isinstance(other,tuple)):
1662 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1663 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1664 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
# Diagonal * matrix implemented as row-wise broadcast on the transpose.
1665 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1667 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1668 elif self.isscalar() and isinstance(other,numpy.matrix):
1669 return self.__C * other
1670 elif self.isscalar() and (isinstance(other,numpy.ndarray) \
1671 or isinstance(other,list) \
1672 or isinstance(other,tuple)):
1673 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1674 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1676 return self.__C * numpy.asmatrix(other)
1677 elif self.isobject():
1678 return self.__C.__mul__(other)
1680 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# Left-multiplication y*C: only numpy.matrix operands are supported for the
# non-object storages (narrower than __mul__).
1682 def __rmul__(self, other):
1683 "x.__rmul__(y) <==> y*x"
1684 if self.ismatrix() and isinstance(other,numpy.matrix):
1685 return other * self.__C
1686 elif self.isvector() and isinstance(other,numpy.matrix):
1687 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1688 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1689 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1690 return numpy.asmatrix(numpy.array(other) * self.__C)
1692 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1693 elif self.isscalar() and isinstance(other,numpy.matrix):
1694 return other * self.__C
1695 elif self.isobject():
1696 return self.__C.__rmul__(other)
1698 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1701 "x.__len__() <==> len(x)"
# len() is the row count of the (implicit) full matrix.
1702 return self.shape[0]
1704 # ==============================================================================
class ObserverF(object):
    """
    Build an observer function object from its source text.

    The Python source given in "corps" is executed each time the observer
    is triggered, with the observed variable bound to "var" and the calling
    context bound to "info" in the local scope of the execution.
    """
    def __init__(self, corps=""):
        # Raw Python source of the observer body, executed later by func.
        self.__corps = corps
    def func(self, var, info):
        "Observation function: executes the stored source text"
        # "var" and "info" are visible to the executed code as locals.
        exec(self.__corps)
    def getfunc(self):
        "Return the function pointer stored in the object"
        return self.func
1718 # ==============================================================================
class ImportFromScript(object):
    """
    Import a named variable from a user Python script file.

    The file name may be given with or without its ".py" extension; the
    script is imported as a module and its raw source text is kept for
    later retrieval through getstring().
    """
    def __init__(self, __filename=None):
        "Check the script existence and import it"
        # Validate before touching the value: None must raise the intended
        # ValueError, not an AttributeError from the suffix handling below.
        if __filename is None:
            raise ValueError("The name of the file containing the variable to be imported has to be specified.")
        # Strip only a real ".py" suffix. The previous str.rstrip(".py")
        # removed any trailing '.', 'p' or 'y' characters, mangling names
        # such as "happy" or "study.p".
        if __filename.endswith(".py"):
            self.__filename = __filename[:-3]
        else:
            self.__filename = __filename
        if not os.path.isfile(str(self.__filename)+".py"):
            raise ValueError("The file containing the variable to be imported doesn't seem to exist. The given file name is:\n \"%s\""%self.__filename)
        self.__scriptfile = __import__(self.__filename, globals(), locals(), [])
        # Keep the raw source text; close the handle deterministically.
        with open(self.__filename+".py", 'r') as fid:
            self.__scriptstring = fid.read()
    def getvalue(self, __varname=None, __synonym=None ):
        "Return the requested variable, falling back on the synonym name"
        if __varname is None:
            raise ValueError("The name of the variable to be imported has to be specified.")
        if not hasattr(self.__scriptfile, __varname):
            if __synonym is None:
                raise ValueError("The imported script file \"%s\" doesn't contain the specified variable \"%s\"."%(str(self.__filename)+".py",__varname))
            elif not hasattr(self.__scriptfile, __synonym):
                raise ValueError("The imported script file \"%s\" doesn't contain the specified variable \"%s\"."%(str(self.__filename)+".py",__synonym))
            else:
                return getattr(self.__scriptfile, __synonym)
        else:
            return getattr(self.__scriptfile, __varname)
    def getstring(self):
        "Return the full script source text"
        return self.__scriptstring
1749 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,   # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,   # Simulation of Hm(x) already done
                   _arg = None,   # Extra arguments for Hm, as a tuple
                   _BI  = None,   # Inverse of the background error covariance
                   _RI  = None,   # Inverse of the observation error covariance
                   _Xb  = None,   # Background state
                   _Y   = None,   # Observations
                   _SIV = False,  # To be removed for 8.0
                   _SSC = None,   # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,      # nbPreviousSteps
                   _QM  = "DA",   # QualityMeasure
                   _SSV = None,   # In and/or out: self.StoredVariables
                   _fRt = False,  # Return or not the extended output
                   _sSc = True,   # Store or not the supplementary calculations
                  ):
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest,
    PSO, SA, Tabu, DFO, QuantileRegression.

    Returns the scalar cost J, or Hm(x) when _QM is "QR", or the storage
    dictionary _SSV when _fRt is True.
    """
    # The historical defaults _SSC=[] and _SSV={} were shared mutable
    # defaults, mutated by this function (keys added, values appended),
    # leaking state across calls. None sentinels keep the same behavior
    # for every caller that passes the arguments explicitly.
    if _SSC is None: _SSC = []
    if _SSV is None: _SSV = {}
    #
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Either reuse an already available simulation, or run the operator.
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    if numpy.any(numpy.isnan(_HX)):
        # A failed simulation propagates NaN costs instead of raising.
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb  = 0.
            Jo  = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb  = 0.
            Jo  = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb  = 0.
            Jo  = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J   = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
        if "IndexOfOptimum" in _SSC:
            _SSV["IndexOfOptimum"].append( IndexMin )
        if "CurrentOptimum" in _SSC:
            _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
        if "SimulatedObservationAtCurrentOptimum" in _SSC:
            _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For the QuantileRegression
            return _HX
        else:
            return J
1864 # ==============================================================================
# Self-diagnostic entry point when the module is run as a script.
if __name__ == "__main__":
    print("\n AUTODIAGNOSTIC \n")