1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2017 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
Ce module est destiné à être appelé par AssimilationStudy.
28 __author__ = "Jean-Philippe ARGAUD"
import os, sys, logging, copy
import numpy
from daCore import Persistence
from daCore import PlatformInfo
from daCore import Templates
37 # ==============================================================================
class CacheManager(object):
    # In-memory cache of operator evaluations, used to avoid recomputing H(x)
    # for a point x that was already seen (within a relative tolerance).
    # NOTE(review): several source lines are missing from this excerpt (the
    # docstring quotes, the "def __init__(self, ...)" header, "continue" and
    # "return" statements, the end of the tuple appended in storeValueInX, and
    # the bodies of the later helper methods). Visible code is kept verbatim;
    # gaps are flagged inline -- TODO recover the full file before running.
    Classe générale de gestion d'un cache de calculs
            toleranceInRedundancy = 1.e-18,  # relative tolerance declaring two points identical
            lenghtOfRedundancy    = -1,      # max cached entries; <0 means "size from data" ("lenght" sic: part of the public keyword interface, kept)
        Les caractéristiques de tolérance peuvent être modifées à la création.
        # Constructor body: record tolerance/length and start with an empty cache.
        self.__tolerBP = float(toleranceInRedundancy)
        self.__lenghtOR = int(lenghtOfRedundancy)
        self.__initlnOR = self.__lenghtOR  # remember the initial length so it can be restored later
        self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
        # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)

    def wasCalculatedIn(self, xValue ): #, info="" ):
        "Check whether a computation matching the given value is already cached"
        # Scan the cache from most recent to oldest, limited to __lenghtOR entries.
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if xValue.size != self.__listOPCV[i][0].size:
                # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
                # NOTE(review): the statement under this "if" (presumably
                # "continue") is missing from this excerpt.
            # Entry i matches when the distance to the stored point is below the
            # tolerance scaled by the stored point norm (__listOPCV[i][2]).
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __HxV = self.__listOPCV[i][1]  # cached result for this point
                # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
                # NOTE(review): the "return" lines of this method (found flag
                # plus __HxV, and the not-found fallback) are missing here.

    def storeValueInX(self, xValue, HxValue ):
        "Store a computation associated with the given value"
        if self.__lenghtOR < 0:
            # Negative length means auto-size: keep roughly twice the state size.
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        # Evict oldest entries (FIFO) until the cache fits the allowed length.
        while len(self.__listOPCV) > self.__lenghtOR:
            # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
            self.__listOPCV.pop(0)
        # Each entry stores (point, result, point norm); the result element and
        # the closing parentheses of this append are missing from this excerpt.
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            numpy.linalg.norm(xValue),
        # NOTE(review): the two assignments below belong to later helper
        # methods whose "def" headers are missing from this excerpt.
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = self.__initlnOR
97 # ==============================================================================
class Operator(object):
    # Simple operator interface: wraps either a user function or a matrix, and
    # counts how often it is applied (as matrix, as function, or from cache).
    # NOTE(review): this excerpt is missing the class-level counters
    # (Operator.NbCallsAsMatrix / NbCallsAsMethod / NbCallsOfCached) and the
    # shared cache instance Operator.CM used by the methods below, as well as
    # scattered lines inside several methods. Visible code is kept verbatim;
    # gaps are flagged inline.
    Classe générale d'interface de type opérateur simple

    def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True):
        On construit un objet de ce type en fournissant à l'aide de l'un des
        deux mots-clé, soit une fonction python, soit une matrice.
        - fromMethod : argument de type fonction Python
        - fromMatrix : argument adapté au constructeur numpy.matrix
        - avoidingRedundancy : évite ou pas les calculs redondants
        # Per-instance call counters: as matrix, as method, served from cache.
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        if fromMethod is not None:
            self.__Method = fromMethod
            # NOTE(review): the line presumably clearing self.__Matrix in this
            # branch is missing from this excerpt.
            self.__Type = "Method"
        elif fromMatrix is not None:
            # NOTE(review): numpy.float is deprecated/removed in modern NumPy
            # -- TODO migrate to plain float when editing runnable code.
            self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
            self.__Type = "Matrix"
        # NOTE(review): the trailing constructor branches (neither argument
        # given, etc.) are missing from this excerpt.

    def disableAvoidingRedundancy(self):
        # Turn off the shared calculation cache.
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # NOTE(review): this method is expected to *enable* the cache when
        # self.__AvoidRC is True; the conditional lines are missing from this
        # excerpt and only the "disable" fallback branch remains visible.
        Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None):
        Permet de restituer le résultat de l'application de l'opérateur à un
        argument xValue. Cette méthode se contente d'appliquer, son argument
        devant a priori être du bon type.
        - xValue : argument adapté pour appliquer l'opérateur
        if HValue is not None:
            # A precomputed result was supplied: normalise it to a column
            # vector, then cache it instead of recomputing.
            HxValue = numpy.asmatrix( numpy.ravel( HValue ) ).T
            # NOTE(review): the guard (presumably "if self.__AvoidRC:") around
            # the next statement is missing from this excerpt.
            Operator.CM.storeValueInX(xValue,HxValue)
        # Cache lookup; the surrounding "if self.__AvoidRC:" / "else:" lines
        # are missing from this excerpt.
            __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xValue)
            __alreadyCalculated = False
        if __alreadyCalculated:
            # Cache hit: count it and reuse the stored result.
            self.__addOneCacheCall()
            # NOTE(review): the "HxValue = __HxV" assignment is missing here.
        # Cache miss (an "else:" line is missing): apply the operator for real.
            if self.__Matrix is not None:
                self.__addOneMatrixCall()
                HxValue = self.__Matrix * xValue
            # Function form (an "else:" line is missing here).
                self.__addOneMethodCall()
                HxValue = self.__Method( xValue )
            # Store the fresh result for later reuse (guard line missing here).
            Operator.CM.storeValueInX(xValue,HxValue)
        # NOTE(review): the final "return HxValue" is missing from this excerpt.

    def appliedControledFormTo(self, paire ):
        Permet de restituer le résultat de l'application de l'opérateur à une
        paire (xValue, uValue). Cette méthode se contente d'appliquer, son
        argument devant a priori être du bon type. Si la uValue est None,
        on suppose que l'opérateur ne s'applique qu'à xValue.
        - xValue : argument X adapté pour appliquer l'opérateur
        - uValue : argument U adapté pour appliquer l'opérateur
        assert len(paire) == 2, "Incorrect number of arguments"
        xValue, uValue = paire
        if self.__Matrix is not None:
            # Linear case: the control does not act through this operator form.
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        elif uValue is not None:
            # Function form with a control: pass the (X, U) pair.
            self.__addOneMethodCall()
            return self.__Method( (xValue, uValue) )
        # Function form without control (an "else:" line is missing here).
            self.__addOneMethodCall()
            return self.__Method( xValue )

    def appliedInXTo(self, paire ):
        Permet de restituer le résultat de l'application de l'opérateur à un
        argument xValue, sachant que l'opérateur est valable en xNominal.
        Cette méthode se contente d'appliquer, son argument devant a priori
        être du bon type. Si l'opérateur est linéaire car c'est une matrice,
        alors il est valable en tout point nominal et il n'est pas nécessaire
        Arguments : une liste contenant
        - xNominal : argument permettant de donner le point où l'opérateur
          est construit pour etre ensuite appliqué
        - xValue : argument adapté pour appliquer l'opérateur
        assert len(paire) == 2, "Incorrect number of arguments"
        xNominal, xValue = paire
        if self.__Matrix is not None:
            # Linear operator: independent of the nominal point.
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        # Function form (an "else:" line is missing here).
            self.__addOneMethodCall()
            return self.__Method( (xNominal, xValue) )

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue"):
        Permet de renvoyer l'opérateur sous la forme d'une matrice
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            # NOTE(review): "return self.__Matrix" is missing from this excerpt.
        # NOTE(review): "is not" below tests identity, not equality; it works
        # only by accident of small-string interning and raises a
        # SyntaxWarning on Python >= 3.8 -- should be "!=". Not changed here
        # because neighbouring lines of this method are missing.
        elif ValueForMethodForm is not "UnknownVoidValue": # deliberately not "None"
            self.__addOneMethodCall()
            # Build the matrix by applying the function form at the given point.
            return numpy.matrix( self.__Method( (ValueForMethodForm, None) ) )
        # No matrix and no operating point (an "else:" line is missing here).
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")

    # NOTE(review): the "def ...(self):" header of the shape accessor below is
    # missing from this excerpt.
        Renvoie la taille sous forme numpy si l'opérateur est disponible sous
        la forme d'une matrice
        if self.__Matrix is not None:
            return self.__Matrix.shape
        # else branch (an "else:" line is missing here).
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        Renvoie les nombres d'évaluations de l'opérateur
        # NOTE(review): the "__nbcalls = (" opening of the tuple below is
        # missing from this excerpt. Order: local total, local matrix, local
        # method, local cached, then the same four class-wide counters.
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        # Return the whole tuple, or one counter when an index is requested.
        if which is None: return __nbcalls
        else:             return __nbcalls[which]
263 def __addOneMatrixCall(self):
264 "Comptabilise un appel"
265 self.__NbCallsAsMatrix += 1 # Decompte local
266 Operator.NbCallsAsMatrix += 1 # Decompte global
268 def __addOneMethodCall(self):
269 "Comptabilise un appel"
270 self.__NbCallsAsMethod += 1 # Decompte local
271 Operator.NbCallsAsMethod += 1 # Decompte global
273 def __addOneCacheCall(self):
274 "Comptabilise un appel"
275 self.__NbCallsOfCached += 1 # Decompte local
276 Operator.NbCallsOfCached += 1 # Decompte global
278 # ==============================================================================
class FullOperator(object):
    # Bundles the three elementary operators (Direct, Tangent, Adjoint) built
    # from a matrix, a function triplet, a single function (with approximated
    # derivatives), or an external script.
    # NOTE(review): this excerpt is missing the "def __init__(self," header,
    # several signature entries (asMatrix, asScript, appliedInX, avoidRC,
    # scheduledBy, toBeChecked, ...), the initialisations of __Parameters and
    # self.__FO, and various "else:"/"if" lines. Visible code is kept
    # verbatim; gaps are flagged inline.
    Classe générale d'interface de type opérateur complet
    (Direct, Linéaire Tangent, Adjoint)
                 name             = "GenericFullOperator",
                 asOneFunction    = None,  # a single direct function
                 asThreeFunctions = None,  # a dictionary of Direct/Tangent/Adjoint functions
                 asDict           = None,  # parameters dictionary
        self.__name      = str(name)
        self.__check     = bool(toBeChecked)
        # Translate the user-facing parameter names to internal "with*" keys.
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
            if "DifferentialIncrement" in asDict:
                __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
            if "CenteredFiniteDifference" in asDict:
                __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
            if "EnableMultiProcessing" in asDict:
                __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
            if "NumberOfProcesses" in asDict:
                __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
        # Case 1: the operator is defined in an external script.
        if asScript is not None:
            __Matrix, __Function = None, None
            # NOTE(review): the "if"/branch header lines around the next
            # statements are missing from this excerpt.
                __Matrix = ImportFromScript(asScript).getvalue( self.__name )
            # Approximated-derivatives case: only a direct operator is loaded.
                __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                # NOTE(review): the "__Function = {" opening is missing here.
                    "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
        # Case 2: the operator is given directly as python objects
        # (an "else:" line is missing before this group).
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                    # else ("else:" line missing): refuse an empty "Direct".
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                # else ("else:" line missing): a bare callable was given.
                    __Function = { "Direct":asOneFunction }
                # Tangent/Adjoint will be built by finite differences.
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                # Full triplet: Tangent and Adjoint must both be usable and
                # approximated derivatives must not be requested.
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    # Only a Direct function: derivatives will be approximated.
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                # else ("else:" line missing): unusable dictionary.
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Fall back on the tangent operator as the direct one.
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        # Historical Python 2 guard, kept commented out in the original:
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        # Normalise the "applied in X" information to a dictionary.
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        # NOTE(review): the final "else: __appliedInX = None" lines are
        # missing from this excerpt.
        if scheduledBy is not None:
            self.__T = scheduledBy
        # Build the three elementary Operator objects.
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Finite-difference case: fill in every FDApproximation default.
            if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
            if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
            if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
            from daNumerics.ApproximatedDerivatives import FDApproximation
            FDA = FDApproximation(
                Function              = __Function["Direct"],
                centeredDF            = __Function["withCenteredDF"],
                increment             = __Function["withIncrement"],
                dX                    = __Function["withdX"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy    = __Function["withLenghtOfRedundancy"],
                mpEnabled             = __Function["withmpEnabled"],
                mpWorkers             = __Function["withmpWorkers"],
            # NOTE(review): the closing parenthesis of this call is missing
            # from this excerpt.
            self.__FO["Direct"]  = Operator( fromMethod = FDA.DirectOperator,  avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # Explicit triplet of functions.
            self.__FO["Direct"]  = Operator( fromMethod = __Function["Direct"],  avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC )
        elif asMatrix is not None:
            # Linear case: one matrix yields all three (adjoint = transpose).
            # NOTE(review): numpy.float is removed in modern NumPy -- TODO float.
            __matrice = numpy.matrix( asMatrix, numpy.float )
            self.__FO["Direct"]  = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC )
        # Nothing usable was supplied (an "else:" line is missing here).
            raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
        # Store the "applied in X" values as numpy column matrices.
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            if type(__appliedInX) is not dict:
                raise ValueError("Error: observation operator defined by \"AppliedInX\" need a dictionary as argument.")
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # Case of a true numpy.matrix: flatten, then make a column.
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Case of a vector stored as a 2D numpy.array.
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                # Any other sequence-like value (an "else:" line is missing).
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
        # No "applied in X" information (an "else:" line is missing here).
            self.__FO["AppliedInX"] = None

    # NOTE(review): the "def __repr__(self):" header is missing here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)

    # NOTE(review): the "def __str__(self):" header and body are missing
    # here; self.__V is not set anywhere in the visible part of this class
    # -- TODO confirm against the complete file.
        "x.__str__() <==> str(x)"
443 # ==============================================================================
class Algorithm(object):
    # NOTE(review): docstring quotation marks and scattered lines are missing
    # from this excerpt; visible code is kept verbatim, gaps flagged inline.
    Classe générale d'interface de type algorithme
    Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
    d'assimilation, en fournissant un container (dictionnaire) de variables
    persistantes initialisées, et des méthodes d'accès à ces variables stockées.
    Une classe élémentaire d'algorithme doit implémenter la méthode "run".

    def __init__(self, name):
        L'initialisation présente permet de fabriquer des variables de stockage
        disponibles de manière générique dans les algorithmes élémentaires. Ces
        variables de stockage sont ensuite conservées dans un dictionnaire
        interne à l'objet, mais auquel on accède par la méthode "get".
        Les variables prévues sont :
        - CostFunctionJ : fonction-cout globale, somme des deux parties suivantes
        - CostFunctionJb : partie ébauche ou background de la fonction-cout
        - CostFunctionJo : partie observations de la fonction-cout
        - GradientOfCostFunctionJ : gradient de la fonction-cout globale
        - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-cout
        - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-cout
        - CurrentState : état courant lors d'itérations
        - Analysis : l'analyse Xa
        - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
        - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
        - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
        - Innovation : l'innovation : d = Y - H(X)
        - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
        - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
        - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
        - MahalanobisConsistency : indicateur de consistance des covariances
        - OMA : Observation moins Analysis : Y - Xa
        - OMB : Observation moins Background : Y - Xb
        - AMB : Analysis moins Background : Xa - Xb
        - APosterioriCovariance : matrice A
        - APosterioriVariances : variances de la matrice A
        - APosterioriStandardDeviations : écart-types de la matrice A
        - APosterioriCorrelations : correlations de la matrice A
        - Residu : dans le cas des algorithmes de vérification
        On peut rajouter des variables à stocker dans l'initialisation de
        l'algorithme élémentaire qui va hériter de cette classe
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()  # memory-usage probe for debug logs
        self._name = str( name )
        # Run-time parameters; "StoreSupplementaryCalculations" always exists.
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__required_parameters = {}  # declared parameters and their constraints
        # One Persistence container per storable variable (see the list above).
        # NOTE(review): the docstring above lists "AMB" but the container
        # below is named "BMA" -- confirm against the complete file.
        self.StoredVariables = {}
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")

    def _pre_run(self, Parameters ):
        # Common entry hook executed before every elementary algorithm run.
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        # Update self._parameters with the user-supplied Parameters.
        self.__setParameters(Parameters)
        # Corrections and complements.
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
        # Drop unusable bounds (an "else:" line is missing here).
            self._parameters["Bounds"] = None
        # Map the logging level onto the optimizers' verbosity flags.
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
            # scipy absent: raw TNC bitmask (an "else:" line is missing here).
                self._parameters["optmessages"] = 15
        # Quiet mode (an "else:" line is missing here).
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
            # scipy absent (an "else:" line is missing here).
                self._parameters["optmessages"] = 15

    def _post_run(self,_oH=None):
        # Common exit hook: derive a-posteriori quantities and log counters.
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Normalize A by its standard deviations to get correlations.
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        # NOTE(review): a guard such as "if _oH is not None:" appears to be
        # missing before the two lines below, which dereference _oH.
        logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
        logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Terminé", self._name)

    def get(self, key=None):
        Renvoie l'une des variables stockées identifiée par la clé, ou le
        dictionnaire de l'ensemble des variables disponibles en l'absence de
        clé. Ce sont directement les variables sous forme objet qui sont
        renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
        des classes de persistance.
        # NOTE(review): the "if key is not None:" / "else:" guard lines around
        # the two returns below are missing from this excerpt.
            return self.StoredVariables[key]
            return self.StoredVariables
593 def __contains__(self, key=None):
594 "D.__contains__(k) -> True if D has a key k, else False"
595 return key in self.StoredVariables
    # NOTE(review): the "def keys(self):" header of this method is missing
    # from this excerpt.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()

    # NOTE(review): the "def pop(self, k, d):" header of this method is
    # missing from this excerpt.
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.pop(k, d)
        # No storage yet: mimic dict.pop errors (surrounding lines missing).
            raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"

    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        Doit implémenter l'opération élémentaire de calcul d'assimilation sous
        sa forme mathématique la plus naturelle possible.
        # Abstract method: every concrete algorithm must override it.
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")

    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
        Permet de définir dans l'algorithme des paramètres requis et leurs
        caractéristiques par défaut.
        # NOTE(review): the "if name is None:" guard before this raise is
        # missing from this excerpt.
            raise ValueError("A name is mandatory to define a required parameter.")
        # Record the declaration; most dictionary entries ("default",
        # "minval", "maxval", "listval", "message") are missing here.
        self.__required_parameters[name] = {
            "typecast" : typecast,
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))

    def getRequiredParameters(self, noDetails=True):
        Renvoie la liste des noms de paramètres requis ou directement le
        dictionnaire des paramètres requis.
        # NOTE(review): the "if noDetails:" / "else:" lines around the two
        # returns below are missing from this excerpt.
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters

    def setParameterValue(self, name=None, value=None):
        Renvoie la valeur d'un paramètre requis de manière contrôlée
        # Retrieve the declared constraints for this parameter.
        default = self.__required_parameters[name]["default"]
        typecast = self.__required_parameters[name]["typecast"]
        minval = self.__required_parameters[name]["minval"]
        maxval = self.__required_parameters[name]["maxval"]
        listval = self.__required_parameters[name]["listval"]
        # Choose the value: an explicit value wins, otherwise the default.
        if value is None and default is None:
        # NOTE(review): the body of this branch is missing from this excerpt.
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
        # A value was given (an "else:" line is missing here).
            if typecast is None: __val = value
            else: __val = typecast( value )
        # Validate the chosen value against bounds and the allowed list.
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
        if listval is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                # NOTE(review): the "for v in __val:" / "if v not in listval:"
                # lines are missing before this raise.
                    raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
            elif __val not in listval:
                raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
        # NOTE(review): the final "return __val" (and the recording of the
        # value) is missing from this excerpt.

    def __setParameters(self, fromDico={}):
        Permet de stocker les paramètres reçus dans le dictionnaire interne.
        # NOTE(review): "{}" is a mutable default argument; harmless here
        # because fromDico is only read, but prefer None plus a local dict.
        self._parameters.update( fromDico )
        # Re-validate every declared parameter, taking user values if present.
        for k in self.__required_parameters.keys():
            if k in fromDico.keys():
                self._parameters[k] = self.setParameterValue(k,fromDico[k])
            # Fall back to the declared default (an "else:" line is missing).
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
698 # ==============================================================================
class Diagnostic(object):
    # NOTE(review): the docstring quotation marks of this class and of its
    # methods are missing from this excerpt; the French text lines are kept
    # verbatim below.
    Classe générale d'interface de type diagnostic
    Ce template s'utilise de la manière suivante : il sert de classe "patron" en
    même temps que l'une des classes de persistance, comme "OneScalar" par
    Une classe élémentaire de diagnostic doit implémenter ses deux méthodes, la
    méthode "_formula" pour écrire explicitement et proprement la formule pour
    l'écriture mathématique du calcul du diagnostic (méthode interne non
    publique), et "calculate" pour activer la précédente tout en ayant vérifié
    et préparé les données, et pour stocker les résultats à chaque pas (méthode
    externe d'activation).

    def __init__(self, name = "", parameters = {}):
        # "parameters = {}" is a mutable default, but it is copied by dict()
        # below, so instances do not share state.
        self.name = str(name)
        self.parameters = dict( parameters )

    def _formula(self, *args):
        Doit implémenter l'opération élémentaire de diagnostic sous sa forme
        mathématique la plus naturelle possible.
        # Abstract: concrete diagnostics must override this private formula.
        raise NotImplementedError("Diagnostic mathematical formula has not been implemented!")

    def calculate(self, *args):
        Active la formule de calcul avec les arguments correctement rangés
        # Abstract: concrete diagnostics must override this public entry point.
        raise NotImplementedError("Diagnostic activation method has not been implemented!")
732 # ==============================================================================
class DiagnosticAndParameters(object):
    # Locates a diagnostic module in a "daDiagnostics" directory on sys.path,
    # imports it and instantiates its ElementaryDiagnostic.
    # NOTE(review): the "def __init__(self," header, most of the signature
    # (asDiagnostic, asIdentifier, asDict, asUnit, asBaseType, asScript, ...),
    # several "else:"/"try:" lines and the "def get(self):" header are missing
    # from this excerpt. Visible code is kept verbatim; gaps flagged inline.
    Classe générale d'interface d'interface de type diagnostic
                 name = "GenericDiagnostic",
                 asExistingDiags = None,
        self.__name = str(name)
        self.__E = tuple(asExistingDiags)  # names of already registered diagnostics
        self.__TheDiag = None              # will hold the instantiated diagnostic
        # Inputs may come either from a script or from direct arguments.
        if asScript is not None:
            __Diag = ImportFromScript(asScript).getvalue( "Diagnostic" )
            __Iden = ImportFromScript(asScript).getvalue( "Identifier" )
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
            __Unit = ImportFromScript(asScript).getvalue( "Unit" )
            __Base = ImportFromScript(asScript).getvalue( "BaseType" )
        # Direct arguments (an "else:" line is missing here).
            __Diag = asDiagnostic
            __Iden = asIdentifier
        # Normalise and store the collected pieces.
        if __Diag is not None:
            self.__D = str(__Diag)
        if __Iden is not None:
            self.__I = str(__Iden)
        # Default the identifier to the diagnostic name ("else:" missing).
            self.__I = str(__Diag)
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        # NOTE(review): the bodies of the next two "if" statements (defaulting
        # self.__U and self.__B) are missing from this excerpt.
        if __Unit is None or __Unit == "None":
        if __Base is None or __Base == "None":
        self.__setDiagnostic( self.__D, self.__I, self.__U, self.__B, self.__P, self.__E )

    # NOTE(review): the "def get(self):" header of this accessor is missing.
        return self.__TheDiag

    def __setDiagnostic(self, __choice = None, __name = "", __unit = "", __basetype = None, __parameters = {}, __existings = () ):
        Permet de sélectionner un diagnostic a effectuer
        # NOTE(review): the "if __choice is None:" guard before this raise is
        # missing from this excerpt.
            raise ValueError("Error: diagnostic choice has to be given")
        __daDirectory = "daDiagnostics"
        # Explicitly look up the complete module file
        # -------------------------------------------
        # NOTE(review): the "__module_path = None" initialisation expected
        # before this loop is missing from this excerpt.
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, __daDirectory, str(__choice)+'.py')):
                __module_path = os.path.abspath(os.path.join(directory, __daDirectory))
        if __module_path is None:
            raise ImportError("No diagnostic module named \"%s\" was found in a \"%s\" subdirectory\n The search path is %s"%(__choice, __daDirectory, sys.path))
        # Import the complete file as a module
        # ------------------------------------
        # NOTE(review): the "try:" line matching the "except" below is missing;
        # note also that sys.path is not restored if the import raises.
            __sys_path_tmp = sys.path ; sys.path.insert(0,__module_path)
            self.__diagnosticFile = __import__(str(__choice), globals(), locals(), [])
            sys.path = __sys_path_tmp ; del __sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(__choice,e))
        # Instantiate an object of the file's elementary type
        # ---------------------------------------------------
        if __name in __existings:
            raise ValueError("A default input with the same name \"%s\" already exists."%str(__name))
        # Build the diagnostic; the "name"/"unit" keyword lines of this call
        # are missing from this excerpt.
        self.__TheDiag = self.__diagnosticFile.ElementaryDiagnostic(
            basetype = __basetype,
            parameters = __parameters )
828 # ==============================================================================
# AlgorithmAndParameters: interface object binding an assimilation algorithm
# (selected by name) to its parameter dictionary self.__P.
829 class AlgorithmAndParameters(object):
831 Classe générale d'interface d'action pour l'algorithme et ses paramètres
# Constructor fragment (the "def __init__(self," line is absent from this
# numbered listing): stores the study name, initializes the algorithm holder,
# merges parameters, optionally reads the "Algorithm" variable from a script,
# and finally selects the algorithm module.
834 name = "GenericAlgorithm",
841 self.__name = str(name)
845 self.__algorithm = {}
846 self.__algorithmFile = None
847 self.__algorithmName = None
849 self.updateParameters( asDict, asScript )
851 if asScript is not None:
852 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
856 if __Algo is not None:
857 self.__A = str(__Algo)
858 self.__P.update( {"Algorithm":self.__A} )
860 self.__setAlgorithm( self.__A )
# updateParameters: merge a new parameter dict into self.__P, either given
# directly (asDict, parameter line missing from this listing) or loaded from a
# script file under the study name, with "Parameters" as fallback synonym.
862 def updateParameters(self,
866 "Mise a jour des parametres"
867 if asScript is not None:
868 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
872 if __Dict is not None:
873 self.__P.update( dict(__Dict) )
# executePythonScheme: unwrap the AO objects (via their getO() accessor when
# present) from the input dictionary into the internal Xb/Y/U/HO/EM/CM/B/R/Q
# slots, validate shape consistency, then run the selected algorithm.
# NOTE(review): several lines (e.g. the run() keyword arguments other than
# Parameters) are missing from this numbered listing.
875 def executePythonScheme(self, asDictAO = None):
876 "Permet de lancer le calcul d'assimilation"
# Reset the operator evaluation cache so a fresh study does not reuse points.
877 Operator.CM.clearCache()
879 if not isinstance(asDictAO, dict):
880 raise ValueError("The objects for algorithm calculation has to be given as a dictionnary, and is not")
881 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
882 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
883 else: self.__Xb = None
884 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
885 else: self.__Y = asDictAO["Observation"]
886 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
887 else: self.__U = asDictAO["ControlInput"]
888 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
889 else: self.__HO = asDictAO["ObservationOperator"]
890 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
891 else: self.__EM = asDictAO["EvolutionModel"]
892 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
893 else: self.__CM = asDictAO["ControlModel"]
894 self.__B = asDictAO["BackgroundError"]
895 self.__R = asDictAO["ObservationError"]
896 self.__Q = asDictAO["EvolutionError"]
898 self.__shape_validate()
900 self.__algorithm.run(
910 Parameters = self.__P,
# executeYACSScheme: convert an ADAO "DIC" Python file to a YACS XML scheme
# with AdaoYacsSchemaCreator.py run in a subprocess, then load, check and
# execute the scheme through the SALOME YACS runtime (pilot/loader modules,
# imported in lines absent from this numbered listing).
914 def executeYACSScheme(self, FileName=None):
915 "Permet de lancer le calcul d'assimilation"
916 if FileName is None or not os.path.exists(FileName):
917 raise ValueError("a existing DIC Python file name has to be given for YACS execution.\n")
# NOTE(review): os.environ.has_key() is Python 2 only; under Python 3 this
# raises AttributeError — should be: "ADAO_ROOT_DIR" not in os.environ.
918 if not os.environ.has_key("ADAO_ROOT_DIR"):
919 raise ImportError("Unable to get ADAO_ROOT_DIR environnement variable. Please launch SALOME to add ADAO_ROOT_DIR to your environnement.\n")
921 __converterExe = os.path.join(os.environ["ADAO_ROOT_DIR"], "bin/salome", "AdaoYacsSchemaCreator.py")
922 __inputFile = os.path.abspath(FileName)
923 __outputFile = __inputFile[:__inputFile.rfind(".")] + '.xml'
925 __args = ["python", __converterExe, __inputFile, __outputFile]
927 __p = subprocess.Popen(__args)
928 (__stdoutdata, __stderrdata) = __p.communicate()
929 if not os.path.exists(__outputFile):
930 __msg = "An error occured during the execution of the ADAO YACS Schema\n"
931 __msg += "Creator applied on the input file:\n"
932 __msg += " %s\n"%__outputFile
933 __msg += "If SALOME GUI is launched by command line, see errors\n"
934 __msg += "details in your terminal.\n"
935 raise ValueError(__msg)
# Load the generated XML with the YACS loader and report parser errors.
941 SALOMERuntime.RuntimeSALOME_setRuntime()
943 r = pilot.getRuntime()
944 xmlLoader = loader.YACSLoader()
945 xmlLoader.registerProcCataLoader()
947 catalogAd = r.loadCatalog("proc", __outputFile)
950 r.addCatalog(catalogAd)
953 p = xmlLoader.load(__outputFile)
954 except IOError as ex:
955 print("IO exception: %s"%(ex,))
957 logger = p.getLogger("parser")
958 if not logger.isEmpty():
959 print("The imported file has errors :")
960 print(logger.getStr())
963 print("Le schéma n'est pas valide et ne peut pas être exécuté")
964 print(p.getErrorReport())
# Consistency check of the scheme links, then execution and final state test.
966 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
967 p.checkConsistency(info)
968 if info.areWarningsOrErrors():
969 print("Le schéma n'est pas cohérent et ne peut pas être exécuté")
970 print(info.getGlobalRepr())
972 e = pilot.ExecutorSwig()
974 if p.getEffectiveState() != pilot.DONE:
975 print(p.getErrorReport())
977 raise ValueError("execution error of YACS scheme")
# get: look the key up first among the algorithm's stored variables, then in
# the parameter dict self.__P (the elif branch body is absent from this
# numbered listing, as is the "def pop(self, k, d):" line).
981 def get(self, key = None):
982 "Vérifie l'existence d'une clé de variable ou de paramètres"
983 if key in self.__algorithm:
984 return self.__algorithm.get( key )
985 elif key in self.__P:
# pop fragment: dict-like pop on the algorithm store, needed for pickling.
991 "Necessaire pour le pickling"
992 return self.__algorithm.pop(k, d)
# Delegate the required-parameter listing to the algorithm object itself.
994 def getAlgorithmRequiredParameters(self, noDetails=True):
995 "Renvoie la liste des paramètres requis selon l'algorithme"
996 return self.__algorithm.getRequiredParameters(noDetails)
# setObserver: attach observer callable __O (with info __I, scheduler __S —
# the Scheduler keyword line is absent from this listing) onto the stored
# variable named __V; requires an algorithm to be already selected.
998 def setObserver(self, __V, __O, __I, __S):
999 if self.__algorithm is None \
1000 or isinstance(self.__algorithm, dict) \
1001 or not hasattr(self.__algorithm,"StoredVariables"):
1002 raise ValueError("No observer can be build before choosing an algorithm.")
1003 if __V not in self.__algorithm:
1004 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1006 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1009 HookParameters = __I,
# removeObserver: symmetric to setObserver — detach observer __O from the
# stored variable __V (the keyword-argument lines of the removeDataObserver
# call are absent from this numbered listing).
1012 def removeObserver(self, __V, __O, __A = False):
1013 if self.__algorithm is None \
1014 or isinstance(self.__algorithm, dict) \
1015 or not hasattr(self.__algorithm,"StoredVariables"):
1016 raise ValueError("No observer can be removed before choosing an algorithm.")
1017 if __V not in self.__algorithm:
1018 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1020 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# hasObserver: True when the stored variable __V carries a data observer
# (the early "return False" lines are absent from this numbered listing).
1025 def hasObserver(self, __V):
1026 if self.__algorithm is None \
1027 or isinstance(self.__algorithm, dict) \
1028 or not hasattr(self.__algorithm,"StoredVariables"):
1030 if __V not in self.__algorithm:
1032 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# keys(): union of algorithm variable names and parameter names — makes the
# object behave dict-like together with __contains__ below.
1035 return list(self.__algorithm.keys()) + list(self.__P.keys())
1037 def __contains__(self, key=None):
1038 "D.__contains__(k) -> True if D has a key k, else False"
1039 return key in self.__algorithm or key in self.__P
1042 "x.__repr__() <==> repr(x)"
1043 return repr(self.__A)+", "+repr(self.__P)
1046 "x.__str__() <==> str(x)"
1047 return str(self.__A)+", "+str(self.__P)
# __setAlgorithm: mirror of __setDiagnostic for "daAlgorithms" — find the
# algorithm module on sys.path, import it with a temporary path prepend, and
# instantiate its ElementaryAlgorithm. A second selection is forbidden.
# NOTE(review): numbered listing with the "try:" line of the import section
# absent; code kept byte-identical.
1049 def __setAlgorithm(self, choice = None ):
1051 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1052 d'assimilation. L'argument est un champ caractère se rapportant au nom
1053 d'un fichier contenu dans "../daAlgorithms" et réalisant l'opération
1054 d'assimilation sur les arguments fixes.
1057 raise ValueError("Error: algorithm choice has to be given")
1058 if self.__algorithmName is not None:
1059 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1060 daDirectory = "daAlgorithms"
1062 # Recherche explicitement le fichier complet
1063 # ------------------------------------------
1065 for directory in sys.path:
1066 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1067 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1068 if module_path is None:
1069 raise ImportError("No algorithm module named \"%s\" was found in a \"%s\" subdirectory\n The search path is %s"%(choice, daDirectory, sys.path))
1071 # Importe le fichier complet comme un module
1072 # ------------------------------------------
1074 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1075 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1076 self.__algorithmName = str(choice)
1077 sys.path = sys_path_tmp ; del sys_path_tmp
1078 except ImportError as e:
1079 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1081 # Instancie un objet du type élémentaire du fichier
1082 # -------------------------------------------------
1083 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# __shape_validate: derive a shape tuple for every study object (Xb, Y, U,
# B, R, Q, HO, EM, CM) — accepting either a "shape" attribute or a shape()
# callable — then cross-check vector lengths against operator and covariance
# dimensions, raising ValueError on any mismatch. The EnsembleBlue case is
# handled specially by reshaping the background into a persistent vector.
1086 def __shape_validate(self):
1088 Validation de la correspondance correcte des tailles des variables et
1089 des matrices s'il y en a.
1091 if self.__Xb is None: __Xb_shape = (0,)
1092 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1093 elif hasattr(self.__Xb,"shape"):
1094 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1095 else: __Xb_shape = self.__Xb.shape()
1096 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1098 if self.__Y is None: __Y_shape = (0,)
1099 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1100 elif hasattr(self.__Y,"shape"):
1101 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1102 else: __Y_shape = self.__Y.shape()
1103 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1105 if self.__U is None: __U_shape = (0,)
1106 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1107 elif hasattr(self.__U,"shape"):
1108 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1109 else: __U_shape = self.__U.shape()
1110 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1112 if self.__B is None: __B_shape = (0,0)
1113 elif hasattr(self.__B,"shape"):
1114 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1115 else: __B_shape = self.__B.shape()
1116 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1118 if self.__R is None: __R_shape = (0,0)
1119 elif hasattr(self.__R,"shape"):
1120 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1121 else: __R_shape = self.__R.shape()
1122 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1124 if self.__Q is None: __Q_shape = (0,0)
1125 elif hasattr(self.__Q,"shape"):
1126 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1127 else: __Q_shape = self.__Q.shape()
1128 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1130 if len(self.__HO) == 0: __HO_shape = (0,0)
1131 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1132 elif hasattr(self.__HO["Direct"],"shape"):
1133 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1134 else: __HO_shape = self.__HO["Direct"].shape()
1135 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1137 if len(self.__EM) == 0: __EM_shape = (0,0)
1138 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1139 elif hasattr(self.__EM["Direct"],"shape"):
1140 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1141 else: __EM_shape = self.__EM["Direct"].shape()
1142 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1144 if len(self.__CM) == 0: __CM_shape = (0,0)
1145 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1146 elif hasattr(self.__CM["Direct"],"shape"):
1147 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1148 else: __CM_shape = self.__CM["Direct"].shape()
1149 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1151 # Vérification des conditions
1152 # ---------------------------
1153 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1154 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1155 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1156 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
# Covariances must be square.
1158 if not( min(__B_shape) == max(__B_shape) ):
1159 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1160 if not( min(__R_shape) == max(__R_shape) ):
1161 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1162 if not( min(__Q_shape) == max(__Q_shape) ):
1163 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1164 if not( min(__EM_shape) == max(__EM_shape) ):
1165 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
# Cross compatibility operator vs state / observation / covariances.
1167 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and not( __HO_shape[1] == max(__Xb_shape) ):
1168 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1169 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and not( __HO_shape[0] == max(__Y_shape) ):
1170 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1171 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1172 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1173 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1174 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# EnsembleBlue tolerates Xb given as an ensemble: reshape it into a
# persistent vector whose members each match the B dimension (the "else:"
# before the final raise is absent from this numbered listing).
1176 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1177 if self.__algorithmName in ["EnsembleBlue",]:
1178 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1179 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1180 for member in asPersistentVector:
1181 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1182 __Xb_shape = min(__B_shape)
1184 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1186 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1187 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1189 if self.__EM is not None and len(self.__EM) > 0 and not(type(self.__EM) is type({})) and not( __EM_shape[1] == max(__Xb_shape) ):
1190 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1192 if self.__CM is not None and len(self.__CM) > 0 and not(type(self.__CM) is type({})) and not( __CM_shape[1] == max(__U_shape) ):
1193 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1195 if ("Bounds" in self.__P) \
1196 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1197 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1198 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1199 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1203 # ==============================================================================
# DataObserver: normalize variable name(s) + info into parallel tuples, build
# the observer callable (given directly, as source text, as a template name,
# or from a script), then register it on each variable of the algorithm.
# NOTE(review): numbered listing with gaps (constructor parameter lines,
# several else branches); code kept byte-identical.
1204 class DataObserver(object):
1206 Classe générale d'interface de type observer
1209 name = "GenericObserver",
1221 self.__name = str(name)
1226 if onVariable is None:
1227 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1228 elif type(onVariable) in (tuple, list):
1229 self.__V = tuple(map( str, onVariable ))
1230 if withInfo is None:
1233 self.__I = (str(withInfo),)*len(self.__V)
1234 elif isinstance(onVariable, str):
1235 self.__V = (onVariable,)
1236 if withInfo is None:
1237 self.__I = (onVariable,)
1239 self.__I = (str(withInfo),)
1241 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# Build the observer function text from the highest-priority source given.
1243 if asString is not None:
1244 __FunctionText = asString
1245 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1246 __FunctionText = Templates.ObserverTemplates[asTemplate]
1247 elif asScript is not None:
1248 __FunctionText = ImportFromScript(asScript).getstring()
1251 __Function = ObserverF(__FunctionText)
1253 if asObsObject is not None:
1254 self.__O = asObsObject
1256 self.__O = __Function.getfunc()
# Attach the observer to every (variable, info) pair on the algorithm side.
1258 for k in range(len(self.__V)):
1261 if ename not in withAlgo:
1262 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1264 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1267 "x.__repr__() <==> repr(x)"
1268 return repr(self.__V)+"\n"+repr(self.__O)
1271 "x.__str__() <==> str(x)"
1272 return str(self.__V)+"\n"+str(self.__O)
1274 # ==============================================================================
# State: wrapper for a state vector (single numpy column vector) or a series
# of vectors (Persistence.OneVector), built from direct data or from a script
# variable; exposes shape/size and vector/series type predicates.
1275 class State(object):
1277 Classe générale d'interface de type état
1280 name = "GenericVector",
1282 asPersistentVector = None,
1285 toBeChecked = False,
1288 Permet de définir un vecteur :
1289 - asVector : entrée des données, comme un vecteur compatible avec le
1290 constructeur de numpy.matrix, ou "True" si entrée par script.
1291 - asPersistentVector : entrée des données, comme une série de vecteurs
1292 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1293 type Persistence, ou "True" si entrée par script.
1294 - asScript : si un script valide est donné contenant une variable
1295 nommée "name", la variable est de type "asVector" (par défaut) ou
1296 "asPersistentVector" selon que l'une de ces variables est placée à
1299 self.__name = str(name)
1300 self.__check = bool(toBeChecked)
1304 self.__is_vector = False
1305 self.__is_series = False
# Script input: read either a series or a single vector under self.__name.
1307 if asScript is not None:
1308 __Vector, __Series = None, None
1309 if asPersistentVector:
1310 __Series = ImportFromScript(asScript).getvalue( self.__name )
1312 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1314 __Vector, __Series = asVector, asPersistentVector
# Vector case: flatten to a float column vector (numpy.matrix .T).
1316 if __Vector is not None:
1317 self.__is_vector = True
1318 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1319 self.shape = self.__V.shape
1320 self.size = self.__V.size
# Series case: store each member as a column vector in a OneVector.
1321 elif __Series is not None:
1322 self.__is_series = True
1323 if type(__Series) in (tuple, list, numpy.ndarray, numpy.matrix):
1324 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1325 for member in __Series:
1326 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1327 import sys ; sys.stdout.flush()
1330 if type(self.__V.shape) in (tuple, list):
1331 self.shape = self.__V.shape
1333 self.shape = self.__V.shape()
1334 if len(self.shape) == 1:
1335 self.shape = (self.shape[0],1)
1336 self.size = self.shape[0] * self.shape[1]
1338 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1340 if scheduledBy is not None:
1341 self.__T = scheduledBy
# getO: return the wrapped value, optionally with its scheduler (the branch
# bodies after line 1346 are absent from this numbered listing).
1343 def getO(self, withScheduler=False):
1345 return self.__V, self.__T
1346 elif self.__T is None:
1352 "Vérification du type interne"
1353 return self.__is_vector
1356 "Vérification du type interne"
1357 return self.__is_series
1360 "x.__repr__() <==> repr(x)"
1361 return repr(self.__V)
1364 "x.__str__() <==> str(x)"
1365 return str(self.__V)
1367 # ==============================================================================
# Covariance: polymorphic covariance wrapper with four internal storages —
# scalar multiplying the identity, diagonal vector, full numpy.matrix, or a
# user object exposing the matrix protocol. All algebra methods below branch
# on the storage kind.
1368 class Covariance(object):
1370 Classe générale d'interface de type covariance
1373 name = "GenericCovariance",
1374 asCovariance = None,
1375 asEyeByScalar = None,
1376 asEyeByVector = None,
1379 toBeChecked = False,
1382 Permet de définir une covariance :
1383 - asCovariance : entrée des données, comme une matrice compatible avec
1384 le constructeur de numpy.matrix
1385 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1386 multiplicatif d'une matrice de corrélation identité, aucune matrice
1387 n'étant donc explicitement à donner
1388 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1389 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1390 n'étant donc explicitement à donner
1391 - asCovObject : entrée des données comme un objet python, qui a les
1392 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1393 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1394 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1395 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1396 pleine doit être vérifié
1398 self.__name = str(name)
1399 self.__check = bool(toBeChecked)
1402 self.__is_scalar = False
1403 self.__is_vector = False
1404 self.__is_matrix = False
1405 self.__is_object = False
# Script input: read whichever representation was requested from the file.
1407 if asScript is not None:
1408 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1410 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1412 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1414 __Object = ImportFromScript(asScript).getvalue( self.__name )
1416 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1418 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Priority: scalar > vector > matrix > object. Scalar/vector variances are
# forced non-negative with numpy.abs before the SPD validation below.
1420 if __Scalar is not None:
1421 if numpy.matrix(__Scalar).size != 1:
1422 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1423 self.__is_scalar = True
1424 self.__C = numpy.abs( float(__Scalar) )
1427 elif __Vector is not None:
1428 self.__is_vector = True
1429 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1430 self.shape = (self.__C.size,self.__C.size)
1431 self.size = self.__C.size**2
1432 elif __Matrix is not None:
1433 self.__is_matrix = True
1434 self.__C = numpy.matrix( __Matrix, float )
1435 self.shape = self.__C.shape
1436 self.size = self.__C.size
1437 elif __Object is not None:
1438 self.__is_object = True
# Duck-type check of the mandatory matrix protocol on the user object.
1440 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1441 if not hasattr(self.__C,at):
1442 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1443 if hasattr(self.__C,"shape"):
1444 self.shape = self.__C.shape
1447 if hasattr(self.__C,"size"):
1448 self.size = self.__C.size
1453 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# __validate: sanity-check the stored covariance — squareness for matrix and
# object forms, strict positivity for scalar/vector variances, and (when
# checking is requested or logging is verbose) an SPD test via a Cholesky
# factorization attempt (the try/except lines around it are absent from this
# numbered listing).
1457 def __validate(self):
1459 if self.ismatrix() and min(self.shape) != max(self.shape):
1460 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1461 if self.isobject() and min(self.shape) != max(self.shape):
1462 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1463 if self.isscalar() and self.__C <= 0:
1464 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1465 if self.isvector() and (self.__C <= 0).any():
1466 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1467 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1469 L = numpy.linalg.cholesky( self.__C )
1471 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1472 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1474 L = self.__C.cholesky()
1476 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Storage-kind predicates (the "def isscalar/isvector/ismatrix/isobject"
# lines are absent from this numbered listing), then the algebraic views:
# getI (inverse), getT (transpose), cholesky, choleskyI, diag — each returns
# a new Covariance (or array for diag) built from the matching storage form.
1479 "Vérification du type interne"
1480 return self.__is_scalar
1483 "Vérification du type interne"
1484 return self.__is_vector
1487 "Vérification du type interne"
1488 return self.__is_matrix
1491 "Vérification du type interne"
1492 return self.__is_object
# getI: inverse — scalar and vector forms invert elementwise; diagonal and
# identity-scaled matrices make that exact.
1497 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1498 elif self.isvector():
1499 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1500 elif self.isscalar():
1501 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1502 elif self.isobject():
1503 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1505 return None # Indispensable
# getT: transpose — diagonal/scalar forms are symmetric, returned unchanged.
1510 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1511 elif self.isvector():
1512 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1513 elif self.isscalar():
1514 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1515 elif self.isobject():
1516 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1519 "Décomposition de Cholesky"
1521 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1522 elif self.isvector():
1523 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1524 elif self.isscalar():
1525 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1526 elif self.isobject() and hasattr(self.__C,"cholesky"):
1527 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1529 def choleskyI(self):
1530 "Inversion de la décomposition de Cholesky"
1532 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1533 elif self.isvector():
1534 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1535 elif self.isscalar():
1536 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1537 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1538 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
# diag: scalar form needs the explicit size msize to materialize a diagonal.
1540 def diag(self, msize=None):
1541 "Diagonale de la matrice"
1543 return numpy.diag(self.__C)
1544 elif self.isvector():
1546 elif self.isscalar():
1548 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1550 return self.__C * numpy.ones(int(msize))
1551 elif self.isobject():
1552 return self.__C.diag()
# asfullmatrix: materialize the covariance as a dense numpy.matrix; scalar
# form requires the target size msize (the matrix branch line is absent from
# this numbered listing). trace: sum of diagonal per storage form.
1554 def asfullmatrix(self, msize=None):
1558 elif self.isvector():
1559 return numpy.matrix( numpy.diag(self.__C), float )
1560 elif self.isscalar():
1562 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1564 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1565 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1566 return self.__C.asfullmatrix()
1568 def trace(self, msize=None):
1569 "Trace de la matrice"
1571 return numpy.trace(self.__C)
1572 elif self.isvector():
1573 return float(numpy.sum(self.__C))
1574 elif self.isscalar():
1576 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1578 return self.__C * int(msize)
1579 elif self.isobject():
1580 return self.__C.trace()
# Standard textual representations delegate to the stored content.
1586 "x.__repr__() <==> repr(x)"
1587 return repr(self.__C)
1590 "x.__str__() <==> str(x)"
1591 return str(self.__C)
# __add__ / __sub__: combine with a dense operand. Diagonal/scalar forms are
# applied along the main diagonal of "other" via a strided flat view.
# NOTE(review): numpy.asarray(other) can be a *view* on the caller's ndarray,
# so the in-place "+=" / "=" on _A may mutate the operand — confirm callers
# always pass matrices they do not reuse. Also assumes a 2-D "other"
# (uses _A.shape[1]) — TODO confirm.
1593 def __add__(self, other):
1594 "x.__add__(y) <==> x+y"
1595 if self.ismatrix() or self.isobject():
1596 return self.__C + numpy.asmatrix(other)
1597 elif self.isvector() or self.isscalar():
1598 _A = numpy.asarray(other)
1599 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1600 return numpy.asmatrix(_A)
1602 def __radd__(self, other):
1603 "x.__radd__(y) <==> y+x"
1604 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1606 def __sub__(self, other):
1607 "x.__sub__(y) <==> x-y"
1608 if self.ismatrix() or self.isobject():
1609 return self.__C - numpy.asmatrix(other)
1610 elif self.isvector() or self.isscalar():
1611 _A = numpy.asarray(other)
1612 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1613 return numpy.asmatrix(_A)
1615 def __rsub__(self, other):
1616 "x.__rsub__(y) <==> y-x"
1617 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
# __neg__ fragment (its def/return lines are absent from this numbered
# listing), then __mul__: right-multiplication dispatching on the storage
# form and on whether "other" is vector-like (ravel size matches a side) or
# a full matrix; raises on incompatible shapes.
1620 "x.__neg__() <==> -x"
1623 def __mul__(self, other):
1624 "x.__mul__(y) <==> x*y"
1625 if self.ismatrix() and isinstance(other,numpy.matrix):
1626 return self.__C * other
1627 elif self.ismatrix() and (isinstance(other,numpy.ndarray) \
1628 or isinstance(other,list) \
1629 or isinstance(other,tuple)):
1630 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1631 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1632 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1633 return self.__C * numpy.asmatrix(other)
1635 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
# Diagonal storage: elementwise product replaces the matrix product.
1636 elif self.isvector() and (isinstance(other,numpy.matrix) \
1637 or isinstance(other,numpy.ndarray) \
1638 or isinstance(other,list) \
1639 or isinstance(other,tuple)):
1640 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1641 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1642 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1643 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1645 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1646 elif self.isscalar() and isinstance(other,numpy.matrix):
1647 return self.__C * other
1648 elif self.isscalar() and (isinstance(other,numpy.ndarray) \
1649 or isinstance(other,list) \
1650 or isinstance(other,tuple)):
1651 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1652 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1654 return self.__C * numpy.asmatrix(other)
1655 elif self.isobject():
1656 return self.__C.__mul__(other)
1658 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# __rmul__: left-multiplication y*x — only numpy.matrix operands are handled
# for the matrix/vector/scalar forms; object form delegates. __len__ (its
# def line is absent from this numbered listing) reports the row count.
1660 def __rmul__(self, other):
1661 "x.__rmul__(y) <==> y*x"
1662 if self.ismatrix() and isinstance(other,numpy.matrix):
1663 return other * self.__C
1664 elif self.isvector() and isinstance(other,numpy.matrix):
1665 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1666 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1667 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1668 return numpy.asmatrix(numpy.array(other) * self.__C)
1670 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1671 elif self.isscalar() and isinstance(other,numpy.matrix):
1672 return other * self.__C
1673 elif self.isobject():
1674 return self.__C.__rmul__(other)
1676 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1679 "x.__len__() <==> len(x)"
1680 return self.shape[0]
1682 # ==============================================================================
# ObserverF: wrap observer source text so it can be handed around as a bound
# function. NOTE(review): the body of func() (presumably an exec of
# self.__corps — TODO confirm) and the return of getfunc() are absent from
# this numbered listing; code kept byte-identical.
1683 class ObserverF(object):
1685 Creation d'une fonction d'observateur a partir de son texte
1687 def __init__(self, corps=""):
1688 self.__corps = corps
1689 def func(self,var,info):
1690 "Fonction d'observation"
1693 "Restitution du pointeur de fonction dans l'objet"
# ==============================================================================
class ImportFromScript(object):
    """
    Give access to a named variable found in an imported Python script file.

    The script is imported once at construction; variables are then read with
    `getvalue` (with an optional synonym fallback) and the raw source text
    with `getstring`.
    """
    def __init__(self, __filename=None):
        "Check the file existence and import the script as a module."
        # Validate the argument before using it (the original called rstrip
        # on a possibly-None value, raising AttributeError instead of the
        # intended ValueError).
        if __filename is None:
            raise ValueError("The name of the file containing the variable to be imported has to be specified.")
        # Strip only a genuine ".py" suffix: str.rstrip(".py") removes any
        # trailing '.', 'p' or 'y' *characters*, corrupting module names such
        # as "happy.py" -> "ha".
        if __filename.endswith(".py"):
            self.__filename = __filename[:-3]
        else:
            self.__filename = __filename
        if not os.path.isfile(str(self.__filename)+".py"):
            raise ValueError("The file containing the variable to be imported doesn't seem to exist. The given file name is:\n  \"%s\""%self.__filename)
        self.__scriptfile = __import__(self.__filename, globals(), locals(), [])
        # Keep the raw text as well, closing the handle deterministically.
        with open(self.__filename+".py", 'r') as __fid:
            self.__scriptstring = __fid.read()
    def getvalue(self, __varname=None, __synonym=None ):
        "Return the requested variable, falling back to its synonym if absent."
        if __varname is None:
            raise ValueError("The name of the variable to be imported has to be specified.")
        if not hasattr(self.__scriptfile, __varname):
            if __synonym is None:
                raise ValueError("The imported script file \"%s\" doesn't contain the specified variable \"%s\"."%(str(self.__filename)+".py",__varname))
            elif not hasattr(self.__scriptfile, __synonym):
                raise ValueError("The imported script file \"%s\" doesn't contain the specified variable \"%s\"."%(str(self.__filename)+".py",__synonym))
            else:
                return getattr(self.__scriptfile, __synonym)
        else:
            return getattr(self.__scriptfile, __varname)
    def getstring(self):
        "Return the complete script source text."
        return self.__scriptstring
1727 # ==============================================================================
# General cost function J(x) = Jb + Jo shared by the static/3D algorithms.
# NOTE(review): this excerpt is incomplete — keyword parameters such as
# _Y/_Xb/_BI/_RI, several else-branches and the final return statement are
# not visible here; confirm against the complete source file.
1728 def CostFunction3D(_x,
1729 _Hm = None, # To simulate Hm(x) : HO["Direct"].appliedTo
1730 _HmX = None, # Simulation of Hm(x) already done
1731 _arg = None, # Extra arguments for Hm, given as a tuple
1736 _SIV = False, # To be phased out for 8.0
1737 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
1738 _nPS = 0, # nbPreviousSteps
1739 _QM = "DA", # QualityMeasure
1740 _SSV = {}, # Input and/or output : self.StoredVariables
1741 _fRt = False, # Whether to return the extended output
1742 _sSc = True, # Whether to store the SSC
1745 General cost function useful for the static/3D algorithms : 3DVAR, BLUE
1746 and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO, SA, Tabu,
1747 DFO, QuantileRegression
# Make each requested storage entry usable through append(); Persistence
# objects expose store() instead, so alias it.
1753 for k in ["CostFunctionJ",
1759 "SimulatedObservationAtCurrentOptimum",
1760 "SimulatedObservationAtCurrentState",
1764 if hasattr(_SSV[k],"store"):
1765 _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
# Current evaluation point as a column matrix.
1767 _X = numpy.asmatrix(numpy.ravel( _x )).T
1768 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
1769 _SSV["CurrentState"].append( _X )
# Use the precomputed simulation _HmX when given; otherwise evaluate _Hm
# (which must then be defined).
1771 if _HmX is not None:
1775 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
1779 _HX = _Hm( _X, *_arg )
1780 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1782 if "SimulatedObservationAtCurrentState" in _SSC or \
1783 "SimulatedObservationAtCurrentOptimum" in _SSC:
1784 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# A NaN anywhere in the simulated observation invalidates the whole cost.
1786 if numpy.any(numpy.isnan(_HX)):
1787 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
1789 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
# Quality-measure dispatch: "DA" is the classical augmented weighted
# least squares with background (Jb) and observation (Jo) terms.
1790 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
1791 if _BI is None or _RI is None:
1792 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
1793 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
1794 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
1795 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
# NOTE(review): for the measures below, Jb is presumably set to 0 in lines
# elided from this excerpt — verify in the full file.
1796 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
1798 raise ValueError("Observation error covariance matrix has to be properly defined!")
1800 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
1801 elif _QM in ["LeastSquares", "LS", "L2"]:
1803 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
1804 elif _QM in ["AbsoluteValue", "L1"]:
1806 Jo = numpy.sum( numpy.abs(_Y - _HX) )
1807 elif _QM in ["MaximumError", "ME"]:
1809 Jo = numpy.max( numpy.abs(_Y - _HX) )
1810 elif _QM in ["QR", "Null"]:
1814 raise ValueError("Unknown asked quality measure!")
# Total cost; Jb/Jo may be 1x1 matrices, hence the float() coercions.
1816 J = float( Jb ) + float( Jo )
1819 _SSV["CostFunctionJb"].append( Jb )
1820 _SSV["CostFunctionJo"].append( Jo )
1821 _SSV["CostFunctionJ" ].append( J )
# Optimum bookkeeping over the evaluations stored after step _nPS.
1823 if "IndexOfOptimum" in _SSC or \
1824 "CurrentOptimum" in _SSC or \
1825 "SimulatedObservationAtCurrentOptimum" in _SSC:
1826 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
1827 if "IndexOfOptimum" in _SSC:
1828 _SSV["IndexOfOptimum"].append( IndexMin )
1829 if "CurrentOptimum" in _SSC:
1830 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
1831 if "SimulatedObservationAtCurrentOptimum" in _SSC:
1832 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
# The return logic (lines elided here) depends on _fRt and on the measure.
1837 if _QM in ["QR"]: # For the QuantileRegression
1842 # ==============================================================================
1843 if __name__ == "__main__":
# Module self-test entry point: prints a banner (any further autodiagnostic
# body is not visible in this excerpt).
1844 print('\n AUTODIAGNOSTIC \n')