1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2018 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""Définit les outils généraux élémentaires."""
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence
36 from daCore import PlatformInfo
37 from daCore import Interfaces
38 from daCore import Templates
39 from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
class CacheManager(object):
    """
    General management class for a cache of computations.

    Stores previously computed (point, result, norm) triplets so that a
    redundant evaluation of the same point can be answered from the cache.
    """
    # NOTE(review): elided listing — the "def __init__(self, ...)" signature
    # line is missing here; the two lines below are its keyword parameters.
    toleranceInRedundancy = 1.e-18,
    lenghtOfRedundancy = -1,
    # (docstring fragment) The tolerance characteristics can be modified at creation.
    self.__tolerBP = float(toleranceInRedundancy)   # relative tolerance used to detect duplicate points
    self.__lenghtOR = int(lenghtOfRedundancy)       # maximum cache size ("lenght" sic, kept for API compatibility)
    self.__initlnOR = self.__lenghtOR               # initial size, kept so it can be restored later
    self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
    # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)

    def wasCalculatedIn(self, xValue ): #, info="" ):
        "Check whether a computation matching the given value already exists in the cache"
        # NOTE(review): elided listing — the initialisation of the returned
        # (found-flag, cached-value) pair, the loop "break", and the final
        # return statement are missing from this view.
        # Scan backwards over at most self.__lenghtOR most recent entries.
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if xValue.size != self.__listOPCV[i][0].size:
                # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
            # Relative comparison: distance below tolerance times the stored norm.
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __HxV = self.__listOPCV[i][1]
                # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)

    def storeValueInX(self, xValue, HxValue ):
        "Store a computation matching the given value"
        if self.__lenghtOR < 0:
            # Negative size means "auto": derive the cache size from the state size.
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        while len(self.__listOPCV) > self.__lenghtOR:
            # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
            self.__listOPCV.pop(0)   # evict oldest entry first (FIFO)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            # NOTE(review): elided listing — the stored HxValue element is missing here.
            numpy.linalg.norm(xValue),
            # NOTE(review): elided listing — the closing of the appended tuple is missing.

    # NOTE(review): elided listing — the two statements below belong to
    # separate cache-management methods (size restore / disable) whose
    # "def" lines are missing from this view.
    self.__initlnOR = self.__lenghtOR
    self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator.
    """
    # NOTE(review): elided listing — the class-level counters
    # (NbCallsAsMatrix, NbCallsAsMethod, NbCallsOfCached) and the shared
    # cache "CM = CacheManager()" referenced later are missing from this view.
    def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True, inputAsMultiFunction = False):
        """
        Build an object of this type from one of the two keywords: either a
        Python function/multi-function, or a matrix.
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : whether to cache results to avoid redundant computations
        - inputAsMultiFunction : whether the function is explicitly defined as a multi-function
        """
        # Per-instance call counters: matrix applications, function calls, cache hits.
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        if fromMethod is not None and self.__inputAsMF:
            # Function already follows the multi-function protocol: use it directly.
            self.__Method = fromMethod # logtimer(fromMethod)
            # NOTE(review): elided listing — the line clearing self.__Matrix is missing here.
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap a mono-point function into the multi-function protocol.
            self.__Method = partial( MultiFonction, _sFunction=fromMethod)
            # NOTE(review): elided listing — the line clearing self.__Matrix is missing here.
            self.__Type = "Method"
        elif fromMatrix is not None:
            # NOTE(review): numpy.float is deprecated (removed in NumPy >= 1.24);
            # the builtin float is equivalent — TODO confirm before changing.
            self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
            self.__Type = "Matrix"
        # NOTE(review): elided listing — the trailing branch(es) of this
        # constructor (error case when neither argument is given) are missing.
142 def disableAvoidingRedundancy(self):
144 Operator.CM.disable()
    def enableAvoidingRedundancy(self):
        # NOTE(review): elided listing — the docstring and the branch that
        # actually enables the cache (presumably guarded by self.__AvoidRC)
        # are missing; only the fallback disabling call is visible.
            Operator.CM.disable()
    def appliedTo(self, xValue, HValue = None):
        """
        Apply the operator to xValue and return the result; this method simply
        applies, its argument being assumed to already be of the proper type.
        - xValue : argument suitable for applying the operator
        - HValue : optional precomputed result H(xValue), stored directly when given
        """
        if HValue is not None:
            HxValue = numpy.asmatrix( numpy.ravel( HValue ) ).T
            # NOTE(review): elided listing — the cache-enabled guard around
            # this store (and the early return) is missing from this view.
            Operator.CM.storeValueInX(xValue,HxValue)
        # NOTE(review): elided listing — the branching that decides whether to
        # query the cache is partial; the two lines below are alternative
        # branches of a missing if/else.
            __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xValue)
            __alreadyCalculated = False
        if __alreadyCalculated:
            self.__addOneCacheCall()
            # NOTE(review): elided listing — the reuse of the cached __HxV is missing here.
        if self.__Matrix is not None:
            # Linear case: direct matrix product.
            self.__addOneMatrixCall()
            HxValue = self.__Matrix * xValue
            # NOTE(review): elided listing — the "else:" (function form) branch
            # header is missing; the three lines below belong to it.
            self.__addOneMethodCall()
            HxValue = self.__Method( xValue )
            Operator.CM.storeValueInX(xValue,HxValue)
        # NOTE(review): elided listing — the final "return HxValue" is missing.
    def appliedControledFormTo(self, paire ):
        """
        Apply the operator to the pair (xValue, uValue) and return the result;
        this method simply applies, its argument being assumed to already be
        of the proper type. If uValue is None, the operator is assumed to
        apply to xValue only.
        - xValue : X argument suitable for applying the operator
        - uValue : U argument suitable for applying the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xValue, uValue = paire
        if self.__Matrix is not None:
            # Linear case: direct matrix product (control input is ignored).
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        elif uValue is not None:
            # Controlled case: the function receives the (X, U) pair.
            self.__addOneMethodCall()
            return self.__Method( (xValue, uValue) )
        # NOTE(review): elided listing — the "else:" line introducing the
        # uncontrolled branch below is missing.
            self.__addOneMethodCall()
            return self.__Method( xValue )
    def appliedInXTo(self, paire ):
        """
        Apply the operator, valid at the nominal point xNominal, to the
        argument xValue, and return the result. If the operator is linear
        (a matrix) it is valid at any nominal point, so the nominal point
        is not needed.
        Arguments: a list/tuple containing
        - xNominal : point where the operator is built before being applied
        - xValue : argument suitable for applying the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xNominal, xValue = paire
        if self.__Matrix is not None:
            # Linear case: nominal point irrelevant, direct matrix product.
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        # NOTE(review): elided listing — the "else:" line introducing the
        # function-form branch below is missing.
            self.__addOneMethodCall()
            return self.__Method( (xNominal, xValue) )
234 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue"):
236 Permet de renvoyer l'opérateur sous la forme d'une matrice
238 if self.__Matrix is not None:
239 self.__addOneMatrixCall()
241 elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None"
242 self.__addOneMethodCall()
243 return numpy.matrix( self.__Method( (ValueForMethodForm, None) ) )
245 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        # NOTE(review): elided listing — the "def shape(self):" line is
        # missing above this docstring.
        """
        Return the numpy shape of the operator when it is available in matrix
        form; raise ValueError otherwise.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        # NOTE(review): elided listing — the "else:" line is missing here.
            raise ValueError("Matrix form of the operator is not available, nor the shape")
    def nbcalls(self, which=None):
        """
        Return the operator evaluation counters as a tuple:
        (local total, local matrix, local method, local cached,
         global total, global matrix, global method, global cached),
        or the single entry selected by the integer index `which`.
        """
        # NOTE(review): elided listing — the opening "__nbcalls = (" of the
        # tuple is missing above these elements.
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        # NOTE(review): elided listing — the closing parenthesis of the tuple
        # is missing above.
        if which is None: return __nbcalls
        else: return __nbcalls[which]
274 def __addOneMatrixCall(self):
275 "Comptabilise un appel"
276 self.__NbCallsAsMatrix += 1 # Decompte local
277 Operator.NbCallsAsMatrix += 1 # Decompte global
279 def __addOneMethodCall(self):
280 "Comptabilise un appel"
281 self.__NbCallsAsMethod += 1 # Decompte local
282 Operator.NbCallsAsMethod += 1 # Decompte global
284 def __addOneCacheCall(self):
285 "Comptabilise un appel"
286 self.__NbCallsOfCached += 1 # Decompte local
287 Operator.NbCallsOfCached += 1 # Decompte global
289 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator
    (Direct, Tangent linear, Adjoint).
    """
    # NOTE(review): elided listing — the "def __init__(self," line and several
    # keyword parameters of the constructor (asMatrix, appliedInX, avoidRC,
    # scheduledBy, toBeChecked, ...) are missing below; those names are used
    # later in the body.
    name = "GenericFullOperator",
    asOneFunction = None, # Function
    asThreeFunctions = None, # Functions dictionary
    asScript = None, # Function(s) script
    asDict = None, # Parameters
    inputAsMF = False,# Function(s) as Multi-Functions
    self.__name = str(name)
    self.__check = bool(toBeChecked)
    # NOTE(review): elided listing — the initialisation of self.__FO and of
    # the local __Parameters dictionary is missing here.
    if (asDict is not None) and isinstance(asDict, dict):
        __Parameters.update( asDict )
        # Map the public parameter names onto the internal "with*" keys.
        if "DifferentialIncrement" in asDict:
            __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
        if "CenteredFiniteDifference" in asDict:
            __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
        if "EnableMultiProcessing" in asDict:
            __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
        if "NumberOfProcesses" in asDict:
            __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
    if asScript is not None:
        __Matrix, __Function = None, None
        # NOTE(review): elided listing — the conditional lines selecting the
        # matrix / one-function / three-function import from the script are
        # missing; the statements below belong to distinct branches.
        __Matrix = ImportFromScript(asScript).getvalue( self.__name )
        __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
        __Function.update({"useApproximatedDerivatives":True})
        __Function.update(__Parameters)
        elif asThreeFunctions:
            # NOTE(review): elided listing — the "__Function = {" opening of
            # this dictionary literal is missing above its entries.
            "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
            "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
            "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
            __Function.update(__Parameters)
    # NOTE(review): elided listing — the "else:" branch handling directly
    # given (non-script) operators begins around here.
    if asOneFunction is not None:
        if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
            if asOneFunction["Direct"] is not None:
                __Function = asOneFunction
            # NOTE(review): elided listing — "else:" line missing.
                raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
        # NOTE(review): elided listing — "else:" line missing.
            __Function = { "Direct":asOneFunction }
        __Function.update({"useApproximatedDerivatives":True})
        __Function.update(__Parameters)
    elif asThreeFunctions is not None:
        if isinstance(asThreeFunctions, dict) and \
            ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
            ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
            (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
            # Complete Tangent/Adjoint pair given: use it as-is.
            __Function = asThreeFunctions
        elif isinstance(asThreeFunctions, dict) and \
            ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
            # Only a Direct operator given: derivatives will be approximated.
            __Function = asThreeFunctions
            __Function.update({"useApproximatedDerivatives":True})
        # NOTE(review): elided listing — "else:" line missing.
            raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
        if "Direct" not in asThreeFunctions:
            __Function["Direct"] = asThreeFunctions["Tangent"]
        __Function.update(__Parameters)
    # NOTE(review): elided listing — remaining branches (matrix case, etc.)
    # of this constructor section are missing here.
    # if sys.version_info[0] < 3 and isinstance(__Function, dict):
    #     for k in ("Direct", "Tangent", "Adjoint"):
    #         if k in __Function and hasattr(__Function[k],"__class__"):
    #             if type(__Function[k]) is type(self.__init__):
    #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
    if appliedInX is not None and isinstance(appliedInX, dict):
        __appliedInX = appliedInX
    elif appliedInX is not None:
        __appliedInX = {"HXb":appliedInX}
    # NOTE(review): elided listing — the "else: __appliedInX = None" branch is missing.
    if scheduledBy is not None:
        self.__T = scheduledBy
    if isinstance(__Function, dict) and \
        ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
        ("Direct" in __Function) and (__Function["Direct"] is not None):
        # Fill the default settings of the finite-difference approximation.
        if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
        if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
        if "withdX" not in __Function: __Function["withdX"] = None
        if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
        if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
        if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
        if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
        if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
        if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
        from daNumerics.ApproximatedDerivatives import FDApproximation
        FDA = FDApproximation(
            Function = __Function["Direct"],
            centeredDF = __Function["withCenteredDF"],
            increment = __Function["withIncrement"],
            dX = __Function["withdX"],
            avoidingRedundancy = __Function["withAvoidingRedundancy"],
            toleranceInRedundancy = __Function["withToleranceInRedundancy"],
            lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
            mpEnabled = __Function["withmpEnabled"],
            mpWorkers = __Function["withmpWorkers"],
            mfEnabled = __Function["withmfEnabled"],
        # NOTE(review): elided listing — the closing parenthesis of this call is missing.
        self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF)
        self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
        self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
    elif isinstance(__Function, dict) and \
        ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
        (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
        self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
        self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
        self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
    elif asMatrix is not None:
        # NOTE(review): numpy.float is deprecated (removed in NumPy >= 1.24) —
        # TODO confirm before modernising.
        __matrice = numpy.matrix( __Matrix, numpy.float )
        self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
        self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
        self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
    # NOTE(review): elided listing — "else:" line missing.
        raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
    if __appliedInX is not None:
        self.__FO["AppliedInX"] = {}
        for key in list(__appliedInX.keys()):
            # NOTE(review): isinstance(...) would be the idiomatic type test below.
            if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                # Case of a true matrix
                self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
            elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                # Case of a vector represented as a 2-dimensional array
                self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
            # NOTE(review): elided listing — "else:" line missing.
                self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
    # NOTE(review): elided listing — "else:" line missing.
        self.__FO["AppliedInX"] = None

    # NOTE(review): elided listing — the "def __repr__(self):" line is missing above.
        "x.__repr__() <==> repr(x)"
        # NOTE(review): self.__V is not assigned anywhere visible in this
        # class; possibly should be self.__FO — confirm against the original file.
        return repr(self.__V)

    # NOTE(review): elided listing — the "def __str__(self):" line and its
    # return statement are missing.
        "x.__str__() <==> str(x)"
455 # ==============================================================================
class Algorithm(object):
    """
    General interface class for assimilation algorithms.

    It provides a framework for writing an elementary assimilation algorithm
    class, by supplying a container (dictionary) of initialised persistent
    variables, and access methods to these stored variables.

    An elementary algorithm class must implement the "run" method.
    """
    def __init__(self, name):
        """
        This initialisation builds storage variables generically available in
        the elementary algorithms. These storage variables are then kept in an
        internal dictionary of the object, accessed through the "get" method.

        The provided variables are:
        - CostFunctionJ  : global cost function, sum of the two following parts
        - CostFunctionJb : background part of the cost function
        - CostFunctionJo : observation part of the cost function
        - GradientOfCostFunctionJ  : gradient of the global cost function
        - GradientOfCostFunctionJb : gradient of the background part
        - GradientOfCostFunctionJo : gradient of the observation part
        - CurrentState : current state along the iterations
        - Analysis : the analysis Xa
        - SimulatedObservationAtBackground : observed state H(Xb) at background
        - SimulatedObservationAtCurrentState : observed state H(X) at current state
        - SimulatedObservationAtOptimum : observed state H(Xa) at the optimum
        - Innovation : the innovation d = Y - H(X)
        - InnovationAtCurrentState : innovation at current state dn = Y - H(Xn)
        - SigmaObs2 : optimal-correction indicator for observation errors
        - SigmaBck2 : optimal-correction indicator for background errors
        - MahalanobisConsistency : covariance consistency indicator
        - OMA : Observation minus Analysis : Y - Xa
        - OMB : Observation minus Background : Y - Xb
        - AMB : Analysis minus Background : Xa - Xb
        - APosterioriCovariance : A matrix
        - APosterioriVariances : variances of the A matrix
        - APosterioriStandardDeviations : standard deviations of the A matrix
        - APosterioriCorrelations : correlations of the A matrix
        - Residu : for the checking algorithms
        Further variables to store can be added in the initialisation of the
        elementary algorithm inheriting from this class.
        """
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()   # memory usage probe, reported in _pre_run/_post_run
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__required_parameters = {}
        self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
        # One persistence container per observable quantity.
        self.StoredVariables = {}
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
    def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
        "Pre-processing shared by all algorithm runs: parameters, input checks, bounds, verbosity"
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        # Mise a jour de self._parameters avec Parameters
        self.__setParameters(Parameters)
        # Corrections et complements
        def __test_vvalue(argument, variable, argname):
            # Check the presence of a vector argument against the declared requirements.
            # NOTE(review): elided listing — the "if argument is None:" guard
            # is presumably missing above this branching; TODO confirm.
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
            # NOTE(review): elided listing — the "else:" lines of the inner and
            # outer branching are missing; the two statements below belong to them.
                logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
                logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        def __test_cvalue(argument, variable, argname):
            # Check the presence of a covariance matrix argument against the declared requirements.
            # NOTE(review): elided listing — the "if argument is None:" guard
            # is presumably missing above this branching; TODO confirm.
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
            # NOTE(review): elided listing — the "else:" lines are missing; the
            # two statements below belong to them.
                logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
                logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( Q, "Q", "Evolution" )
        # Bounds are kept only when given as a non-empty list/tuple.
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
        # NOTE(review): elided listing — "else:" line missing.
            self._parameters["Bounds"] = None
        # Optimiser verbosity is driven by the logging level.
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
            # NOTE(review): elided listing — "else:" line missing.
                self._parameters["optmessages"] = 15
        # NOTE(review): elided listing — "else:" line missing; the lines below
        # form the quiet branch.
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
            # NOTE(review): elided listing — "else:" line missing.
                self._parameters["optmessages"] = 15
    def _post_run(self,_oH=None):
        "Post-processing shared by all algorithm runs: a posteriori quantities and call statistics"
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            # Derive the requested quantities from every stored A matrix.
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))   # diagonal of inverse standard deviations
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))           # correlation matrix from covariance
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        # NOTE(review): elided listing — a guard such as "if _oH is not None:"
        # is presumably missing above the operator statistics; TODO confirm.
        logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
        logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Terminé", self._name)
        # NOTE(review): elided listing — the final return statement is missing.
623 def _toStore(self, key):
624 "True if in StoreSupplementaryCalculations, else False"
625 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Return the stored variable identified by `key`, or the whole
        dictionary of available variables when no key is given. The variables
        are returned directly as objects, so the individual access methods are
        those of the persistence classes.
        """
        # NOTE(review): elided listing — the "if key is not None:" guard is missing.
            return self.StoredVariables[key]
        # NOTE(review): elided listing — the "else:" line is missing.
            return self.StoredVariables
640 def __contains__(self, key=None):
641 "D.__contains__(k) -> True if D has a key k, else False"
642 return key in self.StoredVariables
        # NOTE(review): elided listing — the "def keys(self):" line is missing above.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        # NOTE(review): elided listing — the fallback branch is missing.
        # NOTE(review): elided listing — the "def pop(self, k, d):" line is missing above.
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.pop(k, d)
        # NOTE(review): elided listing — the branch handling the call before
        # StoredVariables exists is partial; only its raise is visible.
            raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Must implement the elementary assimilation computation, in its most
        natural mathematical form. To be overridden by every concrete
        algorithm class.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
        """
        Declare a required parameter of the algorithm together with its
        default characteristics (default value, typecast, bounds, allowed
        values list, user message).
        """
        # NOTE(review): elided listing — the "if name is None:" guard is missing above this raise.
        raise ValueError("A name is mandatory to define a required parameter.")
        self.__required_parameters[name] = {
            # NOTE(review): elided listing — the "default", "message", "minval",
            # "maxval" and "listval" entries of this dictionary are missing.
            "typecast" : typecast,
        # NOTE(review): elided listing — the closing of the dictionary is missing.
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of required parameter names, or the full
        dictionary of required parameters when details are requested.
        """
        # NOTE(review): elided listing — the "if noDetails:" guard is missing.
            return sorted(self.__required_parameters.keys())
        # NOTE(review): elided listing — the "else:" line is missing.
            return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Return the value of a required parameter, after controlled validation
        (typecast, min/max bounds, allowed-values list).
        """
        default = self.__required_parameters[name]["default"]
        typecast = self.__required_parameters[name]["typecast"]
        minval = self.__required_parameters[name]["minval"]
        maxval = self.__required_parameters[name]["maxval"]
        listval = self.__required_parameters[name]["listval"]
        if value is None and default is None:
            # NOTE(review): elided listing — this branch's assignment (presumably "__val = None") is missing.
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
        # NOTE(review): elided listing — the "else:" line is missing; the two
        # lines below belong to the explicit-value branch.
            if typecast is None: __val = value
            else: __val = typecast( value )
        # Bounds validation (element-wise for array-like values).
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
        if listval is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                # NOTE(review): elided listing — the per-element loop and
                # membership test are missing above this raise.
                raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
            elif __val not in listval:
                raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
        # NOTE(review): elided listing — the final "return __val" is missing.
733 def requireInputArguments(self, mandatory=(), optional=()):
735 Permet d'imposer des arguments requises en entrée
737 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
738 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    def __setParameters(self, fromDico={}):
        """
        Store the received parameters into the internal dictionary, applying
        the controlled setParameterValue to every declared required parameter.
        """
        # NOTE(review): mutable default argument `fromDico={}` — harmless here
        # as it is only read, but a None default would be safer; TODO confirm.
        self._parameters.update( fromDico )
        for k in self.__required_parameters.keys():
            if k in fromDico.keys():
                self._parameters[k] = self.setParameterValue(k,fromDico[k])
            # NOTE(review): elided listing — the "else:" line is missing.
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
752 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action interface class for an algorithm and its parameters.
    """
    # NOTE(review): elided listing — the "def __init__(self," line, several
    # keyword parameters (asAlgorithm, asDict, asScript, ...) and the
    # initialisation of self.__P are missing from this view.
    name = "GenericAlgorithm",
    self.__name = str(name)
    self.__algorithm = {}
    self.__algorithmFile = None
    self.__algorithmName = None
    self.updateParameters( asDict, asScript )
    if asAlgorithm is None and asScript is not None:
        __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
    # NOTE(review): elided listing — the "else:" branch assigning __Algo
    # directly from asAlgorithm is missing.
    if __Algo is not None:
        self.__A = str(__Algo)
        self.__P.update( {"Algorithm":self.__A} )
        # NOTE(review): elided listing — a guard line is missing above.
        self.__setAlgorithm( self.__A )
    def updateParameters(self,
        # NOTE(review): elided listing — the keyword parameters (presumably
        # asDict and asScript) and the closing of this signature are missing.
        "Update the internal parameter dictionary"
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # NOTE(review): elided listing — the "else:" branch assigning __Dict
        # from asDict is missing.
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Run the assimilation computation (pure Python scheme)"
        Operator.CM.clearCache()
        # Unpack every study object, accepting either a wrapper with getO()
        # or the raw object itself.
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # Check dimensional consistency of all the objects before running.
        self.__shape_validate()
        self.__algorithm.run(
            # NOTE(review): elided listing — most keyword arguments of this
            # run() call (Xb, Y, U, HO, EM, CM, R, B, Q) are missing.
            Parameters = self.__P,
        # NOTE(review): elided listing — the closing of the call and the
        # return statement are missing.
    def executeYACSScheme(self, FileName=None):
        "Allows to launch the data assimilation computation"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        __file    = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
        # A full SALOME/YACS/ADAO environment is mandatory for this execution path.
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        # NOTE(review): the imports of SALOMERuntime, pilot and loader (and their
        # try/except wrapper) are elided in this view — verify upstream.
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        # Register the YACS scheme file itself as a catalog of procedures.
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
        # NOTE(review): the "try:" opener for the load below is elided in this view.
        p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        # Report parser diagnostics before attempting execution.
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
        # NOTE(review): the validity guard (if not p.isValid():) appears elided here.
        print("The YACS XML schema is not valid and will not be executed:")
        print(p.getErrorReport())
        # Consistency (link) checking: warnings or errors abort the run.
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        # NOTE(review): the executor RunW/RunB call is elided in this view.
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Check the existence of a variable or parameter key"
        # Algorithm stored variables take precedence over raw parameters.
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            # NOTE(review): the parameter-lookup branch body and the final
            # fallback are elided in this view — verify upstream.
903 "Necessaire pour le pickling"
904 return self.__algorithm.pop(k, d)
906 def getAlgorithmRequiredParameters(self, noDetails=True):
907 "Renvoie la liste des paramètres requis selon l'algorithme"
908 return self.__algorithm.getRequiredParameters(noDetails)
    def setObserver(self, __V, __O, __I, __S):
        # Attach the observer callable __O (with info __I and scheduler __S)
        # to the algorithm stored variable named __V.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        # NOTE(review): an "else:" line appears elided before the call below.
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            # NOTE(review): Scheduler/HookFunction keyword arguments and the
            # closing parenthesis are elided in this view — verify upstream.
            HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach the observer __O from the stored variable named __V.
        # NOTE(review): __A is not used in the visible code — presumably an
        # "all occurrences" flag forwarded to removeDataObserver; confirm.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
            # NOTE(review): the call arguments and closing parenthesis are
            # elided in this view — verify upstream.
    def hasObserver(self, __V):
        # Report whether the stored variable __V currently has a data observer.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            # NOTE(review): the early-return branch body is elided in this view.
        if __V not in self.__algorithm:
            # NOTE(review): the early-return branch body is elided in this view.
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
947 return list(self.__algorithm.keys()) + list(self.__P.keys())
949 def __contains__(self, key=None):
950 "D.__contains__(k) -> True if D has a key k, else False"
951 return key in self.__algorithm or key in self.__P
954 "x.__repr__() <==> repr(x)"
955 return repr(self.__A)+", "+repr(self.__P)
958 "x.__str__() <==> str(x)"
959 return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """Select the algorithm used to carry out the assimilation study.

        The argument is a character string referring to the name of an
        algorithm performing the operation on the fixed arguments.
        """
        # NOTE(review): the "if choice is None:" guard line appears elided here.
        raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete file
        # ---------------------------------------
        # NOTE(review): the "module_path = None" initialization appears elided here.
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------
        # Temporarily prepend the algorithm directory to sys.path for the import.
        # NOTE(review): the "try:" opener for the import below is elided in this view.
        sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the elementary type from the file
        # ----------------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        """Validate the mutual consistency of the sizes of the variables
        and of the matrices, when present.

        Each quantity is reduced to a shape tuple, then cross-checks are
        performed; any inconsistency raises ValueError/TypeError.
        """
        # --- Reduce each quantity to a shape tuple -------------------------
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            # shape may be a tuple attribute or a callable (Persistence objects).
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        #
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        #
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        #
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        #
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        #
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        #
        # Operators are dicts of forms; their "Direct" form carries the shape.
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        #
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        #
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Conditions check
        # ----------------
        # Xb and Y must be vector-like (1-D, or 2-D with a singleton dimension).
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        #
        # Covariance matrices and the evolution operator must be square.
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        #
        # Cross-compatibility of the observation operator with X, Y, B and R.
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        #
        # B vs Xb: EnsembleBlue accepts an ensemble Xb and reshapes it.
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    # NOTE(review): numpy.float is deprecated/removed in NumPy >= 1.20/1.24
                    # — consider plain float when upgrading.
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
                __Xb_shape = min(__B_shape)
            # NOTE(review): an "else:" line appears elided before the raise below.
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        #
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        #
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        #
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        #
        # Optional "Bounds" parameter must list one pair per state component.
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1116 # ==============================================================================
1117 class RegulationAndParameters(object):
1119 Classe générale d'interface d'action pour la régulation et ses paramètres
1122 name = "GenericRegulation",
1129 self.__name = str(name)
1132 if asAlgorithm is None and asScript is not None:
1133 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
1135 __Algo = asAlgorithm
1137 if asDict is None and asScript is not None:
1138 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1142 if __Dict is not None:
1143 self.__P.update( dict(__Dict) )
1145 if __Algo is not None:
1146 self.__P.update( {"Algorithm":self.__A} )
    def get(self, key = None):
        "Check the existence of a variable or parameter key"
        # NOTE(review): a guard such as "if key in self.__P:" appears elided
        # before the lookup below — verify against upstream.
        return self.__P[key]
1155 # ==============================================================================
1156 class DataObserver(object):
1158 Classe générale d'interface de type observer
1161 name = "GenericObserver",
1173 self.__name = str(name)
1178 if onVariable is None:
1179 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1180 elif type(onVariable) in (tuple, list):
1181 self.__V = tuple(map( str, onVariable ))
1182 if withInfo is None:
1185 self.__I = (str(withInfo),)*len(self.__V)
1186 elif isinstance(onVariable, str):
1187 self.__V = (onVariable,)
1188 if withInfo is None:
1189 self.__I = (onVariable,)
1191 self.__I = (str(withInfo),)
1193 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1195 if asString is not None:
1196 __FunctionText = asString
1197 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1198 __FunctionText = Templates.ObserverTemplates[asTemplate]
1199 elif asScript is not None:
1200 __FunctionText = ImportFromScript(asScript).getstring()
1203 __Function = ObserverF(__FunctionText)
1205 if asObsObject is not None:
1206 self.__O = asObsObject
1208 self.__O = __Function.getfunc()
1210 for k in range(len(self.__V)):
1213 if ename not in withAlgo:
1214 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1216 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1219 "x.__repr__() <==> repr(x)"
1220 return repr(self.__V)+"\n"+repr(self.__O)
1223 "x.__str__() <==> str(x)"
1224 return str(self.__V)+"\n"+str(self.__O)
1226 # ==============================================================================
1227 class State(object):
1229 Classe générale d'interface de type état
1232 name = "GenericVector",
1234 asPersistentVector = None,
1240 toBeChecked = False,
1243 Permet de définir un vecteur :
1244 - asVector : entrée des données, comme un vecteur compatible avec le
1245 constructeur de numpy.matrix, ou "True" si entrée par script.
1246 - asPersistentVector : entrée des données, comme une série de vecteurs
1247 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1248 type Persistence, ou "True" si entrée par script.
1249 - asScript : si un script valide est donné contenant une variable
1250 nommée "name", la variable est de type "asVector" (par défaut) ou
1251 "asPersistentVector" selon que l'une de ces variables est placée à
1253 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1254 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1255 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1256 nommée "name"), on récupère les colonnes et on les range ligne après
1257 ligne (colMajor=False) ou colonne après colonne (colMajor=True). La
1258 variable résultante est de type "asVector" (par défaut) ou
1259 "asPersistentVector" selon que l'une de ces variables est placée à
1262 self.__name = str(name)
1263 self.__check = bool(toBeChecked)
1267 self.__is_vector = False
1268 self.__is_series = False
1270 if asScript is not None:
1271 __Vector, __Series = None, None
1272 if asPersistentVector:
1273 __Series = ImportFromScript(asScript).getvalue( self.__name )
1275 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1276 elif asDataFile is not None:
1277 __Vector, __Series = None, None
1278 if asPersistentVector:
1279 if colNames is not None:
1280 __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
1282 __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1283 if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1284 __Series = numpy.transpose(__Series)
1285 elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1286 __Series = numpy.transpose(__Series)
1288 if colNames is not None:
1289 __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
1291 __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1293 __Vector = numpy.ravel(__Vector, order = "F")
1295 __Vector = numpy.ravel(__Vector, order = "C")
1297 __Vector, __Series = asVector, asPersistentVector
1299 if __Vector is not None:
1300 self.__is_vector = True
1301 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1302 self.shape = self.__V.shape
1303 self.size = self.__V.size
1304 elif __Series is not None:
1305 self.__is_series = True
1306 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1307 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1308 if isinstance(__Series, str): __Series = eval(__Series)
1309 for member in __Series:
1310 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1313 if isinstance(self.__V.shape, (tuple, list)):
1314 self.shape = self.__V.shape
1316 self.shape = self.__V.shape()
1317 if len(self.shape) == 1:
1318 self.shape = (self.shape[0],1)
1319 self.size = self.shape[0] * self.shape[1]
1321 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1323 if scheduledBy is not None:
1324 self.__T = scheduledBy
1326 def getO(self, withScheduler=False):
1328 return self.__V, self.__T
1329 elif self.__T is None:
1335 "Vérification du type interne"
1336 return self.__is_vector
1339 "Vérification du type interne"
1340 return self.__is_series
1343 "x.__repr__() <==> repr(x)"
1344 return repr(self.__V)
1347 "x.__str__() <==> str(x)"
1348 return str(self.__V)
1350 # ==============================================================================
1351 class Covariance(object):
1353 Classe générale d'interface de type covariance
1356 name = "GenericCovariance",
1357 asCovariance = None,
1358 asEyeByScalar = None,
1359 asEyeByVector = None,
1362 toBeChecked = False,
1365 Permet de définir une covariance :
1366 - asCovariance : entrée des données, comme une matrice compatible avec
1367 le constructeur de numpy.matrix
1368 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1369 multiplicatif d'une matrice de corrélation identité, aucune matrice
1370 n'étant donc explicitement à donner
1371 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1372 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1373 n'étant donc explicitement à donner
1374 - asCovObject : entrée des données comme un objet python, qui a les
1375 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1376 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1377 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1378 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1379 pleine doit être vérifié
1381 self.__name = str(name)
1382 self.__check = bool(toBeChecked)
1385 self.__is_scalar = False
1386 self.__is_vector = False
1387 self.__is_matrix = False
1388 self.__is_object = False
1390 if asScript is not None:
1391 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1393 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1395 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1397 __Object = ImportFromScript(asScript).getvalue( self.__name )
1399 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1401 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1403 if __Scalar is not None:
1404 if numpy.matrix(__Scalar).size != 1:
1405 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1406 self.__is_scalar = True
1407 self.__C = numpy.abs( float(__Scalar) )
1410 elif __Vector is not None:
1411 self.__is_vector = True
1412 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1413 self.shape = (self.__C.size,self.__C.size)
1414 self.size = self.__C.size**2
1415 elif __Matrix is not None:
1416 self.__is_matrix = True
1417 self.__C = numpy.matrix( __Matrix, float )
1418 self.shape = self.__C.shape
1419 self.size = self.__C.size
1420 elif __Object is not None:
1421 self.__is_object = True
1423 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1424 if not hasattr(self.__C,at):
1425 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1426 if hasattr(self.__C,"shape"):
1427 self.shape = self.__C.shape
1430 if hasattr(self.__C,"size"):
1431 self.size = self.__C.size
1436 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
    def __validate(self):
        """Validate the stored covariance content (scalar, vector, matrix
        or object form) set up by the constructor."""
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        # Matrix/object forms must be square.
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        # Scalar/vector forms must carry strictly positive variances.
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # Full SPD check via Cholesky, only when explicitly requested or at
        # verbose logging levels (it can be expensive).
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
            # NOTE(review): the try/except wrapper around the factorization is
            # elided in this view — verify upstream.
            L = numpy.linalg.cholesky( self.__C )
            raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
            # NOTE(review): try/except wrapper elided here as well.
            L = self.__C.cholesky()
            raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1464 "Vérification du type interne"
1465 return self.__is_scalar
1468 "Vérification du type interne"
1469 return self.__is_vector
1472 "Vérification du type interne"
1473 return self.__is_matrix
1476 "Vérification du type interne"
1477 return self.__is_object
1482 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1483 elif self.isvector():
1484 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1485 elif self.isscalar():
1486 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1487 elif self.isobject():
1488 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1490 return None # Indispensable
1495 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1496 elif self.isvector():
1497 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1498 elif self.isscalar():
1499 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1500 elif self.isobject():
1501 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1504 "Décomposition de Cholesky"
1506 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1507 elif self.isvector():
1508 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1509 elif self.isscalar():
1510 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1511 elif self.isobject() and hasattr(self.__C,"cholesky"):
1512 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
    def choleskyI(self):
        "Inverse of the Cholesky factorization"
        # NOTE(review): the "if self.ismatrix():" guard appears elided before
        # the matrix-form return below — verify upstream.
        return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
        elif self.isvector():
            # Diagonal form: element-wise 1/sqrt of the variances.
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            # Object form: delegate when the object provides the operation.
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
    def diag(self, msize=None):
        "Diagonal of the matrix"
        # NOTE(review): the "if self.ismatrix():" guard appears elided before
        # the return below — verify upstream.
        return numpy.diag(self.__C)
        elif self.isvector():
            # NOTE(review): the vector-form return is elided in this view.
        elif self.isscalar():
            # Scalar form needs an explicit size to expand the diagonal.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return self.__C * numpy.ones(int(msize))
        elif self.isobject():
            return self.__C.diag()
    def asfullmatrix(self, msize=None):
        # Return the covariance expanded as a dense numpy.matrix.
        # NOTE(review): the docstring and the matrix-form branch are elided in
        # this view — verify upstream.
        elif self.isvector():
            # Diagonal form: embed the variance vector on the diagonal.
            return numpy.matrix( numpy.diag(self.__C), float )
        elif self.isscalar():
            # Scalar form needs an explicit size to build the identity expansion.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()
    def trace(self, msize=None):
        "Trace of the matrix"
        # NOTE(review): the "if self.ismatrix():" guard appears elided before
        # the return below — verify upstream.
        return numpy.trace(self.__C)
        elif self.isvector():
            # Diagonal form: trace is the sum of the variances.
            return float(numpy.sum(self.__C))
        elif self.isscalar():
            # Scalar form needs an explicit size: trace = variance * size.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
1571 "x.__repr__() <==> repr(x)"
1572 return repr(self.__C)
1575 "x.__str__() <==> str(x)"
1576 return str(self.__C)
1578 def __add__(self, other):
1579 "x.__add__(y) <==> x+y"
1580 if self.ismatrix() or self.isobject():
1581 return self.__C + numpy.asmatrix(other)
1582 elif self.isvector() or self.isscalar():
1583 _A = numpy.asarray(other)
1584 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1585 return numpy.asmatrix(_A)
1587 def __radd__(self, other):
1588 "x.__radd__(y) <==> y+x"
1589 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1591 def __sub__(self, other):
1592 "x.__sub__(y) <==> x-y"
1593 if self.ismatrix() or self.isobject():
1594 return self.__C - numpy.asmatrix(other)
1595 elif self.isvector() or self.isscalar():
1596 _A = numpy.asarray(other)
1597 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1598 return numpy.asmatrix(_A)
1600 def __rsub__(self, other):
1601 "x.__rsub__(y) <==> y-x"
1602 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1605 "x.__neg__() <==> -x"
    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        # Dispatch on the internal covariance form and the operand type.
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
            # NOTE(review): an "else:" line appears elided before the raise below.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                # Diagonal form times a vector: element-wise scaling.
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
            # NOTE(review): an "else:" line appears elided before the raise below.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            # NOTE(review): an "else:" line appears elided before the return below.
                return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
        # NOTE(review): an "else:" line appears elided before the raise below.
            raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        # Dispatch on the internal covariance form and the operand type.
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
            # NOTE(review): an "else:" line appears elided before the raise below.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmul__(other)
        # NOTE(review): an "else:" line appears elided before the raise below.
            raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1657 "x.__len__() <==> len(x)"
1658 return self.shape[0]
1660 # ==============================================================================
class ObserverF(object):
    """
    Creation of an observer function from its source text
    """
    def __init__(self, corps=""):
        # Raw source text executed each time the observer fires
        self.__source = corps
    def func(self, var, info):
        "Observation function: runs the stored text with var/info in scope"
        exec(self.__source)
    def getfunc(self):
        "Return the bound observation function held by this object"
        return self.func
1674 # ==============================================================================
class CaseLogger(object):
    """
    Record of the commands used to build a case
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []   # ordered list of (command, keys, locals, pre, switchoff)
        self.__switchoff = False
        # Built-in output formats; user-supplied ones may extend/override them.
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            }
        # Built-in input formats; user-supplied ones may extend/override them.
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))
    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        __recordable = (__command is not None
                        and __keys is not None
                        and __local is not None
                        and not self.__switchoff)
        if __recordable:
            # "self" is never a useful recorded argument
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                # Suppress recording of subsequent commands until re-enabled
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False
    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized dump of the recorded commands"
        if __format not in self.__viewers:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        return __formater.dump(__filename, __upa)
    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of the commands"
        if __format not in self.__loaders:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        __formater = self.__loaders[__format]()
        return __formater.load(__filename, __content, __object)
1724 # ==============================================================================
def MultiFonction( __xserie, _sFunction = lambda x: x ):
    """
    For an ordered list of input vectors, return the corresponding ordered
    list of values of the function given in argument.

    Raises ValueError when the input is not iterable.
    """
    if not PlatformInfo.isIterable( __xserie ):
        # Fixed typo in the message: "unkown" -> "unknown"
        raise ValueError("MultiFonction not iterable unknown input type: %s"%(type(__xserie),))
    #
    # Sequential evaluation, preserving the ordering of the inputs
    return [ _sFunction( __xvalue ) for __xvalue in __xserie ]
1739 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,  # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,  # Simulation of Hm(x) already computed
                   _arg = None,  # Extra arguments for Hm, given as a tuple
                   # NOTE(review): the body below also reads _Y, _Xb, _BI and _RI,
                   # presumably keyword parameters missing from this extract -- confirm
                   _SIV = False, # To be phased out for 8.0
                   _SSC = [],    # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,     # nbPreviousSteps
                   _QM  = "DA",  # QualityMeasure
                   _SSV = {},    # Input and/or output : self.StoredVariables
                                 # NOTE(review): _SSC/_SSV are mutable defaults, shared
                                 # between calls when the defaults are used -- confirm intent
                   _fRt = False, # Return or not the extended output
                   _sSc = True,  # Store or not the SSC
    """
    General cost function, useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO, SA, Tabu,
    DFO, QuantileRegression
    """
        # Ensure every stored-variable series supports "append"
        for k in ["CostFunctionJ",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    _X = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    # Use the precomputed simulation when provided, otherwise evaluate the operator
    if _HmX is not None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    # A NaN in the simulation invalidates the cost function value
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        # Select the quality measure: background+observation, observation-only,
        # unweighted least squares, L1, max-error, or quantile/null
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            raise ValueError("Unknown asked quality measure!")
    J = float( Jb ) + float( Jo )
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        # Index of the minimal J over the steps of this run only
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    if _QM in ["QR"]: # For QuantileRegression
1854 # ==============================================================================
# Self-test entry point: prints a banner when the module is run directly.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC \n')