1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
# NOTE(review): incomplete extraction — many original source lines are missing
# inside this class; visible code lines are kept verbatim, with best-effort
# conventional indentation. Bracketed markers flag the gaps.
class CacheManager(object):
    # Docstring fragment (its quote lines are missing from this view):
    Classe générale de gestion d'un cache de calculs
            # __init__ signature fragment (the "def __init__(self," line is missing):
            # tolerance used to declare two points redundant, and cache length
            # (-1 presumably means "sized lazily" — see storeValueInX; TODO confirm)
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy = -1,
        Les caractéristiques de tolérance peuvent être modifiées à la création.
        # Tolerance for deciding that two evaluation points are the same
        self.__tolerBP = float(toleranceInRedundancy)
        # Current cache capacity; the initial value is kept separately so the
        # cache can be restored to it later (see the enable fragment below)
        self.__lenghtOR = int(lenghtOfRedundancy)
        self.__initlnOR = self.__lenghtOR

    def wasCalculatedIn(self, xValue, oName="" ):
        "Check whether a calculation matching the given value already exists"
        # Scan the cache backwards (most recent entries first); each cached item
        # is a tuple: (point, value, norm of point, operator name) — see storeValueInX
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if not hasattr(xValue, 'size'):
                # [body missing from this view — non-array values cannot match]
            elif (str(oName) != self.__listOPCV[i][3]):
                # [body missing — entry belongs to another operator]
            elif (xValue.size != self.__listOPCV[i][0].size):
                # [body missing — size mismatch, cannot be the same point]
            elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
                # [body missing — cheap first-component test to reject early]
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                # Full relative-distance test passed: reuse the cached value
                __HxV = self.__listOPCV[i][1]
                # [remaining lines of the method are missing from this view]

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Store, for an operator o, a calculation Hx matching the value x"
        if self.__lenghtOR < 0:
            # First storage: size the cache from the state dimension (capped at 50)
            self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if the name is new
            self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR += self.__lenghtOR
            self.__seenNames.append(str(oName))
        while len(self.__listOPCV) > self.__lenghtOR:
            # Evict the oldest entries when over capacity (FIFO)
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)), # 0 Previous point
            copy.copy(HxValue), # 1 Previous value
            numpy.linalg.norm(xValue), # 2 Norm
            str(oName), # 3 Operator name
            # [closing of this tuple/append is on lines missing from this view]
        # [method boundaries missing here — the following lines presumably belong
        #  to clearCache()/disable()/enable() toggles; TODO confirm against the
        #  complete source]
        self.__initlnOR = self.__lenghtOR
        self.__enabled = False
        self.__lenghtOR = self.__initlnOR
        self.__enabled = True
113 # ==============================================================================
# NOTE(review): incomplete extraction — the class-level attributes (shared call
# counters, cache manager CM) and several __init__ lines are missing from this
# view; visible code lines are kept verbatim with best-effort indentation.
class Operator(object):
    # Docstring fragment (quote lines missing):
    Classe générale d'interface de type opérateur simple
            # __init__ signature fragment (the "def __init__(self," line is missing):
            name = "GenericOperator",
            avoidingRedundancy = True,
            inputAsMultiFunction = False,
            enableMultiProcess = False,
            extraArguments = None,
        # Docstring fragment:
        On construit un objet de ce type en fournissant, à l'aide de l'un des
        deux mots-clé, soit une fonction ou un multi-fonction python, soit une
        - name : nom d'opérateur
        - fromMethod : argument de type fonction Python
        - fromMatrix : argument adapté au constructeur numpy.matrix
        - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
        - inputAsMultiFunction : booléen indiquant une fonction explicitement
          définie (ou pas) en multi-fonction
        - extraArguments : arguments supplémentaires passés à la fonction de
          base et ses dérivées (tuple ou dictionnaire)
        self.__name = str(name)
        # Per-instance call counters: matrix form, functional form, cache hits
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # Function already given in multi-function form: use it as-is
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Mono-function: wrap it so it can be applied to a series of inputs
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type = "Method"
        elif fromMatrix is not None:
            # Linear operator given directly as a matrix
            # NOTE(review): numpy.float was removed in NumPy >= 1.24 — this line
            # needs migrating to plain "float" for recent NumPy versions
            self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
            self.__Type = "Matrix"
    def disableAvoidingRedundancy(self):
        # [docstring line missing from this view]
        # Turn off the shared evaluation cache for all operators
        Operator.CM.disable()
    # [line(s) missing between the two methods in this view]
    def enableAvoidingRedundancy(self):
        # [lines missing: presumably re-enables the shared cache when this
        #  instance allows redundancy avoidance; only the disabling fallback
        #  call is visible — TODO confirm against the complete source]
        Operator.CM.disable()
    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        # NOTE(review): incomplete extraction — several lines are missing inside
        # this method; visible code is kept verbatim, indentation best-effort.
        # Docstring fragment (quote lines missing):
        Permet de restituer le résultat de l'application de l'opérateur à une
        série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
        argument devant a priori être du bon type.
        - les arguments par série sont :
            - xValue : argument adapté pour appliquer l'opérateur
            - HValue : valeur précalculée de l'opérateur en ce point
            - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
        if HValue is not None:
            # [lines missing: presumably normalizes xValue/HValue into series]
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        if _HValue is not None:
            # Precomputed values provided: store them instead of evaluating
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
                # Feed the shared redundancy cache with the provided pair
                Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
            for i, xv in enumerate(_xValue):
                # Look up the cache first to avoid a redundant evaluation
                # [surrounding conditional lines missing from this view]
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                __alreadyCalculated = False
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    if self.__Matrix is not None:
                        # Linear case: apply the stored matrix directly
                        self.__addOneMatrixCall()
                        _xv = numpy.matrix(numpy.ravel(xv)).T
                        _hv = self.__Matrix * _xv
                        self.__addOneMethodCall()
                _HxValue.append( _hv )
        if len(_xserie)>0 and self.__Matrix is None:
            # Functional case: evaluate the pending series through the (multi-)function
            if self.__extraArgs is None:
                _hserie = self.__Method( _xserie ) # MF calculation
                _hserie = self.__Method( _xserie, self.__extraArgs ) # MF calculation
            if not hasattr(_hserie, "pop"):
                raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
            # [lines missing: presumably re-inserts the results and caches them]
            Operator.CM.storeValueInX(_xv,_hv,self.__name)
        if returnSerieAsArrayMatrix:
            # Stack the series of results column-wise into one 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]
    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        # NOTE(review): incomplete extraction — some lines missing inside this method.
        # Docstring fragment (quote lines missing):
        Permet de restituer le résultat de l'application de l'opérateur à des
        paires (xValue, uValue). Cette méthode se contente d'appliquer, son
        argument devant a priori être du bon type. Si la uValue est None,
        on suppose que l'opérateur ne s'applique qu'à xValue.
        - paires : les arguments par paire sont :
            - xValue : argument X adapté pour appliquer l'opérateur
            - uValue : argument U adapté pour appliquer l'opérateur
        - argsAsSerie : indique si l'argument est une mono ou multi-valeur
        # Normalize the input into a series of (x, u) pairs
        if argsAsSerie: _xuValue = paires
        else: _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        if self.__Matrix is not None:
            # Linear case: u is ignored, apply the matrix to each x
            for paire in _xuValue:
                _xValue, _uValue = paire
                _xValue = numpy.matrix(numpy.ravel(_xValue)).T
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix * _xValue )
            # [else branch header missing — functional case: forward (x, u)
            #  pairs, or x alone when u is None]
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                    # [else: header missing before the next line in this view]
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # MF calculation
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF calculation
        if returnSerieAsArrayMatrix:
            # Stack the series of results column-wise into one 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]
301 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
303 Permet de restituer le résultat de l'application de l'opérateur à une
304 série d'arguments xValue, sachant que l'opérateur est valable en
305 xNominal. Cette méthode se contente d'appliquer, son argument devant a
306 priori être du bon type. Si l'opérateur est linéaire car c'est une
307 matrice, alors il est valable en tout point nominal et xNominal peut
308 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
309 permet d'indiquer que l'argument est multi-paires.
311 - paires : les arguments par paire sont :
312 - xNominal : série d'arguments permettant de donner le point où
313 l'opérateur est construit pour être ensuite appliqué
314 - xValue : série d'arguments adaptés pour appliquer l'opérateur
315 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
317 if argsAsSerie: _nxValue = paires
318 else: _nxValue = (paires,)
319 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
321 if self.__Matrix is not None:
323 for paire in _nxValue:
324 _xNominal, _xValue = paire
325 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
326 self.__addOneMatrixCall()
327 _HxValue.append( self.__Matrix * _xValue )
329 self.__addOneMethodCall( len(_nxValue) )
330 if self.__extraArgs is None:
331 _HxValue = self.__Method( _nxValue ) # Calcul MF
333 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
335 if returnSerieAsArrayMatrix:
336 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
338 if argsAsSerie: return _HxValue
339 else: return _HxValue[-1]
    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        # NOTE(review): incomplete extraction — some lines missing inside this method.
        # Docstring fragment (quote lines missing):
        Permet de renvoyer l'opérateur sous la forme d'une matrice
        if self.__Matrix is not None:
            # Already linear: return the stored matrix itself
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
            # Functional operator: an operating point (or series of points)
            # is required to build the matrix form
            self.__addOneMethodCall( len(ValueForMethodForm) )
            for _vfmf in ValueForMethodForm:
                mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
                # [else branch lines around the next two statements are missing]
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
            # [else: header missing before the raise in this view]
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        if argsAsSerie: return mValue
        else: return mValue[-1]
    # ["def shape(self):" line missing from this view — fragment follows]
        # Docstring fragment:
        Renvoie la taille sous forme numpy si l'opérateur est disponible sous
        la forme d'une matrice
        if self.__Matrix is not None:
            return self.__Matrix.shape
            # [else: header missing before the raise in this view]
            raise ValueError("Matrix form of the operator is not available, nor the shape")
    def nbcalls(self, which=None):
        # Docstring fragment (quote lines missing):
        Renvoie les nombres d'évaluations de l'opérateur
        # [missing line: presumably the tuple opening, e.g. "__nbcalls = ("]
            # Indices 0..3: this instance (total, matrix, method, cached);
            # indices 4..7: class-level totals over all Operator instances
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        # [missing line: tuple closing]
        if which is None: return __nbcalls
        else: return __nbcalls[which]
390 def __addOneMatrixCall(self):
391 "Comptabilise un appel"
392 self.__NbCallsAsMatrix += 1 # Decompte local
393 Operator.NbCallsAsMatrix += 1 # Decompte global
395 def __addOneMethodCall(self, nb = 1):
396 "Comptabilise un appel"
397 self.__NbCallsAsMethod += nb # Decompte local
398 Operator.NbCallsAsMethod += nb # Decompte global
400 def __addOneCacheCall(self):
401 "Comptabilise un appel"
402 self.__NbCallsOfCached += 1 # Decompte local
403 Operator.NbCallsOfCached += 1 # Decompte global
405 # ==============================================================================
# NOTE(review): incomplete extraction — the "def __init__(self," line and many
# interior lines are missing from this view; visible code lines are kept
# verbatim, with best-effort conventional indentation.
class FullOperator(object):
    # Docstring fragment (quote lines missing):
    Classe générale d'interface de type opérateur complet
    (Direct, Linéaire Tangent, Adjoint)
            # __init__ signature fragment:
            name = "GenericFullOperator",
            asOneFunction = None, # 1 Function
            asThreeFunctions = None, # 3 Functions in a dictionary
            asScript = None, # 1 or 3 Function(s) by script
            asDict = None, # Parameters
            extraArguments = None,
            inputAsMF = False,# Function(s) as Multi-Functions
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        if asScript is not None:
            # Operator(s) loaded from a user script file
            __Matrix, __Function = None, None
            # [conditional headers missing around the next lines in this view]
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                # [dict opening line missing before these entries]
                    "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            # [else branch header missing — operators given directly below]
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        # [else: header missing before the raise in this view]
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    # [else: header missing before the next line]
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                    ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                    ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                    (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    # Full Tangent/Adjoint pair provided: use them as-is
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                    ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                    # [else: header missing before the raise in this view]
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        if scheduledBy is not None:
            self.__T = scheduledBy
        if isinstance(__Function, dict) and \
            ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
            ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Derivatives approximated by finite differences around the Direct operator
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["CenteredFiniteDifference"],
                increment = __Function["DifferentialIncrement"],
                dX = __Function["withdX"],
                extraArguments = self.__extraArgs,
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
                mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers = __Function["NumberOfProcesses"],
                mfEnabled = __Function["withmfEnabled"],
            # [call closing line missing from this view]
            self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
            ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
            (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # All three operators provided explicitly by the user
            self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # Linear operator: the matrix serves as Direct and Tangent, its
            # transpose as Adjoint
            # NOTE(review): numpy.float was removed in NumPy >= 1.24
            __matrice = numpy.matrix( __Matrix, numpy.float )
            self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
            # [else: header missing before the raise in this view]
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
        if __appliedInX is not None:
            # Normalize the precomputed values at the operating point into
            # column matrix form, whatever their input representation
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # Case of a true matrix
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Case of a vector represented as a 2-dimensional array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                    # [else: header missing before the next line in this view]
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
            # [else: header missing before the next line in this view]
            self.__FO["AppliedInX"] = None
    # ["def __repr__(self):" line missing from this view — fragment follows]
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)
    # ["def __str__(self):" line missing from this view — fragment follows]
        "x.__str__() <==> str(x)"
        return str(self.__FO)
578 # ==============================================================================
# NOTE(review): incomplete extraction — the docstring quote lines and a few
# interior lines are missing from this view; visible code is kept verbatim.
class Algorithm(object):
    # Docstring fragment:
    Classe générale d'interface de type algorithme
    Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
    d'assimilation, en fournissant un container (dictionnaire) de variables
    persistantes initialisées, et des méthodes d'accès à ces variables stockées.
    Une classe élémentaire d'algorithme doit implémenter la méthode "run".
    def __init__(self, name):
        # Docstring fragment (quote lines missing):
        L'initialisation présente permet de fabriquer des variables de stockage
        disponibles de manière générique dans les algorithmes élémentaires. Ces
        variables de stockage sont ensuite conservées dans un dictionnaire
        interne à l'objet, mais auquel on accède par la méthode "get".
        Les variables prévues sont :
            - APosterioriCorrelations : matrice de corrélations de la matrice A
            - APosterioriCovariance : matrice de covariances a posteriori : A
            - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
            - APosterioriVariances : vecteur des variances de la matrice A
            - Analysis : vecteur d'analyse : Xa
            - BMA : Background moins Analysis : Xa - Xb
            - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
            - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
            - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
            - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
            - CostFunctionJo : partie observations de la fonction-coût : Jo
            - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
            - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
            - CurrentOptimum : état optimal courant lors d'itérations
            - CurrentState : état courant lors d'itérations
            - GradientOfCostFunctionJ : gradient de la fonction-coût globale
            - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
            - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
            - IndexOfOptimum : index de l'état optimal courant lors d'itérations
            - Innovation : l'innovation : d = Y - H(X)
            - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
            - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
            - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
            - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
            - KalmanGainAtOptimum : gain de Kalman à l'optimum
            - MahalanobisConsistency : indicateur de consistance des covariances
            - OMA : Observation moins Analyse : Y - Xa
            - OMB : Observation moins Background : Y - Xb
            - ForecastState : état prédit courant lors d'itérations
            - Residu : dans le cas des algorithmes de vérification
            - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
            - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
            - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
            - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
            - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
            - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
            - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
            - SimulationQuantiles : états observés H(X) pour les quantiles demandés
        On peut rajouter des variables à stocker dans l'initialisation de
        l'algorithme élémentaire qui va hériter de cette classe
        logging.debug("%s Initialisation", str(name))
        # Memory-usage probe used by run-time debug traces (see _pre_run)
        self._m = PlatformInfo.SystemUsage()
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__required_parameters = {}
        self.__required_inputs = {
            "RequiredInputValues":{"mandatory":(), "optional":()},
            "ClassificationTags":[],
            # [dict closing line missing from this view]
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
        self.__canonical_parameter_name = {} # Mapping "lower"->"correct"
        self.__canonical_stored_name = {} # Mapping "lower"->"correct"
        # Registry of every persistent storage variable an algorithm may fill
        self.StoredVariables = {}
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
        self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
        self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
        self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
        self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        # Build the case-insensitive lookup tables for stored and parameter names
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k
        for k, v in self.__variable_names_not_public.items():
            self.__canonical_parameter_name[k.lower()] = k
        self.__canonical_parameter_name["algorithm"] = "Algorithm"
        self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
703 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
705 logging.debug("%s Lancement", self._name)
706 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
707 self._getTimeState(reset=True)
709 # Mise a jour des paramètres internes avec le contenu de Parameters, en
710 # reprenant les valeurs par défauts pour toutes celles non définies
711 self.__setParameters(Parameters, reset=True)
712 for k, v in self.__variable_names_not_public.items():
713 if k not in self._parameters: self.__setParameters( {k:v} )
715 # Corrections et compléments des vecteurs
716 def __test_vvalue(argument, variable, argname, symbol=None):
717 if symbol is None: symbol = variable
719 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
720 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
721 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
722 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
724 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
726 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
728 __test_vvalue( Xb, "Xb", "Background or initial state" )
729 __test_vvalue( Y, "Y", "Observation" )
730 __test_vvalue( U, "U", "Control" )
732 # Corrections et compléments des covariances
733 def __test_cvalue(argument, variable, argname, symbol=None):
734 if symbol is None: symbol = variable
736 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
737 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
738 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
739 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
741 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
743 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
745 __test_cvalue( B, "B", "Background" )
746 __test_cvalue( R, "R", "Observation" )
747 __test_cvalue( Q, "Q", "Evolution" )
749 # Corrections et compléments des opérateurs
750 def __test_ovalue(argument, variable, argname, symbol=None):
751 if symbol is None: symbol = variable
752 if argument is None or (isinstance(argument,dict) and len(argument)==0):
753 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
754 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
755 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
756 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
758 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
760 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
762 __test_ovalue( HO, "HO", "Observation", "H" )
763 __test_ovalue( EM, "EM", "Evolution", "M" )
764 __test_ovalue( CM, "CM", "Control Model", "C" )
766 # Corrections et compléments des bornes
767 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
768 logging.debug("%s Bounds taken into account"%(self._name,))
770 self._parameters["Bounds"] = None
771 if ("QBounds" in self._parameters) and isinstance(self._parameters["QBounds"], (list, tuple)) and (len(self._parameters["QBounds"]) > 0):
772 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
773 # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes
775 # Corrections et compléments de l'initialisation en X
776 if "InitializationPoint" in self._parameters:
778 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
779 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
780 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
781 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
782 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
784 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
786 if self._parameters["InitializationPoint"] is None:
787 raise ValueError("Forced initial point can not be set without any given Background or required value")
789 # Correction pour pallier a un bug de TNC sur le retour du Minimum
790 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
791 self.setParameterValue("StoreInternalVariables",True)
793 # Verbosité et logging
794 if logging.getLogger().level < logging.WARNING:
795 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
796 if PlatformInfo.has_scipy:
797 import scipy.optimize
798 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
800 self._parameters["optmessages"] = 15
802 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
803 if PlatformInfo.has_scipy:
804 import scipy.optimize
805 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
807 self._parameters["optmessages"] = 15
811 def _post_run(self,_oH=None):
813 if ("StoreSupplementaryCalculations" in self._parameters) and \
814 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
815 for _A in self.StoredVariables["APosterioriCovariance"]:
816 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
817 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
818 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
819 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
820 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
821 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
822 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
823 self.StoredVariables["APosterioriCorrelations"].store( _C )
824 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
825 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
826 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
827 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
828 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
829 logging.debug("%s Terminé", self._name)
832 def _toStore(self, key):
833 "True if in StoreSupplementaryCalculations, else False"
834 return key in self._parameters["StoreSupplementaryCalculations"]
836 def get(self, key=None):
838 Renvoie l'une des variables stockées identifiée par la clé, ou le
839 dictionnaire de l'ensemble des variables disponibles en l'absence de
840 clé. Ce sont directement les variables sous forme objet qui sont
841 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
842 des classes de persistance.
845 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
847 return self.StoredVariables
849 def __contains__(self, key=None):
850 "D.__contains__(k) -> True if D has a key k, else False"
851 if key is None or key.lower() not in self.__canonical_stored_name:
854 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
857 "D.keys() -> list of D's keys"
858 if hasattr(self, "StoredVariables"):
859 return self.StoredVariables.keys()
864 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
865 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
866 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
871 raise TypeError("pop expected at least 1 arguments, got 0")
872 "If key is not found, d is returned if given, otherwise KeyError is raised"
878 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
880 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
881 sa forme mathématique la plus naturelle possible.
883 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
885 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
887 Permet de définir dans l'algorithme des paramètres requis et leurs
888 caractéristiques par défaut.
891 raise ValueError("A name is mandatory to define a required parameter.")
893 self.__required_parameters[name] = {
895 "typecast" : typecast,
902 self.__canonical_parameter_name[name.lower()] = name
903 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
905 def getRequiredParameters(self, noDetails=True):
907 Renvoie la liste des noms de paramètres requis ou directement le
908 dictionnaire des paramètres requis.
911 return sorted(self.__required_parameters.keys())
913 return self.__required_parameters
915 def setParameterValue(self, name=None, value=None):
917 Renvoie la valeur d'un paramètre requis de manière contrôlée
919 __k = self.__canonical_parameter_name[name.lower()]
920 default = self.__required_parameters[__k]["default"]
921 typecast = self.__required_parameters[__k]["typecast"]
922 minval = self.__required_parameters[__k]["minval"]
923 maxval = self.__required_parameters[__k]["maxval"]
924 listval = self.__required_parameters[__k]["listval"]
925 listadv = self.__required_parameters[__k]["listadv"]
927 if value is None and default is None:
929 elif value is None and default is not None:
930 if typecast is None: __val = default
931 else: __val = typecast( default )
933 if typecast is None: __val = value
936 __val = typecast( value )
938 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
940 if minval is not None and (numpy.array(__val, float) < minval).any():
941 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
942 if maxval is not None and (numpy.array(__val, float) > maxval).any():
943 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
944 if listval is not None or listadv is not None:
945 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
947 if listval is not None and v in listval: continue
948 elif listadv is not None and v in listadv: continue
950 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
951 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
952 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
956 def requireInputArguments(self, mandatory=(), optional=()):
958 Permet d'imposer des arguments de calcul requis en entrée.
960 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
961 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
963 def getInputArguments(self):
965 Permet d'obtenir les listes des arguments de calcul requis en entrée.
967 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
969 def setAttributes(self, tags=()):
971 Permet d'adjoindre des attributs comme les tags de classification.
972 Renvoie la liste actuelle dans tous les cas.
974 self.__required_inputs["ClassificationTags"].extend( tags )
975 return self.__required_inputs["ClassificationTags"]
977 def __setParameters(self, fromDico={}, reset=False):
979 Permet de stocker les paramètres reçus dans le dictionnaire interne.
981 self._parameters.update( fromDico )
982 __inverse_fromDico_keys = {}
983 for k in fromDico.keys():
984 if k.lower() in self.__canonical_parameter_name:
985 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
986 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
987 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
988 for k in self.__required_parameters.keys():
989 if k in __canonic_fromDico_keys:
990 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
992 self._parameters[k] = self.setParameterValue(k)
995 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
997 def _getTimeState(self, reset=False):
999 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1002 self.__initial_cpu_time = time.process_time()
1003 self.__initial_elapsed_time = time.perf_counter()
1006 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1007 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1008 return self.__cpu_time, self.__elapsed_time
1010 def _StopOnTimeLimit(self, X=None, withReason=False):
1011 "Stop criteria on time limit: True/False [+ Reason]"
1012 c, e = self._getTimeState()
1013 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1014 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1015 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1016 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1018 __SC, __SR = False, ""
1024 # ==============================================================================
1025 class AlgorithmAndParameters(object):
1027 Classe générale d'interface d'action pour l'algorithme et ses paramètres
1030 name = "GenericAlgorithm",
1037 self.__name = str(name)
1041 self.__algorithm = {}
1042 self.__algorithmFile = None
1043 self.__algorithmName = None
1045 self.updateParameters( asDict, asScript )
1047 if asAlgorithm is None and asScript is not None:
1048 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1050 __Algo = asAlgorithm
1052 if __Algo is not None:
1053 self.__A = str(__Algo)
1054 self.__P.update( {"Algorithm":self.__A} )
1056 self.__setAlgorithm( self.__A )
1058 self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
1060 def updateParameters(self,
1064 "Mise a jour des parametres"
1065 if asDict is None and asScript is not None:
1066 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1070 if __Dict is not None:
1071 self.__P.update( dict(__Dict) )
1073 def executePythonScheme(self, asDictAO = None):
1074 "Permet de lancer le calcul d'assimilation"
1075 Operator.CM.clearCache()
1077 if not isinstance(asDictAO, dict):
1078 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1079 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1080 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1081 else: self.__Xb = None
1082 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1083 else: self.__Y = asDictAO["Observation"]
1084 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1085 else: self.__U = asDictAO["ControlInput"]
1086 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1087 else: self.__HO = asDictAO["ObservationOperator"]
1088 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1089 else: self.__EM = asDictAO["EvolutionModel"]
1090 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1091 else: self.__CM = asDictAO["ControlModel"]
1092 self.__B = asDictAO["BackgroundError"]
1093 self.__R = asDictAO["ObservationError"]
1094 self.__Q = asDictAO["EvolutionError"]
1096 self.__shape_validate()
1098 self.__algorithm.run(
1108 Parameters = self.__P,
1112 def executeYACSScheme(self, FileName=None):
1113 "Permet de lancer le calcul d'assimilation"
1114 if FileName is None or not os.path.exists(FileName):
1115 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1117 __file = os.path.abspath(FileName)
1118 logging.debug("The YACS file name is \"%s\"."%__file)
1119 if not PlatformInfo.has_salome or \
1120 not PlatformInfo.has_yacs or \
1121 not PlatformInfo.has_adao:
1122 raise ImportError("\n\n"+\
1123 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1124 "Please load the right environnement before trying to use it.\n")
1127 import SALOMERuntime
1129 SALOMERuntime.RuntimeSALOME_setRuntime()
1131 r = pilot.getRuntime()
1132 xmlLoader = loader.YACSLoader()
1133 xmlLoader.registerProcCataLoader()
1135 catalogAd = r.loadCatalog("proc", __file)
1136 r.addCatalog(catalogAd)
1141 p = xmlLoader.load(__file)
1142 except IOError as ex:
1143 print("The YACS XML schema file can not be loaded: %s"%(ex,))
1145 logger = p.getLogger("parser")
1146 if not logger.isEmpty():
1147 print("The imported YACS XML schema has errors on parsing:")
1148 print(logger.getStr())
1151 print("The YACS XML schema is not valid and will not be executed:")
1152 print(p.getErrorReport())
1154 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1155 p.checkConsistency(info)
1156 if info.areWarningsOrErrors():
1157 print("The YACS XML schema is not coherent and will not be executed:")
1158 print(info.getGlobalRepr())
1160 e = pilot.ExecutorSwig()
1162 if p.getEffectiveState() != pilot.DONE:
1163 print(p.getErrorReport())
1167 def get(self, key = None):
1168 "Vérifie l'existence d'une clé de variable ou de paramètres"
1169 if key in self.__algorithm:
1170 return self.__algorithm.get( key )
1171 elif key in self.__P:
1172 return self.__P[key]
1174 allvariables = self.__P
1175 for k in self.__variable_names_not_public: allvariables.pop(k, None)
1178 def pop(self, k, d):
1179 "Necessaire pour le pickling"
1180 return self.__algorithm.pop(k, d)
1182 def getAlgorithmRequiredParameters(self, noDetails=True):
1183 "Renvoie la liste des paramètres requis selon l'algorithme"
1184 return self.__algorithm.getRequiredParameters(noDetails)
    def getAlgorithmInputArguments(self):
        "Return the (mandatory, optional) input argument names required by the algorithm."
        return self.__algorithm.getInputArguments()
    def getAlgorithmAttributes(self):
        "Return the list of classification attributes declared by the algorithm."
        # NOTE: setAttributes() called with its default empty tag tuple extends
        # the stored list with nothing and returns the current list, so it
        # doubles here as a getter.
        return self.__algorithm.setAttributes()
1194 def setObserver(self, __V, __O, __I, __S):
1195 if self.__algorithm is None \
1196 or isinstance(self.__algorithm, dict) \
1197 or not hasattr(self.__algorithm,"StoredVariables"):
1198 raise ValueError("No observer can be build before choosing an algorithm.")
1199 if __V not in self.__algorithm:
1200 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1202 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1205 HookParameters = __I,
1208 def removeObserver(self, __V, __O, __A = False):
1209 if self.__algorithm is None \
1210 or isinstance(self.__algorithm, dict) \
1211 or not hasattr(self.__algorithm,"StoredVariables"):
1212 raise ValueError("No observer can be removed before choosing an algorithm.")
1213 if __V not in self.__algorithm:
1214 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1216 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1221 def hasObserver(self, __V):
1222 if self.__algorithm is None \
1223 or isinstance(self.__algorithm, dict) \
1224 or not hasattr(self.__algorithm,"StoredVariables"):
1226 if __V not in self.__algorithm:
1228 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1231 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1232 for k in self.__variable_names_not_public:
1233 if k in __allvariables: __allvariables.remove(k)
1234 return __allvariables
1236 def __contains__(self, key=None):
1237 "D.__contains__(k) -> True if D has a key k, else False"
1238 return key in self.__algorithm or key in self.__P
1241 "x.__repr__() <==> repr(x)"
1242 return repr(self.__A)+", "+repr(self.__P)
1245 "x.__str__() <==> str(x)"
1246 return str(self.__A)+", "+str(self.__P)
1248 def __setAlgorithm(self, choice = None ):
1250 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1251 d'assimilation. L'argument est un champ caractère se rapportant au nom
1252 d'un algorithme réalisant l'opération sur les arguments fixes.
1255 raise ValueError("Error: algorithm choice has to be given")
1256 if self.__algorithmName is not None:
1257 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1258 daDirectory = "daAlgorithms"
1260 # Recherche explicitement le fichier complet
1261 # ------------------------------------------
1263 for directory in sys.path:
1264 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1265 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1266 if module_path is None:
1267 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1269 # Importe le fichier complet comme un module
1270 # ------------------------------------------
1272 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1273 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1274 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1275 raise ImportError("this module does not define a valid elementary algorithm.")
1276 self.__algorithmName = str(choice)
1277 sys.path = sys_path_tmp ; del sys_path_tmp
1278 except ImportError as e:
1279 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1281 # Instancie un objet du type élémentaire du fichier
1282 # -------------------------------------------------
1283 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1286 def __shape_validate(self):
1288 Validation de la correspondance correcte des tailles des variables et
1289 des matrices s'il y en a.
1291 if self.__Xb is None: __Xb_shape = (0,)
1292 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1293 elif hasattr(self.__Xb,"shape"):
1294 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1295 else: __Xb_shape = self.__Xb.shape()
1296 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1298 if self.__Y is None: __Y_shape = (0,)
1299 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1300 elif hasattr(self.__Y,"shape"):
1301 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1302 else: __Y_shape = self.__Y.shape()
1303 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1305 if self.__U is None: __U_shape = (0,)
1306 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1307 elif hasattr(self.__U,"shape"):
1308 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1309 else: __U_shape = self.__U.shape()
1310 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1312 if self.__B is None: __B_shape = (0,0)
1313 elif hasattr(self.__B,"shape"):
1314 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1315 else: __B_shape = self.__B.shape()
1316 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1318 if self.__R is None: __R_shape = (0,0)
1319 elif hasattr(self.__R,"shape"):
1320 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1321 else: __R_shape = self.__R.shape()
1322 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1324 if self.__Q is None: __Q_shape = (0,0)
1325 elif hasattr(self.__Q,"shape"):
1326 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1327 else: __Q_shape = self.__Q.shape()
1328 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1330 if len(self.__HO) == 0: __HO_shape = (0,0)
1331 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1332 elif hasattr(self.__HO["Direct"],"shape"):
1333 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1334 else: __HO_shape = self.__HO["Direct"].shape()
1335 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1337 if len(self.__EM) == 0: __EM_shape = (0,0)
1338 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1339 elif hasattr(self.__EM["Direct"],"shape"):
1340 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1341 else: __EM_shape = self.__EM["Direct"].shape()
1342 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1344 if len(self.__CM) == 0: __CM_shape = (0,0)
1345 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1346 elif hasattr(self.__CM["Direct"],"shape"):
1347 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1348 else: __CM_shape = self.__CM["Direct"].shape()
1349 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1351 # Vérification des conditions
1352 # ---------------------------
1353 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1354 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1355 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1356 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1358 if not( min(__B_shape) == max(__B_shape) ):
1359 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1360 if not( min(__R_shape) == max(__R_shape) ):
1361 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1362 if not( min(__Q_shape) == max(__Q_shape) ):
1363 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1364 if not( min(__EM_shape) == max(__EM_shape) ):
1365 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1367 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1368 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1369 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1370 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1371 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1372 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1373 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1374 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1376 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1377 if self.__algorithmName in ["EnsembleBlue",]:
1378 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1379 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1380 for member in asPersistentVector:
1381 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1382 __Xb_shape = min(__B_shape)
1384 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1386 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1387 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1389 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1390 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1392 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1393 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1395 if ("Bounds" in self.__P) \
1396 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1397 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1398 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1399 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1401 if ("QBounds" in self.__P) \
1402 and (isinstance(self.__P["QBounds"], list) or isinstance(self.__P["QBounds"], tuple)) \
1403 and (len(self.__P["QBounds"]) != max(__Xb_shape)):
1404 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1405 %(len(self.__P["QBounds"]),max(__Xb_shape)))
1409 # ==============================================================================
1410 class RegulationAndParameters(object):
1412 Classe générale d'interface d'action pour la régulation et ses paramètres
1415 name = "GenericRegulation",
1422 self.__name = str(name)
1425 if asAlgorithm is None and asScript is not None:
1426 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1428 __Algo = asAlgorithm
1430 if asDict is None and asScript is not None:
1431 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1435 if __Dict is not None:
1436 self.__P.update( dict(__Dict) )
1438 if __Algo is not None:
1439 self.__P.update( {"Algorithm":str(__Algo)} )
1441 def get(self, key = None):
1442 "Vérifie l'existence d'une clé de variable ou de paramètres"
1444 return self.__P[key]
1448 # ==============================================================================
1449 class DataObserver(object):
1451 Classe générale d'interface de type observer
1454 name = "GenericObserver",
1466 self.__name = str(name)
1471 if onVariable is None:
1472 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1473 elif type(onVariable) in (tuple, list):
1474 self.__V = tuple(map( str, onVariable ))
1475 if withInfo is None:
1478 self.__I = (str(withInfo),)*len(self.__V)
1479 elif isinstance(onVariable, str):
1480 self.__V = (onVariable,)
1481 if withInfo is None:
1482 self.__I = (onVariable,)
1484 self.__I = (str(withInfo),)
1486 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1488 if asObsObject is not None:
1489 self.__O = asObsObject
1491 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1492 __Function = Observer2Func(__FunctionText)
1493 self.__O = __Function.getfunc()
1495 for k in range(len(self.__V)):
1498 if ename not in withAlgo:
1499 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1501 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1504 "x.__repr__() <==> repr(x)"
1505 return repr(self.__V)+"\n"+repr(self.__O)
1508 "x.__str__() <==> str(x)"
1509 return str(self.__V)+"\n"+str(self.__O)
1511 # ==============================================================================
1512 class UserScript(object):
1514 Classe générale d'interface de type texte de script utilisateur
1517 name = "GenericUserScript",
1524 self.__name = str(name)
1526 if asString is not None:
1528 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1529 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1530 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1531 self.__F = Templates.ObserverTemplates[asTemplate]
1532 elif asScript is not None:
1533 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1538 "x.__repr__() <==> repr(x)"
1539 return repr(self.__F)
1542 "x.__str__() <==> str(x)"
1543 return str(self.__F)
1545 # ==============================================================================
class ExternalParameters(object):
    Generic text-based interface class for user-supplied external parameters
        name        = "GenericExternalParameters",
        self.__name = str(name)
        self.updateParameters( asDict, asScript )
    def updateParameters(self,
        "Update of the parameters"
        # A script source is only used when no explicit dictionary is given
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def get(self, key = None):
            return self.__P[key]
            return list(self.__P.keys())
        return list(self.__P.keys())
1584 def pop(self, k, d):
1585 return self.__P.pop(k, d)
1588 return self.__P.items()
1590 def __contains__(self, key=None):
1591 "D.__contains__(k) -> True if D has a key k, else False"
1592 return key in self.__P
1594 # ==============================================================================
class State(object):
    Generic state-type interface class
        name               = "GenericVector",
        asPersistentVector = None,
        toBeChecked        = False,
        Used to define a vector:
        - asVector : data input, as a vector compatible with the
          numpy.matrix constructor, or "True" if the input comes from a script.
        - asPersistentVector : data input, as a series of vectors
          compatible with the numpy.matrix constructor, or as an object of
          Persistence type, or "True" if the input comes from a script.
        - asScript : if a valid script is given that contains a variable
          named "name", the variable is of type "asVector" (default) or
          "asPersistentVector" depending on which of these flags is set
        - asDataFile : if one or more valid files are given that contain
          values in columns, themselves named "colNames"
          (if no column name is indicated, a column named
          "name" is searched for), the columns are retrieved and stored row
          after row (colMajor=False, by default) or column after column
          (colMajor=True). The resulting variable is of type "asVector" (by
          default) or "asPersistentVector" depending on which of these flags
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__is_vector = False
        self.__is_series = False
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz files are assumed already column-major, hence the format test
                if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                if colNames is not None:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                    __Vector = numpy.ravel(__Vector, order = "F")
                    __Vector = numpy.ravel(__Vector, order = "C")
            __Vector, __Series = asVector, asPersistentVector
        if __Vector is not None:
            self.__is_vector = True
            # NOTE(review): numpy.float was removed in NumPy 1.24 — likely should be
            # the builtin float; confirm the minimum supported NumPy version.
            self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # NOTE(review): eval on a user-provided string — only safe for trusted input
                if isinstance(__Series, str): __Series = eval(__Series)
                for member in __Series:
                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        if scheduledBy is not None:
            self.__T = scheduledBy
    def getO(self, withScheduler=False):
            return self.__V, self.__T
        elif self.__T is None:
        "Internal type check"
        return self.__is_vector
        "Internal type check"
        return self.__is_series
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)
        "x.__str__() <==> str(x)"
        return str(self.__V)
1718 # ==============================================================================
class Covariance(object):
    Generic covariance-type interface class
        name          = "GenericCovariance",
        asCovariance  = None,
        asEyeByScalar = None,
        asEyeByVector = None,
        toBeChecked   = False,
        Used to define a covariance:
        - asCovariance : data input, as a matrix compatible with
          the numpy.matrix constructor
        - asEyeByScalar : data input as a single variance scalar,
          multiplying an identity correlation matrix, so that no matrix
          has to be given explicitly
        - asEyeByVector : data input as a single variance vector,
          to be put on the diagonal of a correlation matrix, so that no matrix
          has to be given explicitly
        - asCovObject : data input as a Python object, which has the
          mandatory methods "getT", "getI", "diag", "trace", "__add__",
          "__sub__", "__neg__", "__mul__", "__rmul__" and the optional ones "shape",
          "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
        - toBeChecked : boolean stating whether the SPD character of the full
          matrix has to be checked
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
                __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        if __Scalar is not None:
            # A string such as "2." or "2 ; 3" is tolerated; only the first token is kept
            if isinstance(__Scalar, str):
                __Scalar = __Scalar.replace(";"," ").replace(","," ").split()
                if len(__Scalar) > 0: __Scalar = __Scalar[0]
            if numpy.array(__Scalar).size != 1:
                raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
        elif __Vector is not None:
            if isinstance(__Vector, str):
                __Vector = __Vector.replace(";"," ").replace(","," ").split()
            self.__is_vector = True
            self.__C = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            # Duck-typing check of the mandatory covariance-object protocol
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
        # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
    def __validate(self):
        # Consistency checks on the stored covariance (shape, positivity)
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # Full SPD check via Cholesky, only when asked for or when logging is
        # verbose, since it is expensive. NOTE(review): the try/except wrapper
        # lines are elided in this view — each raise belongs to the except branch.
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
            L = numpy.linalg.cholesky( self.__C )
            raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
            L = self.__C.cholesky()
            raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        "Internal type check"
        # True when the covariance is stored as a single scalar variance
        return self.__is_scalar
        "Internal type check"
        # True when the covariance is stored as a diagonal variance vector
        return self.__is_vector
        "Internal type check"
        # True when the covariance is stored as a full matrix
        return self.__is_matrix
        "Internal type check"
        # True when the covariance is delegated to a user-supplied object
        return self.__is_object
            # Each derived-covariance constructor below returns a new Covariance
            # object, preserving the sparse representation whenever possible.
            return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject() and hasattr(self.__C,"getI"):
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
            return None # Indispensable
            return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject() and hasattr(self.__C,"getT"):
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
            raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
        "Cholesky decomposition"
            return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
            raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
    def choleskyI(self):
        "Inverse of the Cholesky decomposition"
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
            raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
        "Matrix square root"
            return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"sqrtm"):
            return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
            raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
        "Inverse of the matrix square root"
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"sqrtmI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
            raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
    def diag(self, msize=None):
        "Diagonal of the matrix"
            return numpy.diag(self.__C)
        elif self.isvector():
        elif self.isscalar():
                # msize is required: a scalar covariance carries no size of its own
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return self.__C * numpy.ones(int(msize))
        elif self.isobject() and hasattr(self.__C,"diag"):
            return self.__C.diag()
            raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
    def trace(self, msize=None):
        "Trace of the matrix"
            return numpy.trace(self.__C)
        elif self.isvector():
            return float(numpy.sum(self.__C))
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
            raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
    def asfullmatrix(self, msize=None):
            # Materialize the covariance as a dense float array
            return numpy.asarray(self.__C)
        elif self.isvector():
            return numpy.asarray( numpy.diag(self.__C), float )
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()
            raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
    def assparsematrix(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__C)
        "x.__str__() <==> str(x)"
        return str(self.__C)
1995 def __add__(self, other):
1996 "x.__add__(y) <==> x+y"
1997 if self.ismatrix() or self.isobject():
1998 return self.__C + numpy.asmatrix(other)
1999 elif self.isvector() or self.isscalar():
2000 _A = numpy.asarray(other)
2001 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2002 return numpy.asmatrix(_A)
2004 def __radd__(self, other):
2005 "x.__radd__(y) <==> y+x"
2006 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2008 def __sub__(self, other):
2009 "x.__sub__(y) <==> x-y"
2010 if self.ismatrix() or self.isobject():
2011 return self.__C - numpy.asmatrix(other)
2012 elif self.isvector() or self.isscalar():
2013 _A = numpy.asarray(other)
2014 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2015 return numpy.asmatrix(_A)
2017 def __rsub__(self, other):
2018 "x.__rsub__(y) <==> y-x"
2019 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
        "x.__neg__() <==> -x"
    def __matmul__(self, other):
        "x.__mul__(y) <==> x@y"
        # Dispatch on the internal storage kind and the operand type,
        # always returning plain ndarrays (not numpy.matrix).
        if self.ismatrix() and isinstance(other, (int, float)):
            return numpy.asarray(self.__C) * other
        elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.ravel(self.__C @ numpy.ravel(other))
            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asarray(self.__C) @ numpy.asarray(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.ravel(self.__C) * numpy.ravel(other)
            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
                return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return numpy.asarray(self.__C * other)
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.ravel(other)
                return self.__C * numpy.asarray(other)
        elif self.isobject():
            return self.__C.__matmul__(other)
            raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        # Same dispatch as __matmul__, but returning numpy.matrix results
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
                return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
            raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
    def __rmatmul__(self, other):
        "x.__rmul__(y) <==> y@x"
        # Right-hand matrix product: mirror of __matmul__ with operands swapped
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix(other) * self.__C
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmatmul__(other)
            raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        # Right-hand product: mirror of __mul__ with operands swapped
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix(other) * self.__C
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmul__(other)
            raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
        "x.__len__() <==> len(x)"
        return self.shape[0]
2139 # ==============================================================================
class Observer2Func(object):
    Creation of an observer function from its text source
    def __init__(self, corps=""):
        # Store the raw text body of the observer
        self.__corps = corps
    def func(self,var,info):
        "Observation function"
        "Return the function pointer held by the object"
2153 # ==============================================================================
class CaseLogger(object):
    Keeps a record of the commands used to create a case
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name = str(__name)
        self.__objname = str(__objname)
        self.__logSerie = []
        self.__switchoff = False
            # Built-in output formats (viewers) and input formats (loaders)
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))
    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record a single command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
                self.__switchoff = True
                self.__switchoff = False
    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized dump of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)
    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of the commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
2203 # ==============================================================================
        _extraArguments = None,
        _sFunction = lambda x: x,
    For an ordered list of input vectors, returns the corresponding list of
    values of the function given as argument
    # Initial checks and definitions
    # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
    if not PlatformInfo.isIterable( __xserie ):
        # NOTE(review): "unkown" typo in the message text — runtime string kept as-is
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
        __mpWorkers = int(_mpWorkers)
        import multiprocessing
        # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
        import multiprocessing
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
        # logging.debug("MULTF Internal multiprocessing calculation end")
        # logging.debug("MULTF Internal monoprocessing calculation begin")
        if _extraArguments is None:
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue ) )
        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
        elif _extraArguments is not None and isinstance(_extraArguments, dict):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
        # logging.debug("MULTF Internal monoprocessing calculation end")
    # logging.debug("MULTF Internal multifonction calculations end")
2262 # ==============================================================================
def CostFunction3D(_x,
        _Hm  = None, # To simulate Hm(x) : HO["Direct"].appliedTo
        _HmX = None, # Already-computed simulation of Hm(x)
        _arg = None, # Additional arguments for Hm, as a tuple
        _SIV = False, # To be phased out for 8.0
        _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
        _nPS = 0, # nbPreviousSteps
        _QM  = "DA", # QualityMeasure
        _SSV = {}, # Input and/or output : self.StoredVariables
        _fRt = False, # Whether to return the extended output
        _sSc = True, # Whether to store the SSC
    General-purpose cost function for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO, SA, Tabu,
    DFO, QuantileRegression
    # NOTE(review): the mutable defaults _SSC=[] and _SSV={} are shared across
    # calls — presumably callers always pass their own containers; confirm.
    for k in ["CostFunctionJ",
              "SimulatedObservationAtCurrentOptimum",
              "SimulatedObservationAtCurrentState",
        if hasattr(_SSV[k],"store"):
            _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    if _HmX is not None:
        raise ValueError("COSTFUNCTION3D Operator has to be defined.")
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    if numpy.any(numpy.isnan(_HX)):
        # A NaN simulation invalidates the whole cost evaluation
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            raise ValueError("Unknown asked quality measure!")
        J = float( Jb ) + float( Jo )
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
        if "IndexOfOptimum" in _SSC:
            _SSV["IndexOfOptimum"].append( IndexMin )
        if "CurrentOptimum" in _SSC:
            _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
        if "SimulatedObservationAtCurrentOptimum" in _SSC:
            _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    if _QM in ["QR"]: # For QuantileRegression
2377 # ==============================================================================
# Basic self-test entry point when the module is run directly.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')