1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
40 class CacheManager(object):
# General-purpose cache of operator evaluations: stores tuples of
# (point, value, norm, operator name) so that a previously computed H(x)
# can be reused instead of re-evaluated.
# NOTE(review): this source is an elided excerpt (missing interior lines,
# stripped indentation); the comments below annotate only the visible code.
42 General class for managing a cache of computations
# Constructor keyword defaults. "lenght" spelling is historical and must be
# preserved: callers pass these arguments by keyword.
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 The tolerance characteristics can be modified at creation time.
# __tolerBP : relative tolerance used in the proximity (redundancy) test
51 self.__tolerBP = float(toleranceInRedundancy)
# __lenghtOR : maximum number of cached entries (-1 = sized lazily on first store)
52 self.__lenghtOR = int(lenghtOfRedundancy)
# __initlnOR : remembered initial length, restored by enable() after a disable()
53 self.__initlnOR = self.__lenghtOR
63 def wasCalculatedIn(self, xValue, oName="" ):
64 "Check whether a computation matching the given value already exists"
# Scan the cache backwards (most recent entries first), limited to
# __lenghtOR entries.
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# Cheap rejection tests first: xValue not array-like, different operator
# name, or different vector size than the cached entry.
69 if not hasattr(xValue, 'size'):
71 elif (str(oName) != self.__listOPCV[i][3]):
73 elif (xValue.size != self.__listOPCV[i][0].size):
# First-component gap test: rejects distant points before paying for a
# full norm computation over the whole vector.
75 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full proximity test: ||x - x_cached|| < tolerance * ||x_cached||.
77 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
# Cache hit: reuse the stored operator value H(x).
79 __HxV = self.__listOPCV[i][1]
83 def storeValueInX(self, xValue, HxValue, oName="" ):
84 "Store, for an operator o, a computation Hx matching the value x"
# Lazy sizing on first store: cache length derived from the state size,
# capped at 50 components (i.e. at most 2*50+2 entries per operator name).
85 if self.__lenghtOR < 0:
86 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87 self.__initlnOR = self.__lenghtOR
88 self.__seenNames.append(str(oName))
89 if str(oName) not in self.__seenNames: # Extend the list if the operator name is new
90 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
91 self.__initlnOR += self.__lenghtOR
92 self.__seenNames.append(str(oName))
# Evict oldest entries (FIFO) once the cache exceeds its allowed length.
93 while len(self.__listOPCV) > self.__lenghtOR:
94 self.__listOPCV.pop(0)
95 self.__listOPCV.append( (
96 copy.copy(numpy.ravel(xValue)), # 0 Previous point
97 copy.copy(HxValue), # 1 Previous value
98 numpy.linalg.norm(xValue), # 2 Norm
99 str(oName), # 3 Operator name
# disable(): remember the current length and switch caching off.
104 self.__initlnOR = self.__lenghtOR
106 self.__enabled = False
# enable(): restore the remembered cache length and switch caching back on.
110 self.__lenghtOR = self.__initlnOR
111 self.__enabled = True
113 # ==============================================================================
114 class Operator(object):
# Wraps either a user-supplied Python (multi-)function or an explicit matrix
# behind a single operator interface, with call counting and an optional
# shared redundancy cache (class attribute Operator.CM, declared in elided
# lines).
# NOTE(review): this source is an elided excerpt (missing interior lines,
# stripped indentation); the comments below annotate only the visible code.
116 General interface class for a simple operator
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 inputAsMultiFunction = False,
129 enableMultiProcess = False,
130 extraArguments = None,
133 An object of this type is built by providing, with one of the
134 two keywords, either a Python function or multi-function, or a
137 - name : operator name
138 - fromMethod : Python function argument
139 - fromMatrix : argument suitable for the numpy.matrix constructor
140 - avoidingRedundancy : boolean avoiding (or not) redundant computations
141 - inputAsMultiFunction : boolean indicating a function explicitly
142 defined (or not) as a multi-function
143 - extraArguments : extra arguments passed to the base function
144 and its derivatives (tuple or dictionary)
146 self.__name = str(name)
# Per-instance call counters (matrix calls, method calls, cache hits).
147 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
148 self.__AvoidRC = bool( avoidingRedundancy )
149 self.__inputAsMF = bool( inputAsMultiFunction )
150 self.__mpEnabled = bool( enableMultiProcess )
151 self.__extraArgs = extraArguments
# Dispatch on the constructor inputs: native multi-function, mono-function
# wrapped into a multi-function, or explicit matrix.
152 if fromMethod is not None and self.__inputAsMF:
153 self.__Method = fromMethod # logtimer(fromMethod)
155 self.__Type = "Method"
156 elif fromMethod is not None and not self.__inputAsMF:
# Wrap the mono-function so that it accepts a series of inputs.
157 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
159 self.__Type = "Method"
160 elif fromMatrix is not None:
# NOTE(review): numpy.float is deprecated since NumPy 1.20 and removed in
# 1.24 — this line breaks on recent NumPy; should be the builtin float.
# Left untouched here because the excerpt is elided.
162 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
163 self.__Type = "Matrix"
169 def disableAvoidingRedundancy(self):
171 Operator.CM.disable()
173 def enableAvoidingRedundancy(self):
# Only the disable() branch is visible here; presumably the elided lines
# enable the cache when self.__AvoidRC is true — TODO confirm.
178 Operator.CM.disable()
184 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
186 Returns the result of applying the operator to a
187 series of xValue arguments. This method simply applies it, each
188 argument being expected a priori to be of the proper type.
190 - the series arguments are:
191 - xValue : argument suitable for applying the operator
192 - HValue : precomputed value of the operator at this point
193 - argsAsSerie : indicates whether the arguments are a mono- or multi-value
200 if HValue is not None:
204 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
206 if _HValue is not None:
207 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
# Precomputed values supplied: store them (as column matrices) instead of
# evaluating the operator.
209 for i in range(len(_HValue)):
210 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
212 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
217 for i, xv in enumerate(_xValue):
# Probe the shared cache for an already-computed value at this point.
219 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
221 __alreadyCalculated = False
223 if __alreadyCalculated:
224 self.__addOneCacheCall()
227 if self.__Matrix is not None:
# Linear case: one matrix-vector product per input.
228 self.__addOneMatrixCall()
229 _xv = numpy.matrix(numpy.ravel(xv)).T
230 _hv = self.__Matrix * _xv
232 self.__addOneMethodCall()
236 _HxValue.append( _hv )
# Non-linear case: batch all uncached points into one multi-function call.
238 if len(_xserie)>0 and self.__Matrix is None:
239 if self.__extraArgs is None:
240 _hserie = self.__Method( _xserie ) # MF computation
242 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
243 if not hasattr(_hserie, "pop"):
244 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
250 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally pack the series of results into one array, one column per result.
252 if returnSerieAsArrayMatrix:
253 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
255 if argsAsSerie: return _HxValue
256 else: return _HxValue[-1]
258 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
260 Returns the result of applying the operator to
261 (xValue, uValue) pairs. This method simply applies it, its
262 argument being expected a priori to be of the proper type. If uValue is None,
263 the operator is assumed to apply to xValue only.
265 - paires : the paired arguments are:
266 - xValue : X argument suitable for applying the operator
267 - uValue : U argument suitable for applying the operator
268 - argsAsSerie : indicates whether the argument is a mono- or multi-value
270 if argsAsSerie: _xuValue = paires
271 else: _xuValue = (paires,)
272 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Linear case: ignore the control U and apply the matrix to each X.
274 if self.__Matrix is not None:
276 for paire in _xuValue:
277 _xValue, _uValue = paire
278 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
279 self.__addOneMatrixCall()
280 _HxValue.append( self.__Matrix * _xValue )
# Functional case: pass the (X,U) pair when U is given, X alone otherwise.
283 for paire in _xuValue:
284 _xValue, _uValue = paire
285 if _uValue is not None:
286 _xuArgs.append( paire )
288 _xuArgs.append( _xValue )
289 self.__addOneMethodCall( len(_xuArgs) )
290 if self.__extraArgs is None:
291 _HxValue = self.__Method( _xuArgs ) # MF computation
293 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
295 if returnSerieAsArrayMatrix:
296 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
298 if argsAsSerie: return _HxValue
299 else: return _HxValue[-1]
301 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
303 Returns the result of applying the operator to a
304 series of xValue arguments, knowing that the operator is valid at
305 xNominal. This method simply applies it, its argument being
306 expected a priori to be of the proper type. If the operator is linear,
307 being a matrix, then it is valid at any nominal point and xNominal can
308 be arbitrary. There is a single pair by default, and argsAsSerie
309 allows one to indicate that the argument is multi-pairs.
311 - paires : the paired arguments are:
312 - xNominal : series of arguments giving the point where
313 the operator is built in order to be applied afterwards
314 - xValue : series of arguments suitable for applying the operator
315 - argsAsSerie : indicates whether the argument is a mono- or multi-value
317 if argsAsSerie: _nxValue = paires
318 else: _nxValue = (paires,)
319 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear case: the nominal point is irrelevant, apply the matrix directly.
321 if self.__Matrix is not None:
323 for paire in _nxValue:
324 _xNominal, _xValue = paire
325 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
326 self.__addOneMatrixCall()
327 _HxValue.append( self.__Matrix * _xValue )
# Functional case: forward the (xNominal, xValue) pairs to the multi-function.
329 self.__addOneMethodCall( len(_nxValue) )
330 if self.__extraArgs is None:
331 _HxValue = self.__Method( _nxValue ) # MF computation
333 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
335 if returnSerieAsArrayMatrix:
336 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
338 if argsAsSerie: return _HxValue
339 else: return _HxValue[-1]
341 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
343 Returns the operator in matrix form
345 if self.__Matrix is not None:
346 self.__addOneMatrixCall()
347 mValue = [self.__Matrix,]
# The string sentinel marks "no operating point given"; "None" itself is a
# legitimate user value, hence the dedicated sentinel.
348 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
351 self.__addOneMethodCall( len(ValueForMethodForm) )
352 for _vfmf in ValueForMethodForm:
# Evaluate the tangent form at each given operating point.
353 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
355 self.__addOneMethodCall()
356 mValue = self.__Method(((ValueForMethodForm, None),))
# Functional operator without an operating point: no matrix can be built.
358 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
360 if argsAsSerie: return mValue
361 else: return mValue[-1]
365 Returns the shape, in numpy form, if the operator is available as
366 a matrix
368 if self.__Matrix is not None:
369 return self.__Matrix.shape
371 raise ValueError("Matrix form of the operator is not available, nor the shape")
373 def nbcalls(self, which=None):
375 Returns the numbers of evaluations of the operator
# Tuple layout: local (this instance) counts first, then global
# (class-level, all instances) counts.
378 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
379 self.__NbCallsAsMatrix,
380 self.__NbCallsAsMethod,
381 self.__NbCallsOfCached,
382 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
383 Operator.NbCallsAsMatrix,
384 Operator.NbCallsAsMethod,
385 Operator.NbCallsOfCached,
387 if which is None: return __nbcalls
388 else: return __nbcalls[which]
390 def __addOneMatrixCall(self):
391 "Record one call"
392 self.__NbCallsAsMatrix += 1 # Local count
393 Operator.NbCallsAsMatrix += 1 # Global count
395 def __addOneMethodCall(self, nb = 1):
396 "Record one call"
397 self.__NbCallsAsMethod += nb # Local count
398 Operator.NbCallsAsMethod += nb # Global count
400 def __addOneCacheCall(self):
401 "Record one call"
402 self.__NbCallsOfCached += 1 # Local count
403 Operator.NbCallsOfCached += 1 # Global count
405 # ==============================================================================
406 class FullOperator(object):
# Builds the triple of Operator objects (Direct, Tangent, Adjoint) from any
# of the supported user inputs: a matrix, one function with approximated
# derivatives, three explicit functions, or a script providing them.
# NOTE(review): this source is an elided excerpt (missing interior lines,
# stripped indentation); the comments below annotate only the visible code.
408 General interface class for a full operator
409 (Direct, Tangent Linear, Adjoint)
412 name = "GenericFullOperator",
414 asOneFunction = None, # 1 function
415 asThreeFunctions = None, # 3 functions in a dictionary
416 asScript = None, # 1 or 3 function(s) given by script
417 asDict = None, # Parameters
419 extraArguments = None,
421 inputAsMF = False,# Function(s) as multi-functions
426 self.__name = str(name)
427 self.__check = bool(toBeChecked)
428 self.__extraArgs = extraArguments
# Normalize the multiprocessing flags contained in the parameter dictionary.
433 if (asDict is not None) and isinstance(asDict, dict):
434 __Parameters.update( asDict )
435 # Priority given to EnableMultiProcessingInDerivatives=True
436 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
437 __Parameters["EnableMultiProcessingInDerivatives"] = True
438 __Parameters["EnableMultiProcessingInEvaluation"] = False
439 if "EnableMultiProcessingInDerivatives" not in __Parameters:
440 __Parameters["EnableMultiProcessingInDerivatives"] = False
# Derivative-level and evaluation-level multiprocessing are exclusive.
441 if __Parameters["EnableMultiProcessingInDerivatives"]:
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "EnableMultiProcessingInEvaluation" not in __Parameters:
444 __Parameters["EnableMultiProcessingInEvaluation"] = False
445 if "withIncrement" in __Parameters: # Temporary backward-compatibility alias
446 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Script input: load either a matrix or the function(s) from the script file.
448 if asScript is not None:
449 __Matrix, __Function = None, None
451 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
453 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
454 __Function.update({"useApproximatedDerivatives":True})
455 __Function.update(__Parameters)
456 elif asThreeFunctions:
458 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
459 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
460 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
462 __Function.update(__Parameters)
# Direct in-memory inputs: one function (derivatives approximated) or three.
465 if asOneFunction is not None:
466 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
467 if asOneFunction["Direct"] is not None:
468 __Function = asOneFunction
470 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
472 __Function = { "Direct":asOneFunction }
473 __Function.update({"useApproximatedDerivatives":True})
474 __Function.update(__Parameters)
475 elif asThreeFunctions is not None:
# Full triple: Tangent and Adjoint explicitly given and derivatives not
# declared as approximated.
476 if isinstance(asThreeFunctions, dict) and \
477 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
478 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
479 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
480 __Function = asThreeFunctions
481 elif isinstance(asThreeFunctions, dict) and \
482 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
483 __Function = asThreeFunctions
484 __Function.update({"useApproximatedDerivatives":True})
486 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Without an explicit Direct, fall back on the Tangent as the direct operator.
487 if "Direct" not in asThreeFunctions:
488 __Function["Direct"] = asThreeFunctions["Tangent"]
489 __Function.update(__Parameters)
# Historical Python-2 guard kept for reference (class methods could not be
# used as operator functions).
493 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
494 # for k in ("Direct", "Tangent", "Adjoint"):
495 # if k in __Function and hasattr(__Function[k],"__class__"):
496 # if type(__Function[k]) is type(self.__init__):
497 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the optional operating-point input into a dictionary.
499 if appliedInX is not None and isinstance(appliedInX, dict):
500 __appliedInX = appliedInX
501 elif appliedInX is not None:
502 __appliedInX = {"HXb":appliedInX}
506 if scheduledBy is not None:
507 self.__T = scheduledBy
# Case 1: single Direct function with finite-difference derivatives.
509 if isinstance(__Function, dict) and \
510 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
511 ("Direct" in __Function) and (__Function["Direct"] is not None):
# Fill in the finite-difference defaults not supplied by the user.
512 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
513 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
514 if "withdX" not in __Function: __Function["withdX"] = None
515 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
516 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
517 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
518 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
519 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
520 from daCore import NumericObjects
521 FDA = NumericObjects.FDApproximation(
523 Function = __Function["Direct"],
524 centeredDF = __Function["CenteredFiniteDifference"],
525 increment = __Function["DifferentialIncrement"],
526 dX = __Function["withdX"],
527 extraArguments = self.__extraArgs,
528 avoidingRedundancy = __Function["withAvoidingRedundancy"],
529 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
530 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
531 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
532 mpWorkers = __Function["NumberOfProcesses"],
533 mfEnabled = __Function["withmfEnabled"],
535 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
536 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
537 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: three explicit functions (Direct, Tangent, Adjoint).
538 elif isinstance(__Function, dict) and \
539 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
540 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
541 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
542 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
543 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: explicit matrix — the adjoint is simply its transpose.
# NOTE(review): numpy.float is removed in NumPy >= 1.24; this line fails on
# recent NumPy and should use the builtin float. Left untouched here
# because the excerpt is elided.
544 elif asMatrix is not None:
545 __matrice = numpy.matrix( __Matrix, numpy.float )
546 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
547 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
548 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
551 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Normalize every operating-point value into a column numpy.matrix.
553 if __appliedInX is not None:
554 self.__FO["AppliedInX"] = {}
555 for key in list(__appliedInX.keys()):
556 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
557 # For the case where we have a true matrix
558 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
559 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
560 # For the case of a vector represented as an array with 2 dimensions
561 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
563 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
565 self.__FO["AppliedInX"] = None
571 "x.__repr__() <==> repr(x)"
572 return repr(self.__FO)
575 "x.__str__() <==> str(x)"
576 return str(self.__FO)
578 # ==============================================================================
579 class Algorithm(object):
581 Classe générale d'interface de type algorithme
583 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
584 d'assimilation, en fournissant un container (dictionnaire) de variables
585 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
587 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
589 def __init__(self, name):
# Initializes the generic storage (Persistence) variables shared by all
# elementary assimilation algorithms, plus the canonical-name lookup tables.
# NOTE(review): this source is an elided excerpt (missing interior lines,
# stripped indentation); the comments below annotate only the visible code.
591 The present initialization makes it possible to build storage variables
592 generically available in the elementary algorithms. These
593 storage variables are then kept in a dictionary
594 internal to the object, but accessed through the "get" method.
596 The expected variables are:
597 - APosterioriCorrelations : correlation matrix of the matrix A
598 - APosterioriCovariance : a posteriori covariance matrix: A
599 - APosterioriStandardDeviations : vector of standard deviations of the matrix A
600 - APosterioriVariances : vector of variances of the matrix A
601 - Analysis : analysis vector: Xa
602 - BMA : Background minus Analysis: Xa - Xb
603 - CostFunctionJ : global cost function, sum of the two following parts Jb and Jo
604 - CostFunctionJAtCurrentOptimum : global cost function at the current optimal state during iterations
605 - CostFunctionJb : background part of the cost function: Jb
606 - CostFunctionJbAtCurrentOptimum : background part at the current optimal state during iterations
607 - CostFunctionJo : observations part of the cost function: Jo
608 - CostFunctionJoAtCurrentOptimum : observations part at the current optimal state during iterations
609 - CurrentIterationNumber : current iteration number in iterative algorithms, starting from 0
610 - CurrentOptimum : current optimal state during iterations
611 - CurrentState : current state during iterations
612 - GradientOfCostFunctionJ : gradient of the global cost function
613 - GradientOfCostFunctionJb : gradient of the background part of the cost function
614 - GradientOfCostFunctionJo : gradient of the observations part of the cost function
615 - IndexOfOptimum : index of the current optimal state during iterations
616 - Innovation : the innovation: d = Y - H(X)
617 - InnovationAtCurrentState : the innovation at the current state: dn = Y - H(Xn)
618 - JacobianMatrixAtBackground : Jacobian matrix at the background state
619 - JacobianMatrixAtCurrentState : Jacobian matrix at the current state
620 - JacobianMatrixAtOptimum : Jacobian matrix at the optimum
621 - KalmanGainAtOptimum : Kalman gain at the optimum
622 - MahalanobisConsistency : covariance consistency indicator
623 - OMA : Observation minus Analysis: Y - Xa
624 - OMB : Observation minus Background: Y - Xb
625 - ForecastCovariance : covariance of the current forecast state during iterations
626 - ForecastState : current forecast state during iterations
627 - Residu : for the case of checking algorithms
628 - SampledStateForQuantiles : state samples for the quantile estimation
629 - SigmaBck2 : indicator of optimal correction of background errors
630 - SigmaObs2 : indicator of optimal correction of observation errors
631 - SimulatedObservationAtBackground : the observed state H(Xb) at the background
632 - SimulatedObservationAtCurrentOptimum : the observed state H(X) at the current optimal state
633 - SimulatedObservationAtCurrentState : the observed state H(X) at the current state
634 - SimulatedObservationAtOptimum : the observed state H(Xa) at the optimum
635 - SimulationQuantiles : observed states H(X) for the requested quantiles
636 Variables to be stored can be added in the initialization of
637 the elementary algorithm that will inherit from this class
639 logging.debug("%s Initialisation", str(name))
640 self._m = PlatformInfo.SystemUsage()
642 self._name = str( name )
643 self._parameters = {"StoreSupplementaryCalculations":[]}
644 self.__required_parameters = {}
645 self.__required_inputs = {
646 "RequiredInputValues":{"mandatory":(), "optional":()},
647 "ClassificationTags":[],
649 self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
650 self.__canonical_parameter_name = {} # Mapping "lower"->"correct"
651 self.__canonical_stored_name = {} # Mapping "lower"->"correct"
# One Persistence container per storable quantity, keyed by its public name.
653 self.StoredVariables = {}
654 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
655 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
656 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
657 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
658 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
659 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
660 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
661 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
662 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
663 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
664 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
665 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
666 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
667 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
668 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
669 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
670 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
671 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
672 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
673 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
674 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
675 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
676 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
677 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
678 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
679 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
680 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
681 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
682 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
683 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
684 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
685 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
686 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
687 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
688 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
689 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
690 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
691 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
692 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
693 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
694 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
695 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Build the case-insensitive lookup tables for stored and parameter names.
697 for k in self.StoredVariables:
698 self.__canonical_stored_name[k.lower()] = k
700 for k, v in self.__variable_names_not_public.items():
701 self.__canonical_parameter_name[k.lower()] = k
702 self.__canonical_parameter_name["algorithm"] = "Algorithm"
703 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
705 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
707 logging.debug("%s Lancement", self._name)
708 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
709 self._getTimeState(reset=True)
711 # Mise a jour des paramètres internes avec le contenu de Parameters, en
712 # reprenant les valeurs par défauts pour toutes celles non définies
713 self.__setParameters(Parameters, reset=True)
714 for k, v in self.__variable_names_not_public.items():
715 if k not in self._parameters: self.__setParameters( {k:v} )
717 # Corrections et compléments des vecteurs
718 def __test_vvalue(argument, variable, argname, symbol=None):
719 if symbol is None: symbol = variable
721 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
722 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
723 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
724 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
726 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
728 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
730 __test_vvalue( Xb, "Xb", "Background or initial state" )
731 __test_vvalue( Y, "Y", "Observation" )
732 __test_vvalue( U, "U", "Control" )
734 # Corrections et compléments des covariances
735 def __test_cvalue(argument, variable, argname, symbol=None):
736 if symbol is None: symbol = variable
738 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
739 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
740 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
741 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
743 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
745 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
747 __test_cvalue( B, "B", "Background" )
748 __test_cvalue( R, "R", "Observation" )
749 __test_cvalue( Q, "Q", "Evolution" )
751 # Corrections et compléments des opérateurs
752 def __test_ovalue(argument, variable, argname, symbol=None):
753 if symbol is None: symbol = variable
754 if argument is None or (isinstance(argument,dict) and len(argument)==0):
755 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
756 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
757 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
758 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
760 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
762 logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
764 __test_ovalue( HO, "HO", "Observation", "H" )
765 __test_ovalue( EM, "EM", "Evolution", "M" )
766 __test_ovalue( CM, "CM", "Control Model", "C" )
768 # Corrections et compléments des bornes
769 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
770 logging.debug("%s Bounds taken into account"%(self._name,))
772 self._parameters["Bounds"] = None
773 if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
774 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
775 # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes
777 # Corrections et compléments de l'initialisation en X
778 if "InitializationPoint" in self._parameters:
780 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
781 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
782 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
783 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
784 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
786 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
788 if self._parameters["InitializationPoint"] is None:
789 raise ValueError("Forced initial point can not be set without any given Background or required value")
791 # Correction pour pallier a un bug de TNC sur le retour du Minimum
792 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
793 self.setParameterValue("StoreInternalVariables",True)
795 # Verbosité et logging
796 if logging.getLogger().level < logging.WARNING:
797 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
798 if PlatformInfo.has_scipy:
799 import scipy.optimize
800 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
802 self._parameters["optmessages"] = 15
804 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
805 if PlatformInfo.has_scipy:
806 import scipy.optimize
807 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
809 self._parameters["optmessages"] = 15
813 def _post_run(self,_oH=None):
815 if ("StoreSupplementaryCalculations" in self._parameters) and \
816 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
817 for _A in self.StoredVariables["APosterioriCovariance"]:
818 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
819 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
820 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
821 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
822 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
823 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
824 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
825 self.StoredVariables["APosterioriCorrelations"].store( _C )
826 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
827 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
828 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
829 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
830 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
831 logging.debug("%s Terminé", self._name)
834 def _toStore(self, key):
835 "True if in StoreSupplementaryCalculations, else False"
836 return key in self._parameters["StoreSupplementaryCalculations"]
838 def get(self, key=None):
840 Renvoie l'une des variables stockées identifiée par la clé, ou le
841 dictionnaire de l'ensemble des variables disponibles en l'absence de
842 clé. Ce sont directement les variables sous forme objet qui sont
843 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
844 des classes de persistance.
847 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
849 return self.StoredVariables
851 def __contains__(self, key=None):
852 "D.__contains__(k) -> True if D has a key k, else False"
853 if key is None or key.lower() not in self.__canonical_stored_name:
856 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
859 "D.keys() -> list of D's keys"
860 if hasattr(self, "StoredVariables"):
861 return self.StoredVariables.keys()
866 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
867 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
868 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
873 raise TypeError("pop expected at least 1 arguments, got 0")
874 "If key is not found, d is returned if given, otherwise KeyError is raised"
880 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
882 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
883 sa forme mathématique la plus naturelle possible.
885 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
887 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
889 Permet de définir dans l'algorithme des paramètres requis et leurs
890 caractéristiques par défaut.
893 raise ValueError("A name is mandatory to define a required parameter.")
895 self.__required_parameters[name] = {
897 "typecast" : typecast,
904 self.__canonical_parameter_name[name.lower()] = name
905 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
907 def getRequiredParameters(self, noDetails=True):
909 Renvoie la liste des noms de paramètres requis ou directement le
910 dictionnaire des paramètres requis.
913 return sorted(self.__required_parameters.keys())
915 return self.__required_parameters
917 def setParameterValue(self, name=None, value=None):
919 Renvoie la valeur d'un paramètre requis de manière contrôlée
921 __k = self.__canonical_parameter_name[name.lower()]
922 default = self.__required_parameters[__k]["default"]
923 typecast = self.__required_parameters[__k]["typecast"]
924 minval = self.__required_parameters[__k]["minval"]
925 maxval = self.__required_parameters[__k]["maxval"]
926 listval = self.__required_parameters[__k]["listval"]
927 listadv = self.__required_parameters[__k]["listadv"]
929 if value is None and default is None:
931 elif value is None and default is not None:
932 if typecast is None: __val = default
933 else: __val = typecast( default )
935 if typecast is None: __val = value
938 __val = typecast( value )
940 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
942 if minval is not None and (numpy.array(__val, float) < minval).any():
943 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
944 if maxval is not None and (numpy.array(__val, float) > maxval).any():
945 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
946 if listval is not None or listadv is not None:
947 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
949 if listval is not None and v in listval: continue
950 elif listadv is not None and v in listadv: continue
952 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
953 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
954 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
958 def requireInputArguments(self, mandatory=(), optional=()):
960 Permet d'imposer des arguments de calcul requis en entrée.
962 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
963 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
965 def getInputArguments(self):
967 Permet d'obtenir les listes des arguments de calcul requis en entrée.
969 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
971 def setAttributes(self, tags=()):
973 Permet d'adjoindre des attributs comme les tags de classification.
974 Renvoie la liste actuelle dans tous les cas.
976 self.__required_inputs["ClassificationTags"].extend( tags )
977 return self.__required_inputs["ClassificationTags"]
979 def __setParameters(self, fromDico={}, reset=False):
981 Permet de stocker les paramètres reçus dans le dictionnaire interne.
983 self._parameters.update( fromDico )
984 __inverse_fromDico_keys = {}
985 for k in fromDico.keys():
986 if k.lower() in self.__canonical_parameter_name:
987 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
988 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
989 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
990 for k in self.__required_parameters.keys():
991 if k in __canonic_fromDico_keys:
992 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
994 self._parameters[k] = self.setParameterValue(k)
997 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
999 def _getTimeState(self, reset=False):
1001 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1004 self.__initial_cpu_time = time.process_time()
1005 self.__initial_elapsed_time = time.perf_counter()
1008 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1009 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1010 return self.__cpu_time, self.__elapsed_time
1012 def _StopOnTimeLimit(self, X=None, withReason=False):
1013 "Stop criteria on time limit: True/False [+ Reason]"
1014 c, e = self._getTimeState()
1015 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1016 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1017 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1018 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1020 __SC, __SR = False, ""
1026 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action interface class for an algorithm and its parameters.
    """
    def __init__(self,
                 name        = "GenericAlgorithm",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None,
                ):
        """
        Build the algorithm holder from an algorithm name and a parameters
        dictionary, or from a user script providing them.
        """
        self.__name = str(name)
        self.__A    = None
        self.__P    = {}
        #
        self.__algorithm     = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        #
        self.updateParameters( asDict, asScript )
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        #
        self.__setAlgorithm( self.__A )
        #
        self.__variable_names_not_public = {"nextStep":False} # Duplication in Algorithm

    def updateParameters(self,
                 asDict   = None,
                 asScript = None,
                ):
        "Update the parameters from a dictionary or a user script"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def executePythonScheme(self, asDictAO = None):
        "Run the assimilation calculation in pure Python mode"
        Operator.CM.clearCache()
        #
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each input through its getO() accessor when available
        if   hasattr(asDictAO["Background"],"getO"):        self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"):     self.__Xb = asDictAO["CheckingPoint"].getO()
        else:                                               self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"):         self.__Y  = asDictAO["Observation"].getO()
        else:                                               self.__Y  = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"):        self.__U  = asDictAO["ControlInput"].getO()
        else:                                               self.__U  = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else:                                               self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"):      self.__EM = asDictAO["EvolutionModel"].getO()
        else:                                               self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"):        self.__CM = asDictAO["ControlModel"].getO()
        else:                                               self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        #
        self.__shape_validate()
        #
        self.__algorithm.run(
            Xb         = self.__Xb,
            Y          = self.__Y,
            U          = self.__U,
            HO         = self.__HO,
            EM         = self.__EM,
            CM         = self.__CM,
            R          = self.__R,
            B          = self.__B,
            Q          = self.__Q,
            Parameters = self.__P,
            )
        return 0

    def executeYACSScheme(self, FileName=None):
        "Run the assimilation calculation through a YACS scheme"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        else:
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        #
        import pilot
        import SALOMERuntime
        import loader
        SALOMERuntime.RuntimeSALOME_setRuntime()
        #
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        try:
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)
        except Exception:
            # Best effort: a missing catalog is not fatal for loading the schema
            pass
        #
        try:
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        #
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
        #
        if not p.isValid():
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        #
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        #
        e = pilot.ExecutorSwig()
        e.RunW(p)
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
        #
        return 0

    def get(self, key = None):
        "Check the existence of, and return, a variable or parameter key"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
        else:
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
            return allvariables

    def pop(self, k, d):
        "Required for pickling"
        return self.__algorithm.pop(k, d)

    def getAlgorithmRequiredParameters(self, noDetails=True):
        "Return the list of required parameters according to the algorithm"
        return self.__algorithm.getRequiredParameters(noDetails)

    def getAlgorithmInputArguments(self):
        "Return the list of required inputs according to the algorithm"
        return self.__algorithm.getInputArguments()

    def getAlgorithmAttributes(self):
        "Return the list of attributes according to the algorithm"
        return self.__algorithm.setAttributes()

    def setObserver(self, __V, __O, __I, __S):
        "Attach an observer function __O with info __I to the variable __V"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        else:
            self.__algorithm.StoredVariables[ __V ].setDataObserver(
                Scheduler      = __S,
                HookFunction   = __O,
                HookParameters = __I,
                )

    def removeObserver(self, __V, __O, __A = False):
        "Remove the observer __O (or all of them when __A) from the variable __V"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        else:
            return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
                HookFunction   = __O,
                AllObservers   = __A,
                )

    def hasObserver(self, __V):
        "True if the variable __V currently has at least one observer"
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            return False
        if __V not in self.__algorithm:
            return False
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()

    def keys(self):
        "Return the keys of both the algorithm variables and the parameters"
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__algorithm or key in self.__P

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)

    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm used to carry out the assimilation study. The
        argument is a string naming an algorithm module that performs the
        operation on the fixed arguments.
        """
        if choice is None:
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly look for the complete file
        # ------------------------------------------
        module_path = None
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n             The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------------
        try:
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n             The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the file's elementary type
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()

    def __shape_validate(self):
        """
        Validate the mutual consistency of the sizes of the variables and of
        the matrices, when there are any.
        """
        if self.__Xb is None:                      __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"):            __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else:                                  __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        #
        if self.__Y is None:                      __Y_shape = (0,)
        elif hasattr(self.__Y,"size"):            __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else:                                 __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        #
        if self.__U is None:                      __U_shape = (0,)
        elif hasattr(self.__U,"size"):            __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else:                                 __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        #
        if self.__B is None:                      __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else:                                 __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        #
        if self.__R is None:                      __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else:                                 __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        #
        if self.__Q is None:                      __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else:                                 __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        #
        if len(self.__HO) == 0:                              __HO_shape = (0,0)
        elif isinstance(self.__HO, dict):                    __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else:                                            __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        #
        if len(self.__EM) == 0:                              __EM_shape = (0,0)
        elif isinstance(self.__EM, dict):                    __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else:                                            __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        #
        if len(self.__CM) == 0:                              __CM_shape = (0,0)
        elif isinstance(self.__CM, dict):                    __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else:                                            __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Check of the conditions
        # ---------------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        #
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        #
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        #
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            if self.__algorithmName in ["EnsembleBlue",]:
                # EnsembleBlue accepts a background given as an ensemble of
                # vectors: reinterpret it as a persistent vector series
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    # Use builtin float: the numpy.float alias was removed in NumPy 1.24
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), float ).T )
                __Xb_shape = min(__B_shape)
            else:
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        #
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        #
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        #
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        #
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
        #
        if ("StateBoundsForQuantiles" in self.__P) \
            and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
            and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1411 # ==============================================================================
class RegulationAndParameters(object):
    """
    General action interface class for the regulation and its parameters.
    """
    def __init__(self,
                 name        = "GenericRegulation",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None,
                ):
        """
        Store the regulation algorithm name and its parameters, both of which
        can be given directly or through a user script.
        """
        self.__name = str(name)
        self.__P    = {}
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Return the value of a parameter key, or all of them when no key is given"
        if key in self.__P:
            return self.__P[key]
        else:
            return self.__P
1450 # ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class.
    """
    def __init__(self,
                 name        = "GenericObserver",
                 onVariable  = None,
                 asTemplate  = None,
                 asString    = None,
                 asScript    = None,
                 asObsObject = None,
                 withInfo    = None,
                 scheduledBy = None,
                 withAlgo    = None,
                ):
        """
        Attach an observer (given as an object, a string, a script or a
        template) to one or several named variables of the algorithm withAlgo.
        """
        self.__name = str(name)
        self.__V    = None
        self.__O    = None
        self.__I    = None
        #
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                # By default the info attached to each variable is its own name
                self.__I = self.__V
            else:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            else:
                self.__I = (str(withInfo),)
        else:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        if asObsObject is not None:
            self.__O = asObsObject
        else:
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            ename = self.__V[k]
            einfo = self.__I[k]
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            else:
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1513 # ==============================================================================
class UserScript(object):
    """
    General interface class for a user script given as text.
    """
    def __init__(self,
                 name       = "GenericUserScript",
                 asTemplate = None,
                 asString   = None,
                 asScript   = None,
                ):
        """
        Store the script text, taken (in priority order) from a direct string,
        a named template, or a script file.
        """
        self.__name = str(name)
        #
        if asString is not None:
            self.__F = asString
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        else:
            self.__F = ""

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__F)
1547 # ==============================================================================
class ExternalParameters(object):
    """
    General interface class for user-given external parameters.
    """
    def __init__(self,
                 name     = "GenericExternalParameters",
                 asDict   = None,
                 asScript = None,
                ):
        """
        Store the external parameters, given directly as a dictionary or
        through a user script.
        """
        self.__name = str(name)
        self.__P    = {}
        #
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
                 asDict   = None,
                 asScript = None,
                ):
        "Update the parameters from a dictionary or a user script"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        "Return the value of a parameter key, or the list of keys when absent"
        if key in self.__P:
            return self.__P[key]
        else:
            return list(self.__P.keys())

    def keys(self):
        "D.keys() -> list of D's keys"
        return list(self.__P.keys())

    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        return self.__P.pop(k, d)

    def items(self):
        "D.items() -> view of D's (key, value) pairs"
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1596 # ==============================================================================
# NOTE(review): numbered excerpt with missing lines (docstring delimiters,
# several else: branches and def lines); code lines kept byte-identical.
1597 class State(object):
# Generic state-vector interface: wraps either a single vector (is_vector)
# or a series of vectors stored in a Persistence object (is_series).
1599 Classe générale d'interface de type état
1602 name = "GenericVector",
1604 asPersistentVector = None,
1610 toBeChecked = False,
1613 Permet de définir un vecteur :
1614 - asVector : entrée des données, comme un vecteur compatible avec le
1615 constructeur de numpy.matrix, ou "True" si entrée par script.
1616 - asPersistentVector : entrée des données, comme une série de vecteurs
1617 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1618 type Persistence, ou "True" si entrée par script.
1619 - asScript : si un script valide est donné contenant une variable
1620 nommée "name", la variable est de type "asVector" (par défaut) ou
1621 "asPersistentVector" selon que l'une de ces variables est placée à
1623 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1624 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1625 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1626 nommée "name"), on récupère les colonnes et on les range ligne après
1627 ligne (colMajor=False, par défaut) ou colonne après colonne
1628 (colMajor=True). La variable résultante est de type "asVector" (par
1629 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1632 self.__name = str(name)
1633 self.__check = bool(toBeChecked)
1637 self.__is_vector = False
1638 self.__is_series = False
# Input resolution: script file, then data file, then direct arguments.
1640 if asScript is not None:
1641 __Vector, __Series = None, None
1642 if asPersistentVector:
1643 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1645 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1646 elif asDataFile is not None:
1647 __Vector, __Series = None, None
1648 if asPersistentVector:
1649 if colNames is not None:
1650 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1652 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are stored transposed relative to column-text files, so the
# transpose condition is inverted for "application/numpy.npz".
1653 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1654 __Series = numpy.transpose(__Series)
1655 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1656 __Series = numpy.transpose(__Series)
1658 if colNames is not None:
1659 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1661 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1663 __Vector = numpy.ravel(__Vector, order = "F")
1665 __Vector = numpy.ravel(__Vector, order = "C")
1667 __Vector, __Series = asVector, asPersistentVector
1669 if __Vector is not None:
1670 self.__is_vector = True
# NOTE(review): numpy.matrix/asmatrix are deprecated and numpy.float was
# removed in NumPy >= 1.20 — this line breaks on modern NumPy; confirm the
# pinned NumPy version before touching.
1671 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1672 self.shape = self.__V.shape
1673 self.size = self.__V.size
1674 elif __Series is not None:
1675 self.__is_series = True
1676 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1677 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a user-supplied string — unsafe on untrusted
# input; flagged, not changed here.
1678 if isinstance(__Series, str): __Series = eval(__Series)
1679 for member in __Series:
1680 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
# Persistence objects may expose shape as attribute or as a method.
1683 if isinstance(self.__V.shape, (tuple, list)):
1684 self.shape = self.__V.shape
1686 self.shape = self.__V.shape()
1687 if len(self.shape) == 1:
1688 self.shape = (self.shape[0],1)
1689 self.size = self.shape[0] * self.shape[1]
1691 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1693 if scheduledBy is not None:
1694 self.__T = scheduledBy
# Accessor: optionally returns the scheduler self.__T alongside the value.
1696 def getO(self, withScheduler=False):
1698 return self.__V, self.__T
1699 elif self.__T is None:
# Internal-type predicates.
1705 "Vérification du type interne"
1706 return self.__is_vector
1709 "Vérification du type interne"
1710 return self.__is_series
1713 "x.__repr__() <==> repr(x)"
1714 return repr(self.__V)
1717 "x.__str__() <==> str(x)"
1718 return str(self.__V)
1720 # ==============================================================================
# NOTE(review): numbered excerpt with many missing lines (def signatures,
# else: branches, try/except around cholesky); code lines kept byte-identical.
1721 class Covariance(object):
# Generic covariance interface with four internal representations:
# scalar (sigma^2 * I), vector (diagonal), full matrix, or external object.
1723 Classe générale d'interface de type covariance
1726 name = "GenericCovariance",
1727 asCovariance = None,
1728 asEyeByScalar = None,
1729 asEyeByVector = None,
1732 toBeChecked = False,
1735 Permet de définir une covariance :
1736 - asCovariance : entrée des données, comme une matrice compatible avec
1737 le constructeur de numpy.matrix
1738 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1739 multiplicatif d'une matrice de corrélation identité, aucune matrice
1740 n'étant donc explicitement à donner
1741 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1742 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1743 n'étant donc explicitement à donner
1744 - asCovObject : entrée des données comme un objet python, qui a les
1745 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1746 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1747 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1748 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1749 pleine doit être vérifié
1751 self.__name = str(name)
1752 self.__check = bool(toBeChecked)
# Exactly one of these flags becomes True depending on the input kind.
1755 self.__is_scalar = False
1756 self.__is_vector = False
1757 self.__is_matrix = False
1758 self.__is_object = False
1760 if asScript is not None:
1761 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1763 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1765 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1767 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1769 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1771 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# --- Normalization of the four possible inputs ---
1773 if __Scalar is not None:
# Accept "1.0" or "1;2,3"-style strings: keep the first token only.
1774 if isinstance(__Scalar, str):
1775 __Scalar = __Scalar.replace(";"," ").replace(","," ").split()
1776 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1777 if numpy.array(__Scalar).size != 1:
1778 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
1779 self.__is_scalar = True
# abs() silently forces non-negativity of the variance scalar.
1780 self.__C = numpy.abs( float(__Scalar) )
1783 elif __Vector is not None:
1784 if isinstance(__Vector, str):
1785 __Vector = __Vector.replace(";"," ").replace(","," ").split()
1786 self.__is_vector = True
1787 self.__C = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) )
1788 self.shape = (self.__C.size,self.__C.size)
1789 self.size = self.__C.size**2
1790 elif __Matrix is not None:
1791 self.__is_matrix = True
# NOTE(review): numpy.matrix is deprecated upstream; kept as-is here.
1792 self.__C = numpy.matrix( __Matrix, float )
1793 self.shape = self.__C.shape
1794 self.size = self.__C.size
1795 elif __Object is not None:
1796 self.__is_object = True
# Duck-type check: the external object must provide the matrix protocol.
1798 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1799 if not hasattr(self.__C,at):
1800 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1801 if hasattr(self.__C,"shape"):
1802 self.shape = self.__C.shape
1805 if hasattr(self.__C,"size"):
1806 self.size = self.__C.size
1811 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# --- Consistency / SPD validation ---
1815 def __validate(self):
1817 if self.__C is None:
1818 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1819 if self.ismatrix() and min(self.shape) != max(self.shape):
1820 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1821 if self.isobject() and min(self.shape) != max(self.shape):
1822 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1823 if self.isscalar() and self.__C <= 0:
1824 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1825 if self.isvector() and (self.__C <= 0).any():
1826 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# SPD check via Cholesky is only attempted when explicitly requested or
# when logging is verbose (level below WARNING).
1827 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1829 L = numpy.linalg.cholesky( self.__C )
1831 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1832 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1834 L = self.__C.cholesky()
1836 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# --- Internal-type predicates ---
1839 "Vérification du type interne"
1840 return self.__is_scalar
1843 "Vérification du type interne"
1844 return self.__is_vector
1847 "Vérification du type interne"
1848 return self.__is_matrix
1851 "Vérification du type interne"
1852 return self.__is_object
# --- Derived covariances: each returns a new Covariance of the same kind ---
# Inverse (getI): scalar/vector cases use the cheap elementwise reciprocal.
1857 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
1858 elif self.isvector():
1859 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1860 elif self.isscalar():
1861 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1862 elif self.isobject() and hasattr(self.__C,"getI"):
1863 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1865 return None # Indispensable
# Transpose (getT): diagonal/scalar forms are symmetric, returned unchanged.
1870 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1871 elif self.isvector():
1872 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1873 elif self.isscalar():
1874 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1875 elif self.isobject() and hasattr(self.__C,"getT"):
1876 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1878 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
1881 "Décomposition de Cholesky"
1883 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1884 elif self.isvector():
1885 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1886 elif self.isscalar():
1887 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1888 elif self.isobject() and hasattr(self.__C,"cholesky"):
1889 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1891 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
1893 def choleskyI(self):
1894 "Inversion de la décomposition de Cholesky"
1896 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1897 elif self.isvector():
1898 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1899 elif self.isscalar():
1900 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1901 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1902 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1904 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
# Matrix square root via scipy.linalg.sqrtm (real part kept).
1907 "Racine carrée matricielle"
1910 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
1911 elif self.isvector():
1912 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1913 elif self.isscalar():
1914 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1915 elif self.isobject() and hasattr(self.__C,"sqrtm"):
1916 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
1918 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
1921 "Inversion de la racine carrée matricielle"
1924 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
1925 elif self.isvector():
1926 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1927 elif self.isscalar():
1928 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1929 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
1930 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
1932 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
# --- Scalar/vector views: diag, trace, dense expansion ---
# For scalar form, msize (diagonal length) must be supplied by the caller.
1934 def diag(self, msize=None):
1935 "Diagonale de la matrice"
1937 return numpy.diag(self.__C)
1938 elif self.isvector():
1940 elif self.isscalar():
1942 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1944 return self.__C * numpy.ones(int(msize))
1945 elif self.isobject() and hasattr(self.__C,"diag"):
1946 return self.__C.diag()
1948 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
1950 def trace(self, msize=None):
1951 "Trace de la matrice"
1953 return numpy.trace(self.__C)
1954 elif self.isvector():
1955 return float(numpy.sum(self.__C))
1956 elif self.isscalar():
1958 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1960 return self.__C * int(msize)
1961 elif self.isobject():
1962 return self.__C.trace()
1964 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
1966 def asfullmatrix(self, msize=None):
1969 return numpy.asarray(self.__C)
1970 elif self.isvector():
1971 return numpy.asarray( numpy.diag(self.__C), float )
1972 elif self.isscalar():
1974 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1976 return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
1977 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1978 return self.__C.asfullmatrix()
1980 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
1982 def assparsematrix(self):
1990 "x.__repr__() <==> repr(x)"
1991 return repr(self.__C)
1994 "x.__str__() <==> str(x)"
1995 return str(self.__C)
# --- Arithmetic operators, dispatched on the internal representation ---
1997 def __add__(self, other):
1998 "x.__add__(y) <==> x+y"
1999 if self.ismatrix() or self.isobject():
2000 return self.__C + numpy.asmatrix(other)
2001 elif self.isvector() or self.isscalar():
# NOTE(review): numpy.asarray may return a view of `other`, so the
# in-place += on the diagonal stride can mutate the caller's array —
# confirm whether callers rely on `other` being untouched.
2002 _A = numpy.asarray(other)
2003 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2004 return numpy.asmatrix(_A)
2006 def __radd__(self, other):
2007 "x.__radd__(y) <==> y+x"
2008 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2010 def __sub__(self, other):
2011 "x.__sub__(y) <==> x-y"
2012 if self.ismatrix() or self.isobject():
2013 return self.__C - numpy.asmatrix(other)
2014 elif self.isvector() or self.isscalar():
2015 _A = numpy.asarray(other)
2016 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2017 return numpy.asmatrix(_A)
2019 def __rsub__(self, other):
2020 "x.__rsub__(y) <==> y-x"
2021 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2024 "x.__neg__() <==> -x"
# __matmul__ returns plain ndarrays; __mul__ (below) returns numpy.matrix.
2027 def __matmul__(self, other):
2028 "x.__mul__(y) <==> x@y"
2029 if self.ismatrix() and isinstance(other, (int, float)):
2030 return numpy.asarray(self.__C) * other
2031 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2032 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2033 return numpy.ravel(self.__C @ numpy.ravel(other))
2034 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2035 return numpy.asarray(self.__C) @ numpy.asarray(other)
2037 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2038 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2039 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2040 return numpy.ravel(self.__C) * numpy.ravel(other)
2041 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2042 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2044 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2045 elif self.isscalar() and isinstance(other,numpy.matrix):
2046 return numpy.asarray(self.__C * other)
2047 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2048 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2049 return self.__C * numpy.ravel(other)
2051 return self.__C * numpy.asarray(other)
2052 elif self.isobject():
2053 return self.__C.__matmul__(other)
2055 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2057 def __mul__(self, other):
2058 "x.__mul__(y) <==> x*y"
2059 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2060 return self.__C * other
2061 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2062 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2063 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2064 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2065 return self.__C * numpy.asmatrix(other)
2067 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2068 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2069 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2070 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2071 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2072 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2074 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2075 elif self.isscalar() and isinstance(other,numpy.matrix):
2076 return self.__C * other
2077 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2078 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2079 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2081 return self.__C * numpy.asmatrix(other)
2082 elif self.isobject():
2083 return self.__C.__mul__(other)
2085 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2087 def __rmatmul__(self, other):
2088 "x.__rmul__(y) <==> y@x"
2089 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2090 return other * self.__C
2091 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2092 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2093 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2094 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2095 return numpy.asmatrix(other) * self.__C
2097 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2098 elif self.isvector() and isinstance(other,numpy.matrix):
2099 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2100 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2101 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2102 return numpy.asmatrix(numpy.array(other) * self.__C)
2104 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2105 elif self.isscalar() and isinstance(other,numpy.matrix):
2106 return other * self.__C
2107 elif self.isobject():
2108 return self.__C.__rmatmul__(other)
2110 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
# __rmul__ mirrors __rmatmul__ with numpy.matrix semantics.
2112 def __rmul__(self, other):
2113 "x.__rmul__(y) <==> y*x"
2114 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2115 return other * self.__C
2116 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2117 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2118 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2119 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2120 return numpy.asmatrix(other) * self.__C
2122 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2123 elif self.isvector() and isinstance(other,numpy.matrix):
2124 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2125 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2126 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2127 return numpy.asmatrix(numpy.array(other) * self.__C)
2129 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2130 elif self.isscalar() and isinstance(other,numpy.matrix):
2131 return other * self.__C
2132 elif self.isobject():
2133 return self.__C.__rmul__(other)
2135 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2138 "x.__len__() <==> len(x)"
2139 return self.shape[0]
2141 # ==============================================================================
# NOTE(review): numbered excerpt; the bodies of func() and getfunc() are
# largely missing from this dump. Code lines kept byte-identical.
2142 class Observer2Func(object):
# Builds a callable observer from its source text (stored in self.__corps).
2144 Creation d'une fonction d'observateur a partir de son texte
2146 def __init__(self, corps=""):
2147 self.__corps = corps
2148 def func(self,var,info):
2149 "Fonction d'observation"
2152 "Restitution du pointeur de fonction dans l'objet"
2155 # ==============================================================================
# NOTE(review): numbered excerpt; the opening lines of the viewers/loaders
# dict literals and some def lines are missing. Code kept byte-identical.
2156 class CaseLogger(object):
# Records the sequence of case-creation commands (self.__logSerie) and can
# replay them through pluggable viewer/loader formatters.
2158 Conservation des commandes de creation d'un cas
2160 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2161 self.__name = str(__name)
2162 self.__objname = str(__objname)
2163 self.__logSerie = []
2164 self.__switchoff = False
# Built-in output formats (viewers) keyed by format name.
2166 "TUI" :Interfaces._TUIViewer,
2167 "SCD" :Interfaces._SCDViewer,
2168 "YACS":Interfaces._YACSViewer,
# Built-in input formats (loaders).
2171 "TUI" :Interfaces._TUIViewer,
2172 "COM" :Interfaces._COMViewer,
2174 if __addViewers is not None:
2175 self.__viewers.update(dict(__addViewers))
2176 if __addLoaders is not None:
2177 self.__loaders.update(dict(__addLoaders))
2179 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2180 "Enregistrement d'une commande individuelle"
# Recording is suppressed while self.__switchoff is True.
2181 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2182 if "self" in __keys: __keys.remove("self")
2183 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2185 self.__switchoff = True
2187 self.__switchoff = False
2189 def dump(self, __filename=None, __format="TUI", __upa=""):
2190 "Restitution normalisée des commandes"
2191 if __format in self.__viewers:
2192 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2194 raise ValueError("Dumping as \"%s\" is not available"%__format)
2195 return __formater.dump(__filename, __upa)
2197 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2198 "Chargement normalisé des commandes"
2199 if __format in self.__loaders:
2200 __formater = self.__loaders[__format]()
2202 raise ValueError("Loading as \"%s\" is not available"%__format)
2203 return __formater.load(__filename, __content, __object)
2205 # ==============================================================================
# NOTE(review): the def line of this function (original line 2207, presumably
# "def MultiFonction(__xserie, ...)") is missing from this numbered excerpt,
# as are the multiprocessing availability checks. Code kept byte-identical.
2208 _extraArguments = None,
2209 _sFunction = lambda x: x,
# Maps _sFunction over an ordered series of input vectors, optionally in
# parallel with a multiprocessing.Pool of __mpWorkers processes.
2214 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2215 correspondante de valeurs de la fonction en argument
2217 # Vérifications et définitions initiales
2218 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2219 if not PlatformInfo.isIterable( __xserie ):
2220 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Non-positive or absent worker count means "let the pool pick" (branch
# body not visible in this excerpt — confirm against full source).
2222 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2225 __mpWorkers = int(_mpWorkers)
2227 import multiprocessing
2238 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2239 import multiprocessing
# Pool used as a context manager so workers are terminated on exit.
2240 with multiprocessing.Pool(__mpWorkers) as pool:
2241 __multiHX = pool.map( _sFunction, _jobs )
2244 # logging.debug("MULTF Internal multiprocessing calculation end")
2246 # logging.debug("MULTF Internal monoprocessing calculation begin")
# Sequential fallback: _extraArguments is forwarded positionally (list/
# tuple/map) or as keywords (dict).
2248 if _extraArguments is None:
2249 for __xvalue in __xserie:
2250 __multiHX.append( _sFunction( __xvalue ) )
2251 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2252 for __xvalue in __xserie:
2253 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2254 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2255 for __xvalue in __xserie:
2256 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2258 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2259 # logging.debug("MULTF Internal monoprocessing calculation end")
2261 # logging.debug("MULTF Internal multifonction calculations end")
2264 # ==============================================================================
# NOTE(review): numbered excerpt; several parameter lines (_Y, _BI, _RI, …)
# and some branch lines are missing from this dump. Code kept byte-identical.
2265 def CostFunction3D(_x,
2266 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2267 _HmX = None, # Simulation déjà faite de Hm(x)
2268 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2273 _SIV = False, # A résorber pour la 8.0
2274 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2275 _nPS = 0, # nbPreviousSteps
2276 _QM = "DA", # QualityMeasure
2277 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2278 _fRt = False, # Restitue ou pas la sortie étendue
2279 _sSc = True, # Stocke ou pas les SSC
# NOTE(review): mutable defaults (_SSC=[], _SSV={}) are shared across calls;
# here _SSV is deliberately used as an in/out store — do not "fix" blindly.
2282 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2283 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2284 DFO, QuantileRegression
# Ensure the requested storage slots expose an append-style interface.
2290 for k in ["CostFunctionJ",
2296 "SimulatedObservationAtCurrentOptimum",
2297 "SimulatedObservationAtCurrentState",
2301 if hasattr(_SSV[k],"store"):
2302 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
2304 _X = numpy.asmatrix(numpy.ravel( _x )).T
2305 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2306 _SSV["CurrentState"].append( _X )
# Use the precomputed simulation _HmX when provided, else evaluate _Hm.
2308 if _HmX is not None:
2312 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2316 _HX = _Hm( _X, *_arg )
2317 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2319 if "SimulatedObservationAtCurrentState" in _SSC or \
2320 "SimulatedObservationAtCurrentOptimum" in _SSC:
2321 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# NaN in the simulation invalidates the whole cost evaluation.
2323 if numpy.any(numpy.isnan(_HX)):
2324 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2326 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
# Quality measure dispatch: Jb is the background term, Jo the observation
# term; J = Jb + Jo is assembled below.
2327 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2328 if _BI is None or _RI is None:
2329 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2330 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2331 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2332 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2333 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2335 raise ValueError("Observation error covariance matrix has to be properly defined!")
2337 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2338 elif _QM in ["LeastSquares", "LS", "L2"]:
2340 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2341 elif _QM in ["AbsoluteValue", "L1"]:
2343 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2344 elif _QM in ["MaximumError", "ME"]:
2346 Jo = numpy.max( numpy.abs(_Y - _HX) )
2347 elif _QM in ["QR", "Null"]:
2351 raise ValueError("Unknown asked quality measure!")
2353 J = float( Jb ) + float( Jo )
2356 _SSV["CostFunctionJb"].append( Jb )
2357 _SSV["CostFunctionJo"].append( Jo )
2358 _SSV["CostFunctionJ" ].append( J )
# Optimum bookkeeping over the stored J history, offset by _nPS previous
# steps so only this run's evaluations are considered.
2360 if "IndexOfOptimum" in _SSC or \
2361 "CurrentOptimum" in _SSC or \
2362 "SimulatedObservationAtCurrentOptimum" in _SSC:
2363 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2364 if "IndexOfOptimum" in _SSC:
2365 _SSV["IndexOfOptimum"].append( IndexMin )
2366 if "CurrentOptimum" in _SSC:
2367 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2368 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2369 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
# Extended return for QuantileRegression (branch body not fully visible).
2374 if _QM in ["QR"]: # Pour le QuantileRegression
2379 # ==============================================================================
# Script entry point: running the module directly only prints a banner
# (self-diagnostic hook).
2380 if __name__ == "__main__":
2381 print('\n AUTODIAGNOSTIC\n')