1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
# NOTE(review): this listing is a sampled, line-numbered extraction — interior
# lines are missing and indentation was lost. Code is kept byte-identical;
# only review comments are added.
#
# CacheManager: a bounded FIFO cache of operator evaluations. Each entry is a
# tuple (ravelled input point, computed value H(x), norm of x, operator name),
# used to skip redundant operator calls when the same point is requested again
# within a relative tolerance.
40 class CacheManager(object):
42 Classe générale de gestion d'un cache de calculs
# Constructor fragment: toleranceInRedundancy is the relative tolerance used
# to decide two input points are "the same"; lenghtOfRedundancy [sic, typo for
# "length", kept for interface compatibility] is the maximum cache size
# (-1 means auto-sized on first store, see storeValueInX below).
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 Les caractéristiques de tolérance peuvent être modifiées à la création.
51 self.__tolerBP = float(toleranceInRedundancy)
52 self.__lenghtOR = int(lenghtOfRedundancy)
# __initlnOR remembers the initial cache length so enable() can restore it.
53 self.__initlnOR = self.__lenghtOR
# wasCalculatedIn: scan the cache from newest to oldest for a stored value
# matching (xValue, oName). Rejection tests in order: argument has no 'size'
# (not array-like), operator name differs, sizes differ, then a cheap
# first-component quick-reject before the full norm comparison.
63 def wasCalculatedIn(self, xValue, oName="" ):
64 "Vérifie l'existence d'un calcul correspondant à la valeur"
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
69 if not hasattr(xValue, 'size'):
71 elif (str(oName) != self.__listOPCV[i][3]):
73 elif (xValue.size != self.__listOPCV[i][0].size):
# NOTE(review): the quick-reject below compares a signed difference (no abs)
# against a positive bound — looks one-sided, but interior lines are missing
# here, so this cannot be confirmed from this view; verify against the full
# source before changing.
75 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full test: Euclidean distance below tolerance scaled by the stored norm
# means the cached value __HxV can be reused.
77 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
79 __HxV = self.__listOPCV[i][1]
# storeValueInX: record one (x, H(x)) pair for operator oName. On first use
# (length < 0) the cache is auto-sized to 2*min(x.size,50)+2 entries; each
# previously unseen operator name extends the capacity by the same amount.
# Eviction is FIFO (pop(0) of the oldest entry).
83 def storeValueInX(self, xValue, HxValue, oName="" ):
84 "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
85 if self.__lenghtOR < 0:
86 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87 self.__initlnOR = self.__lenghtOR
88 self.__seenNames.append(str(oName))
89 if str(oName) not in self.__seenNames: # Etend la liste si nouveau
90 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
91 self.__initlnOR += self.__lenghtOR
92 self.__seenNames.append(str(oName))
93 while len(self.__listOPCV) > self.__lenghtOR:
94 self.__listOPCV.pop(0)
# Cache entry layout (indices referenced in wasCalculatedIn above):
95 self.__listOPCV.append( (
96 copy.copy(numpy.ravel(xValue)), # 0 Previous point
97 copy.copy(HxValue), # 1 Previous value
98 numpy.linalg.norm(xValue), # 2 Norm
99 str(oName), # 3 Operator name
# disable/enable fragments: disabling turns caching off (__enabled False);
# enabling restores the initial cache length and turns it back on.
104 self.__initlnOR = self.__lenghtOR
106 self.__enabled = False
110 self.__lenghtOR = self.__initlnOR
111 self.__enabled = True
113 # ==============================================================================
# NOTE(review): sampled, line-numbered listing — interior lines missing,
# indentation lost. Code kept byte-identical; English review comments added.
#
# Operator: wraps either an explicit matrix or a user (multi-)function as a
# generic simple operator. Evaluations are counted both per-instance
# (self.__NbCalls*) and globally on the class (Operator.NbCalls*, declared on
# lines not visible in this view). A class-level CacheManager `Operator.CM`
# (also declared on a missing line) is shared to avoid redundant evaluations.
114 class Operator(object):
116 Classe générale d'interface de type opérateur simple
# Constructor: exactly one of fromMethod / fromMatrix is expected. When the
# user function is not already a multi-function, it is adapted through
# MultiFonction (defined elsewhere in the file) via functools.partial.
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 inputAsMultiFunction = False,
129 enableMultiProcess = False,
130 extraArguments = None,
133 On construit un objet de ce type en fournissant, à l'aide de l'un des
134 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
137 - name : nom d'opérateur
138 - fromMethod : argument de type fonction Python
139 - fromMatrix : argument adapté au constructeur numpy.matrix
140 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
141 - inputAsMultiFunction : booléen indiquant une fonction explicitement
142 définie (ou pas) en multi-fonction
143 - extraArguments : arguments supplémentaires passés à la fonction de
144 base et ses dérivées (tuple ou dictionnaire)
146 self.__name = str(name)
147 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
148 self.__AvoidRC = bool( avoidingRedundancy )
149 self.__inputAsMF = bool( inputAsMultiFunction )
150 self.__mpEnabled = bool( enableMultiProcess )
151 self.__extraArgs = extraArguments
152 if fromMethod is not None and self.__inputAsMF:
153 self.__Method = fromMethod # logtimer(fromMethod)
155 self.__Type = "Method"
156 elif fromMethod is not None and not self.__inputAsMF:
157 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
159 self.__Type = "Method"
160 elif fromMatrix is not None:
# NOTE(review): numpy.float was deprecated in NumPy 1.20 and removed in 1.24
# (use float or numpy.float64); numpy.matrix itself is deprecated in favour
# of numpy.ndarray. Confirm the targeted NumPy version before upgrading.
162 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
163 self.__Type = "Matrix"
# Redundancy-cache switches, delegated to the shared class-level CacheManager.
169 def disableAvoidingRedundancy(self):
171 Operator.CM.disable()
# NOTE(review): enableAvoidingRedundancy below only shows a disable() call;
# interior lines are missing — presumably the visible line is the fallback
# branch of a test on self.__AvoidRC. Verify against the full source.
173 def enableAvoidingRedundancy(self):
178 Operator.CM.disable()
# appliedTo: apply the operator to a series of points xValue. Precomputed
# values may be injected via HValue; otherwise the cache is consulted per
# point, matrix operators are applied directly, and the remaining points are
# evaluated in one multi-function call, with results stored back in the cache.
184 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
186 Permet de restituer le résultat de l'application de l'opérateur à une
187 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
188 argument devant a priori être du bon type.
190 - les arguments par série sont :
191 - xValue : argument adapté pour appliquer l'opérateur
192 - HValue : valeur précalculée de l'opérateur en ce point
193 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
200 if HValue is not None:
204 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
206 if _HValue is not None:
207 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
# Precomputed branch: store each provided H(x) as a column matrix and feed
# the redundancy cache so later calls can reuse them.
209 for i in range(len(_HValue)):
210 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
212 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Per-point dispatch: cache hit, direct matrix product, or deferral to the
# batched multi-function evaluation below.
217 for i, xv in enumerate(_xValue):
219 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
221 __alreadyCalculated = False
223 if __alreadyCalculated:
224 self.__addOneCacheCall()
227 if self.__Matrix is not None:
228 self.__addOneMatrixCall()
229 _xv = numpy.matrix(numpy.ravel(xv)).T
230 _hv = self.__Matrix * _xv
232 self.__addOneMethodCall()
236 _HxValue.append( _hv )
# Batched evaluation of all points not resolved above, through the
# (multi-)function, optionally forwarding extraArguments.
238 if len(_xserie)>0 and self.__Matrix is None:
239 if self.__extraArgs is None:
240 _hserie = self.__Method( _xserie ) # Calcul MF
242 _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
243 if not hasattr(_hserie, "pop"):
244 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
250 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optional packaging: stack all results column-wise into one 2D array.
252 if returnSerieAsArrayMatrix:
253 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
255 if argsAsSerie: return _HxValue
256 else: return _HxValue[-1]
# appliedControledFormTo: apply the operator to (x, u) pairs; when u is None
# the operator is applied to x alone. Matrix operators ignore u entirely.
258 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
260 Permet de restituer le résultat de l'application de l'opérateur à des
261 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
262 argument devant a priori être du bon type. Si la uValue est None,
263 on suppose que l'opérateur ne s'applique qu'à xValue.
265 - paires : les arguments par paire sont :
266 - xValue : argument X adapté pour appliquer l'opérateur
267 - uValue : argument U adapté pour appliquer l'opérateur
268 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
270 if argsAsSerie: _xuValue = paires
271 else: _xuValue = (paires,)
272 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
274 if self.__Matrix is not None:
276 for paire in _xuValue:
277 _xValue, _uValue = paire
278 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
279 self.__addOneMatrixCall()
280 _HxValue.append( self.__Matrix * _xValue )
# Function branch: keep the full pair when u is provided, x alone otherwise,
# then evaluate the whole batch in one multi-function call.
283 for paire in _xuValue:
284 _xValue, _uValue = paire
285 if _uValue is not None:
286 _xuArgs.append( paire )
288 _xuArgs.append( _xValue )
289 self.__addOneMethodCall( len(_xuArgs) )
290 if self.__extraArgs is None:
291 _HxValue = self.__Method( _xuArgs ) # Calcul MF
293 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
295 if returnSerieAsArrayMatrix:
296 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
298 if argsAsSerie: return _HxValue
299 else: return _HxValue[-1]
# appliedInXTo: apply the operator, built at a nominal point xNominal, to
# xValue (pairs (xNominal, xValue)). A linear/matrix operator is valid at any
# nominal point, so xNominal is ignored in the matrix branch.
301 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
303 Permet de restituer le résultat de l'application de l'opérateur à une
304 série d'arguments xValue, sachant que l'opérateur est valable en
305 xNominal. Cette méthode se contente d'appliquer, son argument devant a
306 priori être du bon type. Si l'opérateur est linéaire car c'est une
307 matrice, alors il est valable en tout point nominal et xNominal peut
308 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
309 permet d'indiquer que l'argument est multi-paires.
311 - paires : les arguments par paire sont :
312 - xNominal : série d'arguments permettant de donner le point où
313 l'opérateur est construit pour être ensuite appliqué
314 - xValue : série d'arguments adaptés pour appliquer l'opérateur
315 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
317 if argsAsSerie: _nxValue = paires
318 else: _nxValue = (paires,)
319 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
321 if self.__Matrix is not None:
323 for paire in _nxValue:
324 _xNominal, _xValue = paire
325 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
326 self.__addOneMatrixCall()
327 _HxValue.append( self.__Matrix * _xValue )
329 self.__addOneMethodCall( len(_nxValue) )
330 if self.__extraArgs is None:
331 _HxValue = self.__Method( _nxValue ) # Calcul MF
333 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
335 if returnSerieAsArrayMatrix:
336 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
338 if argsAsSerie: return _HxValue
339 else: return _HxValue[-1]
# asMatrix: return the operator in matrix form. For a function-based operator
# this requires an operating point; the sentinel string "UnknownVoidValue" is
# used instead of None because None is a legitimate operating point marker.
341 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
343 Permet de renvoyer l'opérateur sous la forme d'une matrice
345 if self.__Matrix is not None:
346 self.__addOneMatrixCall()
347 mValue = [self.__Matrix,]
348 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
351 self.__addOneMethodCall( len(ValueForMethodForm) )
352 for _vfmf in ValueForMethodForm:
# Each evaluation is wrapped as ((point, None),): the controlled form with
# no control value, matching appliedControledFormTo's pair convention.
353 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
355 self.__addOneMethodCall()
356 mValue = self.__Method(((ValueForMethodForm, None),))
358 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
360 if argsAsSerie: return mValue
361 else: return mValue[-1]
# shape fragment: only available for matrix operators.
365 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
366 la forme d'une matrice
368 if self.__Matrix is not None:
369 return self.__Matrix.shape
371 raise ValueError("Matrix form of the operator is not available, nor the shape")
# nbcalls: return the evaluation counters as a tuple
# (local total, local matrix, local method, local cached,
#  global total, global matrix, global method, global cached);
# `which` selects one entry by index, None returns the whole tuple.
373 def nbcalls(self, which=None):
375 Renvoie les nombres d'évaluations de l'opérateur
378 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
379 self.__NbCallsAsMatrix,
380 self.__NbCallsAsMethod,
381 self.__NbCallsOfCached,
382 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
383 Operator.NbCallsAsMatrix,
384 Operator.NbCallsAsMethod,
385 Operator.NbCallsOfCached,
387 if which is None: return __nbcalls
388 else: return __nbcalls[which]
# Counter helpers: each bumps the per-instance counter and the class-wide one.
390 def __addOneMatrixCall(self):
391 "Comptabilise un appel"
392 self.__NbCallsAsMatrix += 1 # Decompte local
393 Operator.NbCallsAsMatrix += 1 # Decompte global
395 def __addOneMethodCall(self, nb = 1):
396 "Comptabilise un appel"
397 self.__NbCallsAsMethod += nb # Decompte local
398 Operator.NbCallsAsMethod += nb # Decompte global
400 def __addOneCacheCall(self):
401 "Comptabilise un appel"
402 self.__NbCallsOfCached += 1 # Decompte local
403 Operator.NbCallsOfCached += 1 # Decompte global
405 # ==============================================================================
# NOTE(review): sampled, line-numbered listing — interior lines missing,
# indentation lost. Code kept byte-identical; English review comments added.
#
# FullOperator: builds a complete operator triple (Direct, Tangent, Adjoint)
# as three Operator instances stored in self.__FO, from one of: a matrix, a
# single direct function (derivatives approximated by finite differences via
# NumericObjects.FDApproximation), three explicit functions, or a script.
406 class FullOperator(object):
408 Classe générale d'interface de type opérateur complet
409 (Direct, Linéaire Tangent, Adjoint)
412 name = "GenericFullOperator",
414 asOneFunction = None, # 1 Fonction
415 asThreeFunctions = None, # 3 Fonctions in a dictionary
416 asScript = None, # 1 or 3 Fonction(s) by script
417 asDict = None, # Parameters
419 extraArguments = None,
421 inputAsMF = False,# Fonction(s) as Multi-Functions
426 self.__name = str(name)
427 self.__check = bool(toBeChecked)
428 self.__extraArgs = extraArguments
# Normalization of multiprocessing flags: EnableMultiProcessing implies
# multiprocessing in derivatives only; derivatives and evaluation
# multiprocessing are mutually exclusive, with evaluation disabled by default.
433 if (asDict is not None) and isinstance(asDict, dict):
434 __Parameters.update( asDict )
435 # Priority to EnableMultiProcessingInDerivatives=True
436 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
437 __Parameters["EnableMultiProcessingInDerivatives"] = True
438 __Parameters["EnableMultiProcessingInEvaluation"] = False
439 if "EnableMultiProcessingInDerivatives" not in __Parameters:
440 __Parameters["EnableMultiProcessingInDerivatives"] = False
441 if __Parameters["EnableMultiProcessingInDerivatives"]:
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "EnableMultiProcessingInEvaluation" not in __Parameters:
444 __Parameters["EnableMultiProcessingInEvaluation"] = False
# Legacy alias kept temporarily: "withIncrement" maps to the finite
# difference increment parameter.
445 if "withIncrement" in __Parameters: # Temporaire
446 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Script input: the script may provide either a matrix, a single
# "DirectOperator" (derivatives then approximated), or the full triple
# DirectOperator/TangentOperator/AdjointOperator.
448 if asScript is not None:
449 __Matrix, __Function = None, None
451 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
453 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
454 __Function.update({"useApproximatedDerivatives":True})
455 __Function.update(__Parameters)
456 elif asThreeFunctions:
458 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
459 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
460 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
462 __Function.update(__Parameters)
# Direct (non-script) input: a single function, given bare or wrapped in a
# dict under the "Direct" key; derivatives will be approximated.
465 if asOneFunction is not None:
466 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
467 if asOneFunction["Direct"] is not None:
468 __Function = asOneFunction
470 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
472 __Function = { "Direct":asOneFunction }
473 __Function.update({"useApproximatedDerivatives":True})
474 __Function.update(__Parameters)
# Three-function input: either a genuine Tangent/Adjoint pair (Direct
# optional, defaulting to Tangent), or only a Direct function with
# approximated derivatives.
475 elif asThreeFunctions is not None:
476 if isinstance(asThreeFunctions, dict) and \
477 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
478 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
479 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
480 __Function = asThreeFunctions
481 elif isinstance(asThreeFunctions, dict) and \
482 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
483 __Function = asThreeFunctions
484 __Function.update({"useApproximatedDerivatives":True})
486 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
487 if "Direct" not in asThreeFunctions:
488 __Function["Direct"] = asThreeFunctions["Tangent"]
489 __Function.update(__Parameters)
# Dead Python 2 compatibility check, kept commented out in the original.
493 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
494 # for k in ("Direct", "Tangent", "Adjoint"):
495 # if k in __Function and hasattr(__Function[k],"__class__"):
496 # if type(__Function[k]) is type(self.__init__):
497 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# appliedInX: precomputed operator values at given points; a bare value is
# wrapped as {"HXb": value}.
499 if appliedInX is not None and isinstance(appliedInX, dict):
500 __appliedInX = appliedInX
501 elif appliedInX is not None:
502 __appliedInX = {"HXb":appliedInX}
506 if scheduledBy is not None:
507 self.__T = scheduledBy
# Case 1: direct function with approximated derivatives — fill in the
# finite-difference defaults, then build the FDApproximation object whose
# Direct/Tangent/Adjoint methods back the three Operator instances.
# ("Lenght" [sic] spellings are kept: they are part of the existing API.)
509 if isinstance(__Function, dict) and \
510 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
511 ("Direct" in __Function) and (__Function["Direct"] is not None):
512 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
513 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
514 if "withdX" not in __Function: __Function["withdX"] = None
515 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
516 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
517 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
518 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
519 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
520 from daCore import NumericObjects
521 FDA = NumericObjects.FDApproximation(
523 Function = __Function["Direct"],
524 centeredDF = __Function["CenteredFiniteDifference"],
525 increment = __Function["DifferentialIncrement"],
526 dX = __Function["withdX"],
527 extraArguments = self.__extraArgs,
528 avoidingRedundancy = __Function["withAvoidingRedundancy"],
529 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
530 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
531 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
532 mpWorkers = __Function["NumberOfProcesses"],
533 mfEnabled = __Function["withmfEnabled"],
535 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
536 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
537 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: the three functions are all explicitly provided.
538 elif isinstance(__Function, dict) and \
539 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
540 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
541 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
542 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
543 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: matrix operator — Tangent is the matrix itself, Adjoint its
# transpose.
# NOTE(review): numpy.float was removed in NumPy 1.24 and numpy.matrix is
# deprecated; confirm the targeted NumPy version before upgrading.
544 elif asMatrix is not None:
545 __matrice = numpy.matrix( __Matrix, numpy.float )
546 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
547 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
548 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
551 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Normalize each appliedInX entry to a column matrix, handling true numpy
# matrices, 2D arrays representing vectors, and plain sequences.
553 if __appliedInX is not None:
554 self.__FO["AppliedInX"] = {}
555 for key in list(__appliedInX.keys()):
556 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
557 # Case of a true numpy matrix
558 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
559 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
560 # Case of a vector represented as a 2-dimensional array
561 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
563 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
565 self.__FO["AppliedInX"] = None
# Standard representations delegate to the internal operator dictionary.
571 "x.__repr__() <==> repr(x)"
572 return repr(self.__FO)
575 "x.__str__() <==> str(x)"
576 return str(self.__FO)
578 # ==============================================================================
579 class Algorithm(object):
581 Classe générale d'interface de type algorithme
583 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
584 d'assimilation, en fournissant un container (dictionnaire) de variables
585 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
587 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
589 def __init__(self, name):
591 L'initialisation présente permet de fabriquer des variables de stockage
592 disponibles de manière générique dans les algorithmes élémentaires. Ces
593 variables de stockage sont ensuite conservées dans un dictionnaire
594 interne à l'objet, mais auquel on accède par la méthode "get".
596 Les variables prévues sont :
597 - APosterioriCorrelations : matrice de corrélations de la matrice A
598 - APosterioriCovariance : matrice de covariances a posteriori : A
599 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
600 - APosterioriVariances : vecteur des variances de la matrice A
601 - Analysis : vecteur d'analyse : Xa
602 - BMA : Background moins Analysis : Xa - Xb
603 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
604 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
605 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
606 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
607 - CostFunctionJo : partie observations de la fonction-coût : Jo
608 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
609 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
610 - CurrentOptimum : état optimal courant lors d'itérations
611 - CurrentState : état courant lors d'itérations
612 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
613 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
614 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
615 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
616 - Innovation : l'innovation : d = Y - H(X)
617 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
618 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
619 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
620 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
621 - KalmanGainAtOptimum : gain de Kalman à l'optimum
622 - MahalanobisConsistency : indicateur de consistance des covariances
623 - OMA : Observation moins Analyse : Y - Xa
624 - OMB : Observation moins Background : Y - Xb
625 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
626 - ForecastState : état prédit courant lors d'itérations
627 - Residu : dans le cas des algorithmes de vérification
628 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
629 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
630 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
631 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
632 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
633 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
634 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
635 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
636 On peut rajouter des variables à stocker dans l'initialisation de
637 l'algorithme élémentaire qui va hériter de cette classe
639 logging.debug("%s Initialisation", str(name))
640 self._m = PlatformInfo.SystemUsage()
642 self._name = str( name )
643 self._parameters = {"StoreSupplementaryCalculations":[]}
644 self.__internal_state = {}
645 self.__required_parameters = {}
646 self.__required_inputs = {
647 "RequiredInputValues":{"mandatory":(), "optional":()},
648 "ClassificationTags":[],
650 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
651 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
652 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
654 self.StoredVariables = {}
655 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
656 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
657 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
658 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
659 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
660 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
661 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
662 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
663 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
664 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
665 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
666 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
667 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
668 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
669 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
670 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
671 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
672 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
673 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
674 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
675 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
676 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
677 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
678 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
679 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
680 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
681 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
682 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
683 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
684 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
685 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
686 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
687 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
688 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
689 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
690 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
691 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
692 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
693 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
694 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
695 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
696 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
698 for k in self.StoredVariables:
699 self.__canonical_stored_name[k.lower()] = k
701 for k, v in self.__variable_names_not_public.items():
702 self.__canonical_parameter_name[k.lower()] = k
703 self.__canonical_parameter_name["algorithm"] = "Algorithm"
704 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
    def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
        """
        Shared pre-processing before an algorithm run: logging, timer reset,
        merging of user parameters with defaults, presence checks of the
        required vectors (Xb, Y, U), covariances (B, R, Q) and operators
        (HO, EM, CM), then normalization of the bounds and of the forced
        initialization point.
        NOTE(review): several if/else control lines appear to be absent from
        this extract; the indentation below follows the visible statements
        and should be confirmed against the reference source.
        """
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
        self._getTimeState(reset=True)
        # Update the internal parameters with the content of Parameters,
        # taking the default values for all those not defined
        self.__setParameters(Parameters, reset=True)
        for k, v in self.__variable_names_not_public.items():
            if k not in self._parameters: self.__setParameters( {k:v} )
        # Corrections and completions of the vectors
        def __test_vvalue(argument, variable, argname, symbol=None):
            # Check a named input vector: raise when a mandatory one is
            # missing, otherwise only log its status.
            if symbol is None: symbol = variable
            # NOTE(review): an "if argument is None:" guard is presumably
            # missing here in this extract -- TODO confirm.
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
            logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
            logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        __test_vvalue( U, "U", "Control" )
        # Corrections and completions of the covariances
        def __test_cvalue(argument, variable, argname, symbol=None):
            # Same policy as __test_vvalue, for error covariance matrices.
            if symbol is None: symbol = variable
            # NOTE(review): an "if argument is None:" guard is presumably
            # missing here too -- TODO confirm.
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
            logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
            logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( Q, "Q", "Evolution" )
        # Corrections and completions of the operators
        def __test_ovalue(argument, variable, argname, symbol=None):
            # Check a named operator (given as a non-empty dict of forms).
            if symbol is None: symbol = variable
            if argument is None or (isinstance(argument,dict) and len(argument)==0):
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
                logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
            logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
        __test_ovalue( HO, "HO", "Observation", "H" )
        __test_ovalue( EM, "EM", "Evolution", "M" )
        __test_ovalue( CM, "CM", "Control Model", "C" )
        # Corrections and completions of the bounds
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Bounds taken into account"%(self._name,))
        # NOTE(review): an "else:" presumably guards the reset below -- TODO confirm.
        self._parameters["Bounds"] = None
        if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
            logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
            # Beware: unlike Bounds, no default to None, otherwise one cannot run without bounds
        # Corrections and completions of the X initialization
        if "InitializationPoint" in self._parameters:
            if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
                if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
                    raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
                        %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
                # Obtained by typecast : numpy.ravel(self._parameters["InitializationPoint"])
            # NOTE(review): an "else:" branch presumably guards this fallback -- TODO confirm.
            self._parameters["InitializationPoint"] = numpy.ravel(Xb)
        if self._parameters["InitializationPoint"] is None:
            raise ValueError("Forced initial point can not be set without any given Background or required value")
        # Correction to work around a TNC bug on the Minimum return
        if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
            self.setParameterValue("StoreInternalVariables",True)
        # Verbosity and logging
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
            # NOTE(review): "else:" lines presumably surround the fallback
            # assignments below (non-scipy and non-verbose paths) -- TODO confirm.
            self._parameters["optmessages"] = 15
        self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
        if PlatformInfo.has_scipy:
            import scipy.optimize
            self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
        self._parameters["optmessages"] = 15
        # NOTE(review): a final "return 0" may be missing from this extract.
    def _post_run(self,_oH=None):
        """
        Shared post-processing after an algorithm run: derives the optional
        a posteriori quantities (variances, standard deviations,
        correlations) from each stored APosterioriCovariance matrix, then
        logs observation-operator call counts, memory usage and timings.
        """
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))  # inverse standard deviations on the diagonal
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))          # normalize the covariance into a correlation matrix
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
        # NOTE(review): a final "return 0" may be missing from this extract.
835 def _toStore(self, key):
836 "True if in StoreSupplementaryCalculations, else False"
837 return key in self._parameters["StoreSupplementaryCalculations"]
839 def get(self, key=None):
841 Renvoie l'une des variables stockées identifiée par la clé, ou le
842 dictionnaire de l'ensemble des variables disponibles en l'absence de
843 clé. Ce sont directement les variables sous forme objet qui sont
844 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
845 des classes de persistance.
848 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
850 return self.StoredVariables
852 def __contains__(self, key=None):
853 "D.__contains__(k) -> True if D has a key k, else False"
854 if key is None or key.lower() not in self.__canonical_stored_name:
857 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
860 "D.keys() -> list of D's keys"
861 if hasattr(self, "StoredVariables"):
862 return self.StoredVariables.keys()
867 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
868 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
869 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
874 raise TypeError("pop expected at least 1 arguments, got 0")
875 "If key is not found, d is returned if given, otherwise KeyError is raised"
881 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
883 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
884 sa forme mathématique la plus naturelle possible.
886 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
888 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
890 Permet de définir dans l'algorithme des paramètres requis et leurs
891 caractéristiques par défaut.
894 raise ValueError("A name is mandatory to define a required parameter.")
896 self.__required_parameters[name] = {
898 "typecast" : typecast,
905 self.__canonical_parameter_name[name.lower()] = name
906 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
908 def getRequiredParameters(self, noDetails=True):
910 Renvoie la liste des noms de paramètres requis ou directement le
911 dictionnaire des paramètres requis.
914 return sorted(self.__required_parameters.keys())
916 return self.__required_parameters
918 def setParameterValue(self, name=None, value=None):
920 Renvoie la valeur d'un paramètre requis de manière contrôlée
922 __k = self.__canonical_parameter_name[name.lower()]
923 default = self.__required_parameters[__k]["default"]
924 typecast = self.__required_parameters[__k]["typecast"]
925 minval = self.__required_parameters[__k]["minval"]
926 maxval = self.__required_parameters[__k]["maxval"]
927 listval = self.__required_parameters[__k]["listval"]
928 listadv = self.__required_parameters[__k]["listadv"]
930 if value is None and default is None:
932 elif value is None and default is not None:
933 if typecast is None: __val = default
934 else: __val = typecast( default )
936 if typecast is None: __val = value
939 __val = typecast( value )
941 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
943 if minval is not None and (numpy.array(__val, float) < minval).any():
944 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
945 if maxval is not None and (numpy.array(__val, float) > maxval).any():
946 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
947 if listval is not None or listadv is not None:
948 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
950 if listval is not None and v in listval: continue
951 elif listadv is not None and v in listadv: continue
953 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
954 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
955 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
959 def requireInputArguments(self, mandatory=(), optional=()):
961 Permet d'imposer des arguments de calcul requis en entrée.
963 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
964 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
966 def getInputArguments(self):
968 Permet d'obtenir les listes des arguments de calcul requis en entrée.
970 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
972 def setAttributes(self, tags=()):
974 Permet d'adjoindre des attributs comme les tags de classification.
975 Renvoie la liste actuelle dans tous les cas.
977 self.__required_inputs["ClassificationTags"].extend( tags )
978 return self.__required_inputs["ClassificationTags"]
    def __setParameters(self, fromDico={}, reset=False):
        """
        Store the received parameters into the internal dictionary,
        resolving user-supplied names to their canonical form and applying
        defaults for required parameters that were not supplied.
        NOTE(review): the "if reset:" re-initialisation of self._parameters
        appears to be missing from this extract -- TODO confirm.
        """
        self._parameters.update( fromDico )
        # Map each canonical parameter name back to the user's spelling
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
            # NOTE(review): an "else:" branch presumably guards the default
            # assignment below -- TODO confirm.
            self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
1000 def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1002 Permet de stocker des variables nommées constituant l'état interne
1004 if reset: # Vide le dictionnaire préalablement
1005 self.__internal_state = {}
1006 if key is not None and value is not None:
1007 self.__internal_state[key] = value
1008 self.__internal_state.update( dict(fromDico) )
1010 def _getInternalState(self, key=None):
1012 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1014 if key is not None and key in self.__internal_state:
1015 return self.__internal_state[key]
1017 return self.__internal_state
    def _getTimeState(self, reset=False):
        """
        Initialize (reset=True) or return the computation time as a
        (cpu, elapsed) pair of seconds since the last reset.
        NOTE(review): the "if reset:" guard around the two initial-time
        assignments below appears to be missing from this extract -- as
        written, every call would restart the counters. TODO confirm.
        """
        self.__initial_cpu_time = time.process_time()
        self.__initial_elapsed_time = time.perf_counter()
        self.__cpu_time = time.process_time() - self.__initial_cpu_time
        self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
        return self.__cpu_time, self.__elapsed_time
    def _StopOnTimeLimit(self, X=None, withReason=False):
        "Stop criteria on time limit: True/False [+ Reason]"
        c, e = self._getTimeState()
        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
        # NOTE(review): the final "else:" line and the return statement
        # (presumably returning (__SC, __SR) or just __SC depending on
        # withReason) are missing from this extract -- TODO confirm.
        __SC, __SR = False, ""
1046 # ==============================================================================
1047 class AlgorithmAndParameters(object):
1049 Classe générale d'interface d'action pour l'algorithme et ses paramètres
# NOTE(review): the "def __init__(self," line, most of its keyword
    # parameters (asAlgorithm, asDict, asScript, ...) and the "self.__P = {}"
    # initialisation appear to be missing from this extract -- TODO confirm.
        name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        # Merge user parameters coming from a dict or a script file
        self.updateParameters( asDict, asScript )
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): an "else:" presumably guards the assignment below.
        __Algo = asAlgorithm
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            # Select and instantiate the algorithm immediately
            self.__setAlgorithm( self.__A )
        #
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
    def updateParameters(self,
        # NOTE(review): the keyword-parameter lines (asDict=None,
        # asScript=None) and the closing parenthesis of this signature are
        # missing from this extract -- TODO confirm.
        "Update the parameters from a dictionary or a script file"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # NOTE(review): an "else: __Dict = asDict" branch is presumably
        # missing here.
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Launch the assimilation computation from a dictionary of case objects"
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Each case entry either wraps its object behind getO() or is raw
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # Consistency check of all shapes before running
        self.__shape_validate()
        # NOTE(review): most keyword arguments of the run() call (Xb, Y, U,
        # HO, EM, CM, R, B, Q) are missing from this extract -- TODO confirm
        # against the reference source.
        self.__algorithm.run(
            Parameters = self.__P,
            )
    def executeYACSScheme(self, FileName=None):
        "Launch the assimilation computation through a YACS scheme file"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        # NOTE(review): an "else:" presumably wraps the two lines below.
        __file = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
        # A full SALOME/YACS/ADAO environment is required for execution
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        #
        import SALOMERuntime
        # NOTE(review): the "import pilot" and "import loader" lines appear
        # to be missing from this extract although both modules are used
        # below -- TODO confirm.
        SALOMERuntime.RuntimeSALOME_setRuntime()
        #
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        # Load the catalog of the scheme file itself
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
        #
        # NOTE(review): the "try:" opening this block is missing from the
        # extract; without it the "except" below is orphaned -- TODO confirm.
        p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        #
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
        # NOTE(review): an "if not p.isValid():" guard presumably precedes
        # the two diagnostic lines below -- TODO confirm.
        print("The YACS XML schema is not valid and will not be executed:")
        print(p.getErrorReport())
        #
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        #
        e = pilot.ExecutorSwig()
        # NOTE(review): the executor run call (presumably "e.RunW(p)") is
        # missing from this extract -- TODO confirm.
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Check the existence of a variable or parameter key and return it"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
        # NOTE(review): an "else:" line and the final "return allvariables"
        # appear to be missing from this extract -- TODO confirm. The code
        # below builds the full parameter set minus the non-public names.
        allvariables = self.__P
        for k in self.__variable_names_not_public: allvariables.pop(k, None)
1200 def pop(self, k, d):
1201 "Necessaire pour le pickling"
1202 return self.__algorithm.pop(k, d)
1204 def getAlgorithmRequiredParameters(self, noDetails=True):
1205 "Renvoie la liste des paramètres requis selon l'algorithme"
1206 return self.__algorithm.getRequiredParameters(noDetails)
1208 def getAlgorithmInputArguments(self):
1209 "Renvoie la liste des entrées requises selon l'algorithme"
1210 return self.__algorithm.getInputArguments()
1212 def getAlgorithmAttributes(self):
1213 "Renvoie la liste des attributs selon l'algorithme"
1214 return self.__algorithm.setAttributes()
    def setObserver(self, __V, __O, __I, __S):
        """
        Attach observer __O (with info __I and scheduler __S) to the stored
        variable named __V of the current algorithm.
        """
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        # NOTE(review): other keyword arguments of setDataObserver (the hook
        # function __O and the scheduler __S) appear to be missing from this
        # extract -- TODO confirm.
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            HookParameters = __I,
            )
    def removeObserver(self, __V, __O, __A = False):
        """
        Remove observer __O from the stored variable named __V (all
        observers when __A is True).
        """
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        # NOTE(review): the arguments of removeDataObserver (the hook
        # function __O and the all-observers flag __A) appear to be missing
        # from this extract -- TODO confirm.
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
            )
1243 def hasObserver(self, __V):
1244 if self.__algorithm is None \
1245 or isinstance(self.__algorithm, dict) \
1246 or not hasattr(self.__algorithm,"StoredVariables"):
1248 if __V not in self.__algorithm:
1250 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# NOTE(review): the "def keys(self):" line and its docstring appear to
    # be missing from this extract -- TODO confirm. The body below returns
    # all algorithm and parameter keys minus the non-public names.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1258 def __contains__(self, key=None):
1259 "D.__contains__(k) -> True if D has a key k, else False"
1260 return key in self.__algorithm or key in self.__P
# NOTE(review): the "def __repr__(self):" line is missing from this extract.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)

    # NOTE(review): the "def __str__(self):" line is missing from this extract.
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm to use to carry out the assimilation study.
        The argument is a character string referring to the name of an
        algorithm performing the operation on the fixed arguments.
        """
        # NOTE(review): an "if choice is None:" guard presumably precedes
        # this raise in this extract -- TODO confirm.
        raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete file
        # ---------------------------------------
        # NOTE(review): a "module_path = None" initialisation appears to be
        # missing before this loop -- TODO confirm.
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------
        # NOTE(review): the "try:" opening this block is missing from the
        # extract; without it the "except" below is orphaned -- TODO confirm.
        sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the file's elementary type
        # ---------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        """
        Validate the mutual consistency of the sizes of the variables and
        of the matrices, when present. Each input may expose its shape as a
        "size" attribute, a "shape" tuple, or a callable "shape" method.
        """
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        #
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        #
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        #
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        #
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        #
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        #
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        #
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        #
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Verification of the conditions
        # ------------------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        #
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        #
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        #
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            if self.__algorithmName in ["EnsembleBlue",]:
                # EnsembleBlue accepts a background given as an ensemble of
                # vectors: rebuild it as a persistent vector series
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
                __Xb_shape = min(__B_shape)
            # NOTE(review): an "else:" presumably guards the raise below --
            # TODO confirm against the reference source.
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        #
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        #
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        #
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        #
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
        #
        if ("StateBoundsForQuantiles" in self.__P) \
            and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
            and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1431 # ==============================================================================
class RegulationAndParameters(object):
    """
    General action-interface class for regulation and its parameters
    """
    # NOTE(review): the "def __init__(self," line, several keyword
    # parameters (asAlgorithm, asDict, asScript, ...) and the
    # "self.__P = {}" initialisation are missing from this extract.
        name               = "GenericRegulation",
        self.__name = str(name)
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): an "else:" presumably guards the assignment below.
        __Algo = asAlgorithm
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Check the existence of a variable or parameter key and return it"
        # NOTE(review): an "if key in self.__P:" dispatch presumably
        # precedes this return in this extract -- TODO confirm.
        return self.__P[key]
1470 # ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class
    """
    # NOTE(review): the "def __init__(self," line and several keyword
    # parameters (onVariable, withInfo, scheduledBy, withAlgo, asObsObject,
    # asTemplate, asString, asScript, ...) are missing from this extract.
        name        = "GenericObserver",
        self.__name = str(name)
        # Normalize the observed variable name(s) and their info strings
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                # NOTE(review): the assignment for the None case is missing here.
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            # NOTE(review): an "else:" presumably guards the line below.
            self.__I = (str(withInfo),)
        # NOTE(review): an "else:" presumably guards the raise below.
        raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        if asObsObject is not None:
            self.__O = asObsObject
        # NOTE(review): an "else:" presumably wraps the three lines below,
        # which build the observer function from a user script/template.
        __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
        __Function = Observer2Func(__FunctionText)
        self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            # NOTE(review): the "ename = ..." / "einfo = ..." extraction
            # lines are missing from this extract.
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    # NOTE(review): the "def __repr__(self):" line is missing from this extract.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    # NOTE(review): the "def __str__(self):" line is missing from this extract.
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1533 # ==============================================================================
class UserScript(object):
    """
    General interface class for user-script text
    """
    # NOTE(review): the "def __init__(self," line and keyword parameters
    # (asTemplate, asString, asScript, ...) are missing from this extract.
        name       = "GenericUserScript",
        self.__name = str(name)
        # Resolve the script text source by decreasing priority
        # NOTE(review): the body of the next "if" (presumably
        # "self.__F = asString") is missing from this extract.
        if asString is not None:
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        # NOTE(review): the final "else:" default assignment is missing here.

    # NOTE(review): the "def __repr__(self):" line is missing from this extract.
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    # NOTE(review): the "def __str__(self):" line is missing from this extract.
        "x.__str__() <==> str(x)"
        return str(self.__F)
1567 # ==============================================================================
# NOTE(review): numbered excerpt with missing source lines; comments below
# cover only the visible code.
1568 class ExternalParameters(object):
1570     Classe générale d'interface de type texte de script utilisateur
1573                  name        = "GenericExternalParameters",
1579         self.__name = str(name)
1582         self.updateParameters( asDict, asScript )
1584     def updateParameters(self,
1588         "Mise a jour des parametres"
# A script source is only consulted when no explicit dict is given.
1589         if asDict is None and asScript is not None:
1590             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1594         if __Dict is not None:
# Merge (not replace): existing keys not present in __Dict are kept.
1595             self.__P.update( dict(__Dict) )
# Dict-like accessors delegating to the internal parameter store __P.
1597     def get(self, key = None):
1599             return self.__P[key]
1601             return list(self.__P.keys())
1604         return list(self.__P.keys())
1606     def pop(self, k, d):
1607         return self.__P.pop(k, d)
1610         return self.__P.items()
1612     def __contains__(self, key=None):
1613         "D.__contains__(k) -> True if D has a key k, else False"
1614         return key in self.__P
1616 # ==============================================================================
# NOTE(review): numbered excerpt with missing source lines; comments below
# cover only the visible code.
1617 class State(object):
1619     Classe générale d'interface de type état
1622                  name               = "GenericVector",
1624                  asPersistentVector = None,
1630                  toBeChecked        = False,
1633         Permet de définir un vecteur :
1634         - asVector : entrée des données, comme un vecteur compatible avec le
1635           constructeur de numpy.matrix, ou "True" si entrée par script.
1636         - asPersistentVector : entrée des données, comme une série de vecteurs
1637           compatible avec le constructeur de numpy.matrix, ou comme un objet de
1638           type Persistence, ou "True" si entrée par script.
1639         - asScript : si un script valide est donné contenant une variable
1640           nommée "name", la variable est de type "asVector" (par défaut) ou
1641           "asPersistentVector" selon que l'une de ces variables est placée à
1643         - asDataFile : si un ou plusieurs fichiers valides sont donnés
1644           contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1645           (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1646           nommée "name"), on récupère les colonnes et on les range ligne après
1647           ligne (colMajor=False, par défaut) ou colonne après colonne
1648           (colMajor=True). La variable résultante est de type "asVector" (par
1649           défaut) ou "asPersistentVector" selon que l'une de ces variables est
1652         self.__name       = str(name)
1653         self.__check      = bool(toBeChecked)
1657         self.__is_vector  = False
1658         self.__is_series  = False
# Three mutually exclusive input channels: script, data file, or direct values.
1660         if asScript is not None:
1661             __Vector, __Series = None, None
1662             if asPersistentVector:
1663                 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1665                 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1666         elif asDataFile is not None:
1667             __Vector, __Series = None, None
1668             if asPersistentVector:
# Without explicit column names, a column named after self.__name is looked up.
1669                 if colNames is not None:
1670                     __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1672                     __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are stored transposed relative to text files, hence the
# double transposition logic depending on format and colMajor.
1673                 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1674                     __Series = numpy.transpose(__Series)
1675                 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1676                     __Series = numpy.transpose(__Series)
1678                 if colNames is not None:
1679                     __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1681                     __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Fortran (column-major) vs C (row-major) flattening of the file columns.
1683                     __Vector = numpy.ravel(__Vector, order = "F")
1685                     __Vector = numpy.ravel(__Vector, order = "C")
1687             __Vector, __Series = asVector, asPersistentVector
# Store either a single column vector (__is_vector) or a Persistence series
# of column vectors (__is_series), and publish shape/size attributes.
1689         if __Vector is not None:
1690             self.__is_vector = True
# NOTE(review): numpy.matrix and numpy.float are deprecated/removed in
# recent NumPy releases — candidate for migration, TODO confirm project policy.
1691             self.__V         = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1692             self.shape       = self.__V.shape
1693             self.size        = self.__V.size
1694         elif __Series is not None:
1695             self.__is_series  = True
1696             if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1697                 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a user-provided string — accepted here by design,
# but unsafe on untrusted input.
1698                 if isinstance(__Series, str): __Series = eval(__Series)
1699                 for member in __Series:
1700                     self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
# Persistence objects expose shape as a method, plain containers as a tuple.
1703             if isinstance(self.__V.shape, (tuple, list)):
1704                 self.shape       = self.__V.shape
1706                 self.shape       = self.__V.shape()
1707             if len(self.shape) == 1:
1708                 self.shape       = (self.shape[0],1)
1709             self.size        = self.shape[0] * self.shape[1]
1711             raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1713         if scheduledBy is not None:
1714             self.__T = scheduledBy
1716     def getO(self, withScheduler=False):
1718             return self.__V, self.__T
1719         elif self.__T is None:
1725         "Vérification du type interne"
1726         return self.__is_vector
1729         "Vérification du type interne"
1730         return self.__is_series
1733         "x.__repr__() <==> repr(x)"
1734         return repr(self.__V)
1737         "x.__str__() <==> str(x)"
1738         return str(self.__V)
1740 # ==============================================================================
# NOTE(review): numbered excerpt with missing source lines; comments below
# cover only the visible code.
1741 class Covariance(object):
1743     Classe générale d'interface de type covariance
1746                  name          = "GenericCovariance",
1747                  asCovariance  = None,
1748                  asEyeByScalar = None,
1749                  asEyeByVector = None,
1752                  toBeChecked   = False,
1755         Permet de définir une covariance :
1756         - asCovariance : entrée des données, comme une matrice compatible avec
1757           le constructeur de numpy.matrix
1758         - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1759           multiplicatif d'une matrice de corrélation identité, aucune matrice
1760           n'étant donc explicitement à donner
1761         - asEyeByVector : entrée des données comme un seul vecteur de variance,
1762           à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1763           n'étant donc explicitement à donner
1764         - asCovObject : entrée des données comme un objet python, qui a les
1765           methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1766           "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1767           "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1768         - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1769           pleine doit être vérifié
1771         self.__name       = str(name)
1772         self.__check      = bool(toBeChecked)
# Exactly one of the four internal representations is selected below:
# scalar * identity, diagonal vector, full matrix, or user object.
1775         self.__is_scalar  = False
1776         self.__is_vector  = False
1777         self.__is_matrix  = False
1778         self.__is_object  = False
1780         if asScript is not None:
1781             __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1783                 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1785                 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1787                 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1789                 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1791             __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1793         if __Scalar is not None:
# Accept "1.0" or "1.0; 2.0"-style strings; only the first token is used.
1794             if isinstance(__Scalar, str):
1795                 __Scalar = __Scalar.replace(";"," ").replace(","," ").split()
1796                 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1797             if numpy.array(__Scalar).size != 1:
1798                 raise ValueError('  The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n  Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
1799             self.__is_scalar = True
# abs() forces a non-negative variance multiplier.
1800             self.__C         = numpy.abs( float(__Scalar) )
1803         elif __Vector is not None:
1804             if isinstance(__Vector, str):
1805                 __Vector = __Vector.replace(";"," ").replace(","," ").split()
1806             self.__is_vector = True
1807             self.__C         = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) )
# shape/size describe the implied square matrix, not the stored diagonal.
1808             self.shape       = (self.__C.size,self.__C.size)
1809             self.size        = self.__C.size**2
1810         elif __Matrix is not None:
1811             self.__is_matrix = True
# NOTE(review): numpy.matrix is deprecated in recent NumPy — candidate for
# migration to ndarray, TODO confirm project policy.
1812             self.__C         = numpy.matrix( __Matrix, float )
1813             self.shape       = self.__C.shape
1814             self.size        = self.__C.size
1815         elif __Object is not None:
1816             self.__is_object = True
# Duck-type check: the user object must implement the matrix-like protocol.
1818             for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1819                 if not hasattr(self.__C,at):
1820                     raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1821             if hasattr(self.__C,"shape"):
1822                 self.shape       = self.__C.shape
1825             if hasattr(self.__C,"size"):
1826                 self.size        = self.__C.size
1831             # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1835     def __validate(self):
# Sanity checks: squareness for matrix/object forms, strict positivity for
# scalar/vector forms, and (optionally) SPD via a Cholesky attempt.
1837         if self.__C is None:
1838             raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1839         if self.ismatrix() and min(self.shape) != max(self.shape):
1840             raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1841         if self.isobject() and min(self.shape) != max(self.shape):
1842             raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1843         if self.isscalar() and self.__C <= 0:
1844             raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1845         if self.isvector() and (self.__C <= 0).any():
1846             raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# SPD check is also triggered implicitly at DEBUG/INFO logging levels.
1847         if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1849                 L = numpy.linalg.cholesky( self.__C )
1851                 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1852         if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1854                 L = self.__C.cholesky()
1856                 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Internal-type predicates.
1859         "Vérification du type interne"
1860         return self.__is_scalar
1863         "Vérification du type interne"
1864         return self.__is_vector
1867         "Vérification du type interne"
1868         return self.__is_matrix
1871         "Vérification du type interne"
1872         return self.__is_object
# Inverse: each representation inverts in its own cheap way (elementwise
# reciprocal for scalar/vector diagonals, full inverse for matrices).
1877             return Covariance(self.__name+"I", asCovariance  = numpy.linalg.inv(self.__C) )
1878         elif self.isvector():
1879             return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1880         elif self.isscalar():
1881             return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1882         elif self.isobject() and hasattr(self.__C,"getI"):
1883             return Covariance(self.__name+"I", asCovObject   = self.__C.getI() )
1885             return None # Indispensable
# Transpose: diagonal and scalar forms are symmetric, hence returned as-is.
1890             return Covariance(self.__name+"T", asCovariance  = self.__C.T )
1891         elif self.isvector():
1892             return Covariance(self.__name+"T", asEyeByVector = self.__C )
1893         elif self.isscalar():
1894             return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1895         elif self.isobject() and hasattr(self.__C,"getT"):
1896             return Covariance(self.__name+"T", asCovObject   = self.__C.getT() )
1898             raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
1901         "Décomposition de Cholesky"
1903             return Covariance(self.__name+"C", asCovariance  = numpy.linalg.cholesky(self.__C) )
1904         elif self.isvector():
1905             return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1906         elif self.isscalar():
1907             return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1908         elif self.isobject() and hasattr(self.__C,"cholesky"):
1909             return Covariance(self.__name+"C", asCovObject   = self.__C.cholesky() )
1911             raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
1913     def choleskyI(self):
1914         "Inversion de la décomposition de Cholesky"
1916             return Covariance(self.__name+"H", asCovariance  = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1917         elif self.isvector():
1918             return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1919         elif self.isscalar():
1920             return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1921         elif self.isobject() and hasattr(self.__C,"choleskyI"):
1922             return Covariance(self.__name+"H", asCovObject   = self.__C.choleskyI() )
1924             raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
1927         "Racine carrée matricielle"
# Matrix square root via scipy.linalg.sqrtm; numpy.real drops any spurious
# imaginary residue from the computation.
1930             return Covariance(self.__name+"C", asCovariance  = numpy.real(scipy.linalg.sqrtm(self.__C)) )
1931         elif self.isvector():
1932             return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1933         elif self.isscalar():
1934             return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1935         elif self.isobject() and hasattr(self.__C,"sqrtm"):
1936             return Covariance(self.__name+"C", asCovObject   = self.__C.sqrtm() )
1938             raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
1941         "Inversion de la racine carrée matricielle"
1944             return Covariance(self.__name+"H", asCovariance  = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
1945         elif self.isvector():
1946             return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1947         elif self.isscalar():
1948             return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1949         elif self.isobject() and hasattr(self.__C,"sqrtmI"):
1950             return Covariance(self.__name+"H", asCovObject   = self.__C.sqrtmI() )
1952             raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
1954     def diag(self, msize=None):
1955         "Diagonale de la matrice"
1957             return numpy.diag(self.__C)
1958         elif self.isvector():
1960         elif self.isscalar():
# A scalar form has no intrinsic dimension: msize is mandatory here.
1962                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1964                 return self.__C * numpy.ones(int(msize))
1965         elif self.isobject() and hasattr(self.__C,"diag"):
1966             return self.__C.diag()
1968             raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
1970     def trace(self, msize=None):
1971         "Trace de la matrice"
1973             return numpy.trace(self.__C)
1974         elif self.isvector():
1975             return float(numpy.sum(self.__C))
1976         elif self.isscalar():
1978                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1980                 return self.__C * int(msize)
1981         elif self.isobject():
1982             return self.__C.trace()
1984             raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
1986     def asfullmatrix(self, msize=None):
# Materialize the dense square matrix whatever the internal representation.
1989             return numpy.asarray(self.__C)
1990         elif self.isvector():
1991             return numpy.asarray( numpy.diag(self.__C), float )
1992         elif self.isscalar():
1994                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1996                 return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
1997         elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1998             return self.__C.asfullmatrix()
2000             raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2002     def assparsematrix(self):
2010         "x.__repr__() <==> repr(x)"
2011         return repr(self.__C)
2014         "x.__str__() <==> str(x)"
2015         return str(self.__C)
2017     def __add__(self, other):
2018         "x.__add__(y) <==> x+y"
2019         if self.ismatrix() or self.isobject():
2020             return self.__C + numpy.asmatrix(other)
2021         elif self.isvector() or self.isscalar():
2022             _A = numpy.asarray(other)
# Strided view trick: step shape[1]+1 over the flattened array walks the
# main diagonal, so the scalar/vector variance is added only there.
2023             _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2024             return numpy.asmatrix(_A)
2026     def __radd__(self, other):
2027         "x.__radd__(y) <==> y+x"
2028         raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2030     def __sub__(self, other):
2031         "x.__sub__(y) <==> x-y"
2032         if self.ismatrix() or self.isobject():
2033             return self.__C - numpy.asmatrix(other)
2034         elif self.isvector() or self.isscalar():
2035             _A = numpy.asarray(other)
2036             _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2037             return numpy.asmatrix(_A)
2039     def __rsub__(self, other):
2040         "x.__rsub__(y) <==> y-x"
2041         raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2044         "x.__neg__() <==> -x"
# Matrix-multiplication operator (@): shape-aware dispatch on "other" being
# a vector (raveled) or a matrix; returns plain ndarrays.
2047     def __matmul__(self, other):
2048         "x.__mul__(y) <==> x@y"
2049         if self.ismatrix() and isinstance(other, (int, float)):
2050             return numpy.asarray(self.__C) * other
2051         elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2052             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2053                 return numpy.ravel(self.__C @ numpy.ravel(other))
2054             elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2055                 return numpy.asarray(self.__C) @ numpy.asarray(other)
2057                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2058         elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2059             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2060                 return numpy.ravel(self.__C) * numpy.ravel(other)
2061             elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
# Broadcasting the stored diagonal as a column multiplies each row of other.
2062                 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2064                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2065         elif self.isscalar() and isinstance(other,numpy.matrix):
2066             return numpy.asarray(self.__C * other)
2067         elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2068             if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2069                 return self.__C * numpy.ravel(other)
2071                 return self.__C * numpy.asarray(other)
2072         elif self.isobject():
2073             return self.__C.__matmul__(other)
2075             raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
# Legacy * operator: same dispatch as __matmul__ but returns numpy.matrix
# column vectors/matrices.
2077     def __mul__(self, other):
2078         "x.__mul__(y) <==> x*y"
2079         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2080             return self.__C * other
2081         elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2082             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2083                 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2084             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2085                 return self.__C * numpy.asmatrix(other)
2087                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2088         elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2089             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2090                 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2091             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2092                 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2094                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2095         elif self.isscalar() and isinstance(other,numpy.matrix):
2096             return self.__C * other
2097         elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2098             if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2099                 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2101                 return self.__C * numpy.asmatrix(other)
2102         elif self.isobject():
2103             return self.__C.__mul__(other)
2105             raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# Right-hand variants: other @ C and other * C, mirroring the logic above.
2107     def __rmatmul__(self, other):
2108         "x.__rmul__(y) <==> y@x"
2109         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2110             return other * self.__C
2111         elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2112             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2113                 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2114             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2115                 return numpy.asmatrix(other) * self.__C
2117                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2118         elif self.isvector() and isinstance(other,numpy.matrix):
2119             if numpy.ravel(other).size == self.shape[0]: # Vecteur
2120                 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2121             elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2122                 return numpy.asmatrix(numpy.array(other) * self.__C)
2124                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2125         elif self.isscalar() and isinstance(other,numpy.matrix):
2126             return other * self.__C
2127         elif self.isobject():
2128             return self.__C.__rmatmul__(other)
2130             raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2132     def __rmul__(self, other):
2133         "x.__rmul__(y) <==> y*x"
2134         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2135             return other * self.__C
2136         elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2137             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2138                 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2139             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2140                 return numpy.asmatrix(other) * self.__C
2142                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2143         elif self.isvector() and isinstance(other,numpy.matrix):
2144             if numpy.ravel(other).size == self.shape[0]: # Vecteur
2145                 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2146             elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2147                 return numpy.asmatrix(numpy.array(other) * self.__C)
2149                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2150         elif self.isscalar() and isinstance(other,numpy.matrix):
2151             return other * self.__C
2152         elif self.isobject():
2153             return self.__C.__rmul__(other)
2155             raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2158         "x.__len__() <==> len(x)"
# len() is the number of rows of the (implied) square matrix.
2159         return self.shape[0]
2161 # ==============================================================================
# NOTE(review): numbered excerpt with missing source lines (the body of func
# and getfunc is not visible); comments cover only the visible code.
2162 class Observer2Func(object):
2164     Creation d'une fonction d'observateur a partir de son texte
2166     def __init__(self, corps=""):
# "corps" is the observer body as source text, stored for later execution.
2167         self.__corps = corps
2168     def func(self,var,info):
2169         "Fonction d'observation"
2172         "Restitution du pointeur de fonction dans l'objet"
2175 # ==============================================================================
2175 # ==============================================================================
# NOTE(review): numbered excerpt with missing source lines; comments below
# cover only the visible code.
2176 class CaseLogger(object):
2178     Conservation des commandes de creation d'un cas
2180     def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2181         self.__name     = str(__name)
2182         self.__objname  = str(__objname)
# Ordered log of registered commands; __switchoff gates further recording.
2183         self.__logSerie = []
2184         self.__switchoff = False
# Built-in output formats (viewers) and input formats (loaders), both
# extensible through the __addViewers/__addLoaders dicts.
2186             "TUI" :Interfaces._TUIViewer,
2187             "SCD" :Interfaces._SCDViewer,
2188             "YACS":Interfaces._YACSViewer,
2191             "TUI" :Interfaces._TUIViewer,
2192             "COM" :Interfaces._COMViewer,
2194         if __addViewers is not None:
2195             self.__viewers.update(dict(__addViewers))
2196         if __addLoaders is not None:
2197             self.__loaders.update(dict(__addLoaders))
2199     def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2200         "Enregistrement d'une commande individuelle"
2201         if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
# "self" is stripped so only user-relevant arguments are replayed.
2202             if "self" in __keys: __keys.remove("self")
2203             self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2205                 self.__switchoff = True
2207             self.__switchoff = False
2209     def dump(self, __filename=None, __format="TUI", __upa=""):
2210         "Restitution normalisée des commandes"
2211         if __format in self.__viewers:
2212             __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2214             raise ValueError("Dumping as \"%s\" is not available"%__format)
2215         return __formater.dump(__filename, __upa)
2217     def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2218         "Chargement normalisé des commandes"
2219         if __format in self.__loaders:
2220             __formater = self.__loaders[__format]()
2222             raise ValueError("Loading as \"%s\" is not available"%__format)
2223         return __formater.load(__filename, __content, __object)
2225 # ==============================================================================
# NOTE(review): numbered excerpt — the "def MultiFonction(__xserie," header
# line itself is missing from this view (name inferred from the error
# messages below); comments cover only the visible code.
2228                   _extraArguments = None,
# Default callable is the identity; presumably replaced by the operator to
# evaluate — TODO confirm against the missing signature line.
2229                   _sFunction      = lambda x: x,
2234     Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2235     correspondante de valeurs de la fonction en argument
2237     # Checks and initial definitions
2238     # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2239     if not PlatformInfo.isIterable( __xserie ):
2240         raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count: non-positive or None falls back to a default (line missing).
2242     if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2245         __mpWorkers = int(_mpWorkers)
2247         import multiprocessing
# Parallel branch: evaluate _sFunction over the prepared _jobs with a Pool.
2258         # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2259         import multiprocessing
2260         with multiprocessing.Pool(__mpWorkers) as pool:
2261             __multiHX = pool.map( _sFunction, _jobs )
2264         # logging.debug("MULTF Internal multiprocessing calculation end")
2266         # logging.debug("MULTF Internal monoprocessing calculation begin")
# Sequential branch: extra arguments are forwarded positionally (sequence)
# or by keyword (dict), depending on their type.
2268         if _extraArguments is None:
2269             for __xvalue in __xserie:
2270                 __multiHX.append( _sFunction( __xvalue ) )
2271         elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2272             for __xvalue in __xserie:
2273                 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2274         elif _extraArguments is not None and isinstance(_extraArguments, dict):
2275             for __xvalue in __xserie:
2276                 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2278             raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2279         # logging.debug("MULTF Internal monoprocessing calculation end")
2281     # logging.debug("MULTF Internal multifonction calculations end")
2284 # ==============================================================================
# NOTE(review): numbered excerpt with missing source lines; comments below
# cover only the visible code. Also note the mutable default arguments
# (_SSC = [], _SSV = {}) — shared across calls; apparently intentional for
# _SSV (used as an in/out store), TODO confirm for _SSC.
2285 def CostFunction3D(_x,
2286                    _Hm  = None,  # To simulate Hm(x) : HO["Direct"].appliedTo
2287                    _HmX = None,  # Already computed simulation of Hm(x)
2288                    _arg = None,  # Extra arguments for Hm, as a tuple
2293                    _SIV = False, # To be phased out for 8.0
2294                    _SSC = [],    # self._parameters["StoreSupplementaryCalculations"]
2295                    _nPS = 0,     # nbPreviousSteps
2296                    _QM  = "DA",  # QualityMeasure
2297                    _SSV = {},    # Input and/or output: self.StoredVariables
2298                    _fRt = False, # Whether to return the extended output
2299                    _sSc = True,  # Whether to store the SSC
2302     Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2303     et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2304     DFO, QuantileRegression
# Alias Persistence-style "store" as "append" on the requested series so the
# code below can use a single method name.
2310         for k in ["CostFunctionJ",
2316                   "SimulatedObservationAtCurrentOptimum",
2317                   "SimulatedObservationAtCurrentState",
2321             if hasattr(_SSV[k],"store"):
2322                 _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
2324     _X  = numpy.asmatrix(numpy.ravel( _x )).T
2325     if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2326         _SSV["CurrentState"].append( _X )
# Reuse a precomputed simulation when provided, otherwise apply the operator.
2328     if _HmX is not None:
2332             raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2336             _HX = _Hm( _X, *_arg )
2337         _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2339     if "SimulatedObservationAtCurrentState" in _SSC or \
2340        "SimulatedObservationAtCurrentOptimum" in _SSC:
2341         _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# NaN in the simulation invalidates the whole cost evaluation.
2343     if numpy.any(numpy.isnan(_HX)):
2344         Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2346         _Y   = numpy.asmatrix(numpy.ravel( _Y )).T
# Quality measure dispatch: J = Jb (background term) + Jo (observation term).
2347         if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2348             if _BI is None or _RI is None:
2349                 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2350             _Xb  = numpy.asmatrix(numpy.ravel( _Xb )).T
2351             Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2352             Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2353         elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2355                 raise ValueError("Observation error covariance matrix has to be properly defined!")
2357             Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2358         elif _QM in ["LeastSquares", "LS", "L2"]:
2360             Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
2361         elif _QM in ["AbsoluteValue", "L1"]:
2363             Jo  = numpy.sum( numpy.abs(_Y - _HX) )
2364         elif _QM in ["MaximumError", "ME"]:
2366             Jo  = numpy.max( numpy.abs(_Y - _HX) )
2367         elif _QM in ["QR", "Null"]:
2371             raise ValueError("Unknown asked quality measure!")
2373         J   = float( Jb ) + float( Jo )
2376         _SSV["CostFunctionJb"].append( Jb )
2377         _SSV["CostFunctionJo"].append( Jo )
2378         _SSV["CostFunctionJ" ].append( J )
# Track the running optimum over all evaluations after the _nPS offset.
2380     if "IndexOfOptimum" in _SSC or \
2381        "CurrentOptimum" in _SSC or \
2382        "SimulatedObservationAtCurrentOptimum" in _SSC:
2383         IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2384     if "IndexOfOptimum" in _SSC:
2385         _SSV["IndexOfOptimum"].append( IndexMin )
2386     if "CurrentOptimum" in _SSC:
2387         _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2388     if "SimulatedObservationAtCurrentOptimum" in _SSC:
2389         _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2394     if _QM in ["QR"]: # For QuantileRegression
2399 # ==============================================================================
2400 if __name__ == "__main__":
2401 print('\n AUTODIAGNOSTIC\n')