1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
class CacheManager(object):
    # NOTE(review): this excerpt is missing several original lines (docstring
    # quotes, the "def __init__"/"disable"/"enable" headers, some branch
    # bodies and a closing parenthesis). The fragments below are kept
    # verbatim and only annotated — do not assume they are complete.
    Classe générale de gestion d'un cache de calculs
    # __init__ keyword arguments (the "def __init__(self," line is not part
    # of this excerpt): redundancy tolerance and maximum cache length.
        toleranceInRedundancy = 1.e-18,
        lenghtOfRedundancy = -1,
        Les caractéristiques de tolérance peuvent être modifiées à la création.
        # Relative tolerance used by the redundancy (cache-hit) test below.
        self.__tolerBP = float(toleranceInRedundancy)
        # Maximum number of cached entries; -1 means "auto-size on first store"
        # (see storeValueInX).
        self.__lenghtOR = int(lenghtOfRedundancy)
        # Keep the initial length so it can be restored later (see the
        # enable/disable fragments at the end of the class).
        self.__initlnOR = self.__lenghtOR

    def wasCalculatedIn(self, xValue, oName="" ):
        "Checks the existence of a computation matching the given value"
        # Scan the cache backwards (most recent entries first), limited to the
        # configured cache length. Each entry is a tuple
        # (point, value, norm, operator-name) — see storeValueInX.
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            # Candidate rejection chain (the bodies of the first branches are
            # missing from this excerpt; presumably they "continue"/skip — TODO confirm):
            if not hasattr(xValue, 'size'):
            # Different operator name: not a match.
            elif (str(oName) != self.__listOPCV[i][3]):
            # Different vector size: not a match.
            elif (xValue.size != self.__listOPCV[i][0].size):
            # Cheap first-component test to reject most candidates before the
            # full norm computation below.
            elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
            # Full test: distance to the cached point, relative to its norm.
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                # Cache hit: retrieve the previously stored value H(x).
                __HxV = self.__listOPCV[i][1]

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Stores, for an operator o, a computation Hx matching the value x"
        if self.__lenghtOR < 0:
            # Auto-size the cache on first use, proportionally to the state
            # size but capped: 2 * min(size, 50) + 2.
            self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if new
            self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR += self.__lenghtOR
            self.__seenNames.append(str(oName))
        # Evict the oldest entries (FIFO) when the cache is full.
        while len(self.__listOPCV) > self.__lenghtOR:
            self.__listOPCV.pop(0)
        # Append the new entry; the tuple layout matches the indices used in
        # wasCalculatedIn. (The closing parenthesis is missing from this excerpt.)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)), # 0 Previous point
            copy.copy(HxValue), # 1 Previous value
            numpy.linalg.norm(xValue), # 2 Norm
            str(oName), # 3 Operator name

        # Fragment — presumably the body of a "disable" method (its def line
        # is missing): remember the current length and switch caching off.
        self.__initlnOR = self.__lenghtOR
        self.__enabled = False

        # Fragment — presumably the body of an "enable" method (its def line
        # is missing): restore the cache length and switch caching back on.
        self.__lenghtOR = self.__initlnOR
        self.__enabled = True
113 # ==============================================================================
class Operator(object):
    # NOTE(review): this excerpt is missing several original lines: docstring
    # quotes, some "def"/"else" lines, and the class-level attributes that the
    # code below reads (the shared cache manager Operator.CM and the global
    # counters Operator.NbCallsAsMatrix / NbCallsAsMethod / NbCallsOfCached).
    # Fragments are kept verbatim and only annotated.
    Classe générale d'interface de type opérateur simple
    # __init__ keyword arguments (the "def __init__(self," line and the
    # fromMethod/fromMatrix parameters are not part of this excerpt):
        name = "GenericOperator",
        avoidingRedundancy = True,
        reducingMemoryUse = False,
        inputAsMultiFunction = False,
        enableMultiProcess = False,
        extraArguments = None,
        On construit un objet de ce type en fournissant, à l'aide de l'un des
        deux mots-clé, soit une fonction ou un multi-fonction python, soit une
        - name : nom d'opérateur
        - fromMethod : argument de type fonction Python
        - fromMatrix : argument adapté au constructeur numpy.array/matrix
        - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
        - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
        - inputAsMultiFunction : booléen indiquant une fonction explicitement
          définie (ou pas) en multi-fonction
        - extraArguments : arguments supplémentaires passés à la fonction de
          base et ses dérivées (tuple ou dictionnaire)
        self.__name = str(name)
        # Per-instance call counters: matrix form, method form, cache hits.
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__reduceM = bool( reducingMemoryUse )
        self.__avoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # The user function is already a multi-function: use it directly.
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap the user's mono-function into a multi-function adapter
            # (MultiFonction is defined elsewhere in this module).
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type = "Method"
        elif fromMatrix is not None:
            # Matrix operator, possibly given as a string to be parsed first.
            if isinstance(fromMatrix, str):
                fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
            self.__Type = "Matrix"

    def disableAvoidingRedundancy(self):
        # Switch off the shared computation cache.
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # NOTE(review): only the "disable" call is visible here; the original
        # presumably enables the cache when self.__avoidRC is True and the
        # enabling branch is among the missing lines — TODO confirm.
        Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Permet de restituer le résultat de l'application de l'opérateur à une
        série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
        argument devant a priori être du bon type.
        - les arguments par série sont :
        - xValue : argument adapté pour appliquer l'opérateur
        - HValue : valeur précalculée de l'opérateur en ce point
        - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
        # Normalisation of xValue/HValue into series (_xValue/_HValue) happens
        # in lines missing from this excerpt.
        if HValue is not None:
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        if _HValue is not None:
            # A precomputed value must be provided for each input point.
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( _HValue[i] )
                # Also feed the cache so later calls can reuse these values.
                Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
            for i, xv in enumerate(_xValue):
                # Cache lookup; the alternative branch forcing
                # __alreadyCalculated = False is among the missing lines.
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                __alreadyCalculated = False
            if __alreadyCalculated:
                self.__addOneCacheCall()
                if self.__Matrix is not None:
                    # Linear case: direct matrix-vector product.
                    self.__addOneMatrixCall()
                    _hv = self.__Matrix @ numpy.ravel(xv)
                    self.__addOneMethodCall()
                _HxValue.append( _hv )
        # Batched multi-function evaluation of all points not served above.
        if len(_xserie)>0 and self.__Matrix is None:
            if self.__extraArgs is None:
                _hserie = self.__Method( _xserie ) # MF computation
                _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
            if not hasattr(_hserie, "pop"):
                raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
                Operator.CM.storeValueInX(_xv,_hv,self.__name)
        if returnSerieAsArrayMatrix:
            # Stack the results column-wise into a single 2D array.
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Permet de restituer le résultat de l'application de l'opérateur à des
        paires (xValue, uValue). Cette méthode se contente d'appliquer, son
        argument devant a priori être du bon type. Si la uValue est None,
        on suppose que l'opérateur ne s'applique qu'à xValue.
        - paires : les arguments par paire sont :
        - xValue : argument X adapté pour appliquer l'opérateur
        - uValue : argument U adapté pour appliquer l'opérateur
        - argsAsSerie : indique si l'argument est une mono ou multi-valeur
        if argsAsSerie: _xuValue = paires
        else: _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        if self.__Matrix is not None:
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                # Linear case: the control u is ignored by the matrix form.
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Method case: pass the (x,u) pair when u is given, else just x
            # (the "else:" line between the two appends is missing here).
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # MF computation
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Permet de restituer le résultat de l'application de l'opérateur à une
        série d'arguments xValue, sachant que l'opérateur est valable en
        xNominal. Cette méthode se contente d'appliquer, son argument devant a
        priori être du bon type. Si l'opérateur est linéaire car c'est une
        matrice, alors il est valable en tout point nominal et xNominal peut
        être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
        permet d'indiquer que l'argument est multi-paires.
        - paires : les arguments par paire sont :
        - xNominal : série d'arguments permettant de donner le point où
          l'opérateur est construit pour être ensuite appliqué
        - xValue : série d'arguments adaptés pour appliquer l'opérateur
        - argsAsSerie : indique si l'argument est une mono ou multi-valeur
        if argsAsSerie: _nxValue = paires
        else: _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        if self.__Matrix is not None:
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                # Linear case: the nominal point is irrelevant.
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Method case ("else:" line missing): evaluate on the whole series.
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # MF computation
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        Permet de renvoyer l'opérateur sous la forme d'une matrice
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        # The string sentinel "UnknownVoidValue" marks "no operating point given".
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
            self.__addOneMethodCall( len(ValueForMethodForm) )
            for _vfmf in ValueForMethodForm:
                # Evaluate the method at each operating point (control u=None).
                mValue.append( self.__Method(((_vfmf, None),)) )
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        if argsAsSerie: return mValue
        else: return mValue[-1]

        # Fragment — presumably the body of a "shape" accessor (its def line
        # is missing from this excerpt).
        Renvoie la taille sous forme numpy si l'opérateur est disponible sous
        la forme d'une matrice
        if self.__Matrix is not None:
            return self.__Matrix.shape
        raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        Renvoie les nombres d'évaluations de l'opérateur
        # Tuple layout (assignment line missing): the four per-instance
        # counters (total, matrix, method, cached) followed by the same four
        # counters accumulated over all Operator instances.
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls
        else: return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Counts one call"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self, nb = 1):
        "Counts one call"
        self.__NbCallsAsMethod += nb # Local count
        Operator.NbCallsAsMethod += nb # Global count

    def __addOneCacheCall(self):
        "Counts one call"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
408 # ==============================================================================
class FullOperator(object):
    # NOTE(review): this excerpt is missing several original lines: docstring
    # quotes, some "def"/"else" lines, the initialisation of __Parameters /
    # __Matrix / __Function / __appliedInX / self.__FO, and some closing
    # parentheses. Fragments are kept verbatim and only annotated.
    Classe générale d'interface de type opérateur complet
    (Direct, Linéaire Tangent, Adjoint)
    # __init__ keyword arguments (the "def __init__(self," line and the
    # asMatrix/appliedInX/scheduledBy/toBeChecked parameters are not part of
    # this excerpt):
        name = "GenericFullOperator",
        asOneFunction = None, # 1 Function
        asThreeFunctions = None, # 3 Functions in a dictionary
        asScript = None, # 1 or 3 Function(s) by script
        asDict = None, # Parameters
        extraArguments = None,
        performancePrf = None,
        inputAsMF = False,# Function(s) as Multi-Functions
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            # Multiprocessing in derivatives and in evaluation are exclusive.
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # The default is equivalent to "ReducedOverallRequirements"
        __reduceM, __avoidRC = True, True
        if performancePrf is not None:
            # Map the user-facing performance profile name onto the two
            # internal booleans (memory reduction, redundancy avoidance).
            if performancePrf == "ReducedAmountOfCalculation":
                __reduceM, __avoidRC = False, True
            elif performancePrf == "ReducedMemoryFootprint":
                __reduceM, __avoidRC = True, False
            elif performancePrf == "NoSavings":
                __reduceM, __avoidRC = False, False
        if asScript is not None:
            __Matrix, __Function = None, None
            # Import either a matrix or Direct/Tangent/Adjoint functions from
            # the script (the "if asMatrix:"/"elif asOneFunction:" guard lines
            # are missing from this excerpt).
            __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
            __Function.update({"useApproximatedDerivatives":True})
            __Function.update(__Parameters)
            elif asThreeFunctions:
                "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
            __Function.update(__Parameters)
        if asOneFunction is not None:
            if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                if asOneFunction["Direct"] is not None:
                    __Function = asOneFunction
                raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
            # Bare callable: wrap it and request approximated derivatives.
            __Function = { "Direct":asOneFunction }
            __Function.update({"useApproximatedDerivatives":True})
            __Function.update(__Parameters)
        elif asThreeFunctions is not None:
            # Full Tangent/Adjoint pair given and approximation not requested.
            if isinstance(asThreeFunctions, dict) and \
                ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                __Function = asThreeFunctions
            # Only Direct given: fall back to approximated derivatives.
            elif isinstance(asThreeFunctions, dict) and \
                ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                __Function = asThreeFunctions
                __Function.update({"useApproximatedDerivatives":True})
            raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
            # When Direct is absent, reuse Tangent as the Direct operator.
            if "Direct" not in asThreeFunctions:
                __Function["Direct"] = asThreeFunctions["Tangent"]
            __Function.update(__Parameters)
        # Historical Python 2 guard, kept commented-out in the original:
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            # Single value: store it under the conventional "HXb" key.
            __appliedInX = {"HXb":appliedInX}
        if scheduledBy is not None:
            self.__T = scheduledBy
        # Case 1: approximated derivatives requested around a Direct operator.
        if isinstance(__Function, dict) and \
            ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
            ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Fill in every finite-difference default the user did not set.
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            # Local import to avoid a circular dependency at module load time.
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["CenteredFiniteDifference"],
                increment = __Function["DifferentialIncrement"],
                dX = __Function["withdX"],
                extraArguments = self.__extraArgs,
                reducingMemoryUse = __Function["withReducingMemoryUse"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
                mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers = __Function["NumberOfProcesses"],
                mfEnabled = __Function["withmfEnabled"],
            self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        # Case 2: explicit Direct/Tangent/Adjoint functions all provided.
        elif isinstance(__Function, dict) and \
            ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
            (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        # Case 3: linear operator given as a matrix (Adjoint = transpose).
        elif asMatrix is not None:
            if isinstance(__Matrix, str):
                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
            __matrice = numpy.asarray( __Matrix, dtype=float )
            self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
        # Normalise the "applied in X" values to column numpy.matrix vectors.
        # NOTE(review): numpy.matrix is discouraged and numpy.float was removed
        # in NumPy >= 1.24; this code only runs on older NumPy — worth porting.
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # Case of a true numpy.matrix
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Case of a vector stored as a 2-dimensional array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
            self.__FO["AppliedInX"] = None

        # Fragment — body of __repr__ (its def line is missing here).
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

        # Fragment — body of __str__ (its def line is missing here).
        "x.__str__() <==> str(x)"
        return str(self.__FO)
594 # ==============================================================================
595 class Algorithm(object):
597 Classe générale d'interface de type algorithme
599 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
600 d'assimilation, en fournissant un container (dictionnaire) de variables
601 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
603 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
605 def __init__(self, name):
607 L'initialisation présente permet de fabriquer des variables de stockage
608 disponibles de manière générique dans les algorithmes élémentaires. Ces
609 variables de stockage sont ensuite conservées dans un dictionnaire
610 interne à l'objet, mais auquel on accède par la méthode "get".
612 Les variables prévues sont :
613 - APosterioriCorrelations : matrice de corrélations de la matrice A
614 - APosterioriCovariance : matrice de covariances a posteriori : A
615 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
616 - APosterioriVariances : vecteur des variances de la matrice A
617 - Analysis : vecteur d'analyse : Xa
618 - BMA : Background moins Analysis : Xa - Xb
619 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
620 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
621 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
622 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
623 - CostFunctionJo : partie observations de la fonction-coût : Jo
624 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
625 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
626 - CurrentOptimum : état optimal courant lors d'itérations
627 - CurrentState : état courant lors d'itérations
628 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
629 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
630 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
631 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
632 - Innovation : l'innovation : d = Y - H(X)
633 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
634 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
635 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
636 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
637 - KalmanGainAtOptimum : gain de Kalman à l'optimum
638 - MahalanobisConsistency : indicateur de consistance des covariances
639 - OMA : Observation moins Analyse : Y - Xa
640 - OMB : Observation moins Background : Y - Xb
641 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
642 - ForecastState : état prédit courant lors d'itérations
643 - Residu : dans le cas des algorithmes de vérification
644 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
645 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
646 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
647 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
648 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
649 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
650 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
651 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
652 On peut rajouter des variables à stocker dans l'initialisation de
653 l'algorithme élémentaire qui va hériter de cette classe
655 logging.debug("%s Initialisation", str(name))
656 self._m = PlatformInfo.SystemUsage()
658 self._name = str( name )
659 self._parameters = {"StoreSupplementaryCalculations":[]}
660 self.__internal_state = {}
661 self.__required_parameters = {}
662 self.__required_inputs = {
663 "RequiredInputValues":{"mandatory":(), "optional":()},
664 "ClassificationTags":[],
666 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
667 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
668 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
670 self.StoredVariables = {}
671 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
672 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
673 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
674 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
675 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
676 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
677 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
678 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
679 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
680 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
681 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
682 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
683 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
684 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
685 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
686 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
687 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
688 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
689 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
690 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
691 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
692 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
693 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
694 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
695 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
696 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
697 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
698 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
699 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
700 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
701 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
702 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
703 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
704 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
705 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
706 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
707 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
708 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
709 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
710 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
711 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
712 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
714 for k in self.StoredVariables:
715 self.__canonical_stored_name[k.lower()] = k
717 for k, v in self.__variable_names_not_public.items():
718 self.__canonical_parameter_name[k.lower()] = k
719 self.__canonical_parameter_name["algorithm"] = "Algorithm"
720 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
# _pre_run: common pre-processing executed before any algorithm run — parameter
# setup from `Parameters`, presence checks of vectors (Xb/Y/U), covariances
# (B/R/Q) and operators (HO/EM/CM), bounds and forced-initial-point handling,
# a TNC-specific workaround, and solver verbosity configuration.
# NOTE(review): this extraction is missing several original source lines (the
# numbering gaps, e.g. 737/742/744 around the nested checkers, mark dropped
# "if argument is None…"/"else:" lines and docstring delimiters). Consult the
# upstream file before editing; do not edit from this extraction alone.
722     def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
724         logging.debug("%s Lancement", self._name)
725         logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
726         self._getTimeState(reset=True)
728         # Mise a jour des paramètres internes avec le contenu de Parameters, en
729         # reprenant les valeurs par défauts pour toutes celles non définies
730         self.__setParameters(Parameters, reset=True)
731         for k, v in self.__variable_names_not_public.items():
732             if k not in self._parameters: self.__setParameters( {k:v} )
# Local checker: logs/raises according to whether a vector input is mandatory,
# optional, or unrequested (the surrounding "if argument is None"/"else:"
# lines are absent from this extraction — TODO confirm against upstream).
734         # Corrections et compléments des vecteurs
735         def __test_vvalue(argument, variable, argname, symbol=None):
736             if symbol is None: symbol = variable
738                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
739                     raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
740                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
741                     logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
743                     logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
745                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
746                     logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
747                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
748                     logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
750                 logging.debug("%s %s vector %s is set although neither required nor optional, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
752         __test_vvalue( Xb, "Xb", "Background or initial state" )
753         __test_vvalue( Y, "Y", "Observation" )
754         __test_vvalue( U, "U", "Control" )
# Same pattern for the three error covariance matrices.
756         # Corrections et compléments des covariances
757         def __test_cvalue(argument, variable, argname, symbol=None):
758             if symbol is None: symbol = variable
760                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
761                     raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
762                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
763                     logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
765                     logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
767                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
768                     logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
769                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
770                     logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
772                     logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
774         __test_cvalue( B, "B", "Background" )
775         __test_cvalue( R, "R", "Observation" )
776         __test_cvalue( Q, "Q", "Evolution" )
# Same pattern for the operators; an operator is "unset" when None or an
# empty dict (this guard line is visible here, at 781).
778         # Corrections et compléments des opérateurs
779         def __test_ovalue(argument, variable, argname, symbol=None):
780             if symbol is None: symbol = variable
781             if argument is None or (isinstance(argument,dict) and len(argument)==0):
782                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
783                     raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
784                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
785                     logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
787                     logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
789                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
790                     logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
791                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
792                     logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
794                     logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
796         __test_ovalue( HO, "HO", "Observation", "H" )
797         __test_ovalue( EM, "EM", "Evolution", "M" )
798         __test_ovalue( CM, "CM", "Control Model", "C" )
# Bounds are kept only when given as a non-empty list/tuple; otherwise forced
# to None (but NOT for StateBoundsForQuantiles — see the original comment).
800         # Corrections et compléments des bornes
801         if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
802             logging.debug("%s Bounds taken into account"%(self._name,))
804             self._parameters["Bounds"] = None
805         if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
806             logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
807             # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes
# A forced initialization point must match the background size; otherwise the
# background itself (raveled) becomes the initial point.
809         # Corrections et compléments de l'initialisation en X
810         if "InitializationPoint" in self._parameters:
812             if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
813                 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
814                     raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
815                         %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
816                 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
818                 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
820             if self._parameters["InitializationPoint"] is None:
821                 raise ValueError("Forced initial point can not be set without any given Background or required value")
823         # Correction pour pallier a un bug de TNC sur le retour du Minimum
824         if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
825             self.setParameterValue("StoreInternalVariables",True)
# Verbosity: verbose solver messages below WARNING level, silent otherwise;
# 15 is the fallback message mask when scipy is unavailable.
827         # Verbosité et logging
828         if logging.getLogger().level < logging.WARNING:
829             self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
830             if PlatformInfo.has_scipy:
831                 import scipy.optimize
832                 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
834                 self._parameters["optmessages"] = 15
836             self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
837             if PlatformInfo.has_scipy:
838                 import scipy.optimize
839                 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
841                 self._parameters["optmessages"] = 15
# _post_run: common post-processing after an algorithm run — derives variance,
# standard-deviation and correlation outputs from each stored a-posteriori
# covariance matrix when requested, then logs operator call counts, memory
# use and timings. NOTE(review): original line 846 (presumably the docstring)
# is absent from this extraction.
845     def _post_run(self,_oH=None):
847         if ("StoreSupplementaryCalculations" in self._parameters) and \
848             "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
849             for _A in self.StoredVariables["APosterioriCovariance"]:
850                 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
851                     self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
852                 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
853                     self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
854                 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation = D^{-1/2} A D^{-1/2} with D the diagonal of A.
855                     _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
856                     _C = numpy.dot(_EI, numpy.dot(_A, _EI))
857                     self.StoredVariables["APosterioriCorrelations"].store( _C )
858         if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
859             logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
860             logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
861         logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
862         logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
863         logging.debug("%s Terminé", self._name)
866 def _toStore(self, key):
867 "True if in StoreSupplementaryCalculations, else False"
868 return key in self._parameters["StoreSupplementaryCalculations"]
# get: return one stored variable (case-insensitive key lookup through the
# canonical-name map) or the whole StoredVariables dict when no key is given.
# NOTE(review): original lines 877/878/880 (docstring delimiter and the
# "if key is not None:"/"else:" pair) are absent from this extraction.
870     def get(self, key=None):
872         Renvoie l'une des variables stockées identifiée par la clé, ou le
873         dictionnaire de l'ensemble des variables disponibles en l'absence de
874         clé. Ce sont directement les variables sous forme objet qui sont
875         renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
876         des classes de persistance.
879             return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
881             return self.StoredVariables
# __contains__: case-insensitive membership test on stored variable names.
# NOTE(review): original lines 886/887 (presumably "return False"/"else:")
# are absent from this extraction.
883     def __contains__(self, key=None):
884         "D.__contains__(k) -> True if D has a key k, else False"
885         if key is None or key.lower() not in self.__canonical_stored_name:
888             return self.__canonical_stored_name[key.lower()] in self.StoredVariables
# Fragments of dict-like "keys" and "pop" methods.
# NOTE(review): both "def" lines (original 890 and 897) and several branch
# lines (894-896, 901-904) are absent from this extraction — do not edit
# this span without consulting the upstream file.
891         "D.keys() -> list of D's keys"
892         if hasattr(self, "StoredVariables"):
893             return self.StoredVariables.keys()
# pop removes a stored variable by case-insensitive name, with fallback d.
898         "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
899         if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
900             return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
905                 raise TypeError("pop expected at least 1 arguments, got 0")
906             "If key is not found, d is returned if given, otherwise KeyError is raised"
912 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
914 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
915 sa forme mathématique la plus naturelle possible.
917 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# defineRequiredParameter: register a required algorithm parameter with its
# default value, typecast, message and validity constraints, and index it
# case-insensitively. NOTE(review): the registration dict literal is gutted
# in this extraction (original lines 928, 930-935 absent), as are the
# docstring delimiters and the "if name is None:" guard (924).
919     def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
921         Permet de définir dans l'algorithme des paramètres requis et leurs
922         caractéristiques par défaut.
925             raise ValueError("A name is mandatory to define a required parameter.")
927         self.__required_parameters[name] = {
929             "typecast" : typecast,
936         self.__canonical_parameter_name[name.lower()] = name
# Logging the default goes through setParameterValue, which validates it.
937         logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# getRequiredParameters: return either the sorted list of required-parameter
# names (noDetails=True) or the full registration dictionary.
# NOTE(review): docstring delimiters and the "if noDetails:"/"else:" pair
# (original 944/946) are absent from this extraction.
939     def getRequiredParameters(self, noDetails=True):
941         Renvoie la liste des noms de paramètres requis ou directement le
942         dictionnaire des paramètres requis.
945             return sorted(self.__required_parameters.keys())
947             return self.__required_parameters
# setParameterValue: resolve and validate one required parameter value —
# apply the default when value is None, apply the typecast, then check
# minval/maxval bounds and listval/listadv membership (element-wise for
# list/tuple values). NOTE(review): this extraction is missing the
# "__val = None" assignment (962), the "else:"/"try:"/"except" scaffolding
# (966, 968-969, 971), the "for v in __val:" loop header (980), the inner
# "else:" (983) and the final "return __val" — consult upstream before editing.
949     def setParameterValue(self, name=None, value=None):
951         Renvoie la valeur d'un paramètre requis de manière contrôlée
# Canonical (case-insensitive) parameter name and its registered constraints.
953         __k = self.__canonical_parameter_name[name.lower()]
954         default = self.__required_parameters[__k]["default"]
955         typecast = self.__required_parameters[__k]["typecast"]
956         minval = self.__required_parameters[__k]["minval"]
957         maxval = self.__required_parameters[__k]["maxval"]
958         listval = self.__required_parameters[__k]["listval"]
959         listadv = self.__required_parameters[__k]["listadv"]
961         if value is None and default is None:
963         elif value is None and default is not None:
964             if typecast is None: __val = default
965             else: __val = typecast( default )
967             if typecast is None: __val = value
970                     __val = typecast( value )
972                     raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Bounds are checked element-wise through a float ndarray view of the value.
974         if minval is not None and (numpy.array(__val, float) < minval).any():
975             raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
976         if maxval is not None and (numpy.array(__val, float) > maxval).any():
977             raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
# listadv is an "advanced" allowed list that is accepted but never advertised
# in the error message.
978         if listval is not None or listadv is not None:
979             if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
981                     if listval is not None and v in listval: continue
982                     elif listadv is not None and v in listadv: continue
984                         raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
985             elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
986                 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
990 def requireInputArguments(self, mandatory=(), optional=()):
992 Permet d'imposer des arguments de calcul requis en entrée.
994 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
995 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
997 def getInputArguments(self):
999 Permet d'obtenir les listes des arguments de calcul requis en entrée.
1001 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
1003 def setAttributes(self, tags=()):
1005 Permet d'adjoindre des attributs comme les tags de classification.
1006 Renvoie la liste actuelle dans tous les cas.
1008 self.__required_inputs["ClassificationTags"].extend( tags )
1009 return self.__required_inputs["ClassificationTags"]
# __setParameters: merge the user-supplied dict into the internal parameter
# store, then re-resolve every registered required parameter (user value if
# present under any case variant, registered default otherwise) and log it.
# NOTE(review): original lines 1025, 1027-1028 and 1031 ("else:" branches and
# surrounding logging scaffolding) are absent from this extraction.
1011     def __setParameters(self, fromDico={}, reset=False):
1013         Permet de stocker les paramètres reçus dans le dictionnaire interne.
1015         self._parameters.update( fromDico )
# Map canonical parameter name -> the exact key spelling the user supplied.
1016         __inverse_fromDico_keys = {}
1017         for k in fromDico.keys():
1018             if k.lower() in self.__canonical_parameter_name:
1019                 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1020         #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1021         __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
1022         for k in self.__required_parameters.keys():
1023             if k in __canonic_fromDico_keys:
1024                 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
1026                 self._parameters[k] = self.setParameterValue(k)
# Long values (>100 items) are logged by length only, to keep logs readable.
1029             if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
1030                 logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1032                 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
1034 def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1036 Permet de stocker des variables nommées constituant l'état interne
1038 if reset: # Vide le dictionnaire préalablement
1039 self.__internal_state = {}
1040 if key is not None and value is not None:
1041 self.__internal_state[key] = value
1042 self.__internal_state.update( dict(fromDico) )
1044 def _getInternalState(self, key=None):
1046 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1048 if key is not None and key in self.__internal_state:
1049 return self.__internal_state[key]
1051 return self.__internal_state
# _getTimeState: with reset=True, record the reference CPU/elapsed times;
# otherwise return (cpu, elapsed) seconds consumed since the reference.
# NOTE(review): the "if reset:" header (original 1057) and the branch line(s)
# around 1060-1061 are absent from this extraction — TODO confirm the exact
# branching against the upstream file before editing.
1053     def _getTimeState(self, reset=False):
1055         Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1058             self.__initial_cpu_time = time.process_time()
1059             self.__initial_elapsed_time = time.perf_counter()
1062             self.__cpu_time = time.process_time() - self.__initial_cpu_time
1063             self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1064             return self.__cpu_time, self.__elapsed_time
# _StopOnTimeLimit: stopping criterion on CPU and/or elapsed time limits,
# read from the MaximumCpuTime / MaximumElapsedTime parameters.
# NOTE(review): the trailing "else:" (1073) and the return statements
# (original 1075-1079, presumably returning __SC or (__SC, __SR) depending
# on withReason) are absent from this extraction.
1066     def _StopOnTimeLimit(self, X=None, withReason=False):
1067         "Stop criteria on time limit: True/False [+ Reason]"
1068         c, e = self._getTimeState()
1069         if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1070             __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1071         elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1072             __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1074             __SC, __SR = False, ""
1080 # ==============================================================================
# PartialAlgorithm: storage-only stand-in for "Algorithm" — same StoredVariables
# access protocol (get/_toStore, case-insensitive names) without the advanced
# checks. NOTE(review): docstring delimiters and the "if key is not None:"/
# "else:" pair of get() (original 1113-1114, 1116) are absent from this
# extraction.
1081 class PartialAlgorithm(object):
1083     Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
1084     action avancée comme la vérification . Pour les méthodes reprises ici,
1085     le fonctionnement est identique à celles de la classe "Algorithm".
1087     def __init__(self, name):
1088         self._name = str( name )
1089         self._parameters = {"StoreSupplementaryCalculations":[]}
# Minimal set of persisted outputs shared by all algorithms.
1091         self.StoredVariables = {}
1092         self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
1093         self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
1094         self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
1095         self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
1096         self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
# Case-insensitive index over stored variable names.
1098         self.__canonical_stored_name = {}
1099         for k in self.StoredVariables:
1100             self.__canonical_stored_name[k.lower()] = k
1102     def _toStore(self, key):
1103         "True if in StoreSupplementaryCalculations, else False"
1104         return key in self._parameters["StoreSupplementaryCalculations"]
1106     def get(self, key=None):
1108         Renvoie l'une des variables stockées identifiée par la clé, ou le
1109         dictionnaire de l'ensemble des variables disponibles en l'absence de
1110         clé. Ce sont directement les variables sous forme objet qui sont
1111         renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
1112         des classes de persistance.
1115             return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
1117             return self.StoredVariables
1119 # ==============================================================================
# AlgorithmAndParameters: interface class binding an algorithm choice and its
# parameters. The constructor fragment below resolves the algorithm name from
# an explicit argument or a script, then instantiates it via __setAlgorithm.
# NOTE(review): the "def __init__(…)" line, most of its signature, and several
# branch lines (original 1124, 1126-1131, 1133-1135, 1139, 1141, 1144, 1146,
# 1150, 1152) are absent from this extraction — do not edit from this view.
1120 class AlgorithmAndParameters(object):
1122     Classe générale d'interface d'action pour l'algorithme et ses paramètres
1125                  name = "GenericAlgorithm",
1132         self.__name = str(name)
1136         self.__algorithm = {}
1137         self.__algorithmFile = None
1138         self.__algorithmName = None
1140         self.updateParameters( asDict, asScript )
# Algorithm name may come from a Python script ("Algorithm" variable) or
# directly from the asAlgorithm argument.
1142         if asAlgorithm is None and asScript is not None:
1143             __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1145             __Algo = asAlgorithm
1147         if __Algo is not None:
1148             self.__A = str(__Algo)
1149             self.__P.update( {"Algorithm":self.__A} )
1151             self.__setAlgorithm( self.__A )
1153         self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
# updateParameters: merge parameters into the internal store, taken either
# from a dict argument or from a script's "Parameters" variable.
# NOTE(review): the signature continuation lines (1156-1158) and the "else:"
# branch (1162-1164, presumably "__Dict = asDict") are absent from this
# extraction.
1155     def updateParameters(self,
1159         "Mise a jour des parametres"
1160         if asDict is None and asScript is not None:
1161             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1165         if __Dict is not None:
1166             self.__P.update( dict(__Dict) )
# executePythonScheme: run the assimilation computation in pure Python —
# unpack the case objects from asDictAO (unwrapping through getO() where
# available), validate shapes, then call the algorithm's run().
# NOTE(review): the run() call's argument lines (original 1194-1202,
# 1204-1206) are absent from this extraction; only "Parameters" is visible.
1168     def executePythonScheme(self, asDictAO = None):
1169         "Permet de lancer le calcul d'assimilation"
# Clear the operator calculation cache before a fresh run.
1170         Operator.CM.clearCache()
1172         if not isinstance(asDictAO, dict):
1173             raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Background may alternatively be provided as a CheckingPoint.
1174         if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1175         elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1176         else: self.__Xb = None
1177         if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1178         else: self.__Y = asDictAO["Observation"]
1179         if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1180         else: self.__U = asDictAO["ControlInput"]
1181         if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1182         else: self.__HO = asDictAO["ObservationOperator"]
1183         if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1184         else: self.__EM = asDictAO["EvolutionModel"]
1185         if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1186         else: self.__CM = asDictAO["ControlModel"]
1187         self.__B = asDictAO["BackgroundError"]
1188         self.__R = asDictAO["ObservationError"]
1189         self.__Q = asDictAO["EvolutionError"]
# Consistency check of all sizes/shapes before launching the computation.
1191         self.__shape_validate()
1193         self.__algorithm.run(
1203             Parameters = self.__P,
# executeYACSScheme: run the assimilation case through a SALOME/YACS XML
# scheme — environment checks, runtime setup, schema load/validation, then
# execution and final state check.
# NOTE(review): several scaffolding lines are absent from this extraction
# ("else:" at 1211, the "try:" headers around 1229/1235, the validation
# branches 1239/1244-1245/1248/1254, and the executor run lines 1256/1259-
# 1261). Consult the upstream file before editing.
1207     def executeYACSScheme(self, FileName=None):
1208         "Permet de lancer le calcul d'assimilation"
1209         if FileName is None or not os.path.exists(FileName):
1210             raise ValueError("a YACS file name has to be given for YACS execution.\n")
1212             __file = os.path.abspath(FileName)
1213             logging.debug("The YACS file name is \"%s\"."%__file)
# The three SALOME platform components must all be importable.
1214         if not PlatformInfo.has_salome or \
1215             not PlatformInfo.has_yacs or \
1216             not PlatformInfo.has_adao:
1217             raise ImportError("\n\n"+\
1218                 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1219                 "Please load the right environnement before trying to use it.\n")
1222         import SALOMERuntime
1224         SALOMERuntime.RuntimeSALOME_setRuntime()
1226         r = pilot.getRuntime()
1227         xmlLoader = loader.YACSLoader()
1228         xmlLoader.registerProcCataLoader()
# Load the scheme file itself as a proc catalog, best effort.
1230             catalogAd = r.loadCatalog("proc", __file)
1231             r.addCatalog(catalogAd)
1236             p = xmlLoader.load(__file)
1237         except IOError as ex:
1238             print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Report (without raising) parser errors and invalid schemas.
1240         logger = p.getLogger("parser")
1241         if not logger.isEmpty():
1242             print("The imported YACS XML schema has errors on parsing:")
1243             print(logger.getStr())
1246             print("The YACS XML schema is not valid and will not be executed:")
1247             print(p.getErrorReport())
1249         info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1250         p.checkConsistency(info)
1251         if info.areWarningsOrErrors():
1252             print("The YACS XML schema is not coherent and will not be executed:")
1253             print(info.getGlobalRepr())
1255         e = pilot.ExecutorSwig()
1257         if p.getEffectiveState() != pilot.DONE:
1258             print(p.getErrorReport())
# get: look a key up first among the algorithm's stored variables, then among
# the parameters; with no key, return all parameters minus non-public ones.
# NOTE(review): the "else:" (1268) and the final "return allvariables"
# (1271-1272) are absent from this extraction. Note that the fallback branch
# pops non-public keys from self.__P itself (allvariables aliases it) —
# presumably intentional upstream; verify before editing.
1262     def get(self, key = None):
1263         "Vérifie l'existence d'une clé de variable ou de paramètres"
1264         if key in self.__algorithm:
1265             return self.__algorithm.get( key )
1266         elif key in self.__P:
1267             return self.__P[key]
1269             allvariables = self.__P
1270             for k in self.__variable_names_not_public: allvariables.pop(k, None)
1273 def pop(self, k, d):
1274 "Necessaire pour le pickling"
1275 return self.__algorithm.pop(k, d)
1277 def getAlgorithmRequiredParameters(self, noDetails=True):
1278 "Renvoie la liste des paramètres requis selon l'algorithme"
1279 return self.__algorithm.getRequiredParameters(noDetails)
1281 def getAlgorithmInputArguments(self):
1282 "Renvoie la liste des entrées requises selon l'algorithme"
1283 return self.__algorithm.getInputArguments()
1285 def getAlgorithmAttributes(self):
1286 "Renvoie la liste des attributs selon l'algorithme"
1287 return self.__algorithm.setAttributes()
# setObserver: attach an observer callable __O (with parameters __I and
# scheduler __S — presumably; TODO confirm argument roles upstream) to the
# stored variable named __V, after sanity checks on the algorithm state.
# NOTE(review): the "else:" (1296) and the remaining setDataObserver keyword
# arguments (original 1298-1299, 1301-1302) are absent from this extraction.
1289     def setObserver(self, __V, __O, __I, __S):
1290         if self.__algorithm is None \
1291             or isinstance(self.__algorithm, dict) \
1292             or not hasattr(self.__algorithm,"StoredVariables"):
1293             raise ValueError("No observer can be build before choosing an algorithm.")
1294         if __V not in self.__algorithm:
1295             raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1297             self.__algorithm.StoredVariables[ __V ].setDataObserver(
1300                 HookParameters = __I,
# removeObserver: detach observer __O from stored variable __V (the __A flag
# is forwarded — presumably "all observers"; TODO confirm upstream), with the
# same sanity checks as setObserver.
# NOTE(review): the "else:" (1310) and the removeDataObserver argument lines
# (original 1312-1315) are absent from this extraction.
1303     def removeObserver(self, __V, __O, __A = False):
1304         if self.__algorithm is None \
1305             or isinstance(self.__algorithm, dict) \
1306             or not hasattr(self.__algorithm,"StoredVariables"):
1307             raise ValueError("No observer can be removed before choosing an algorithm.")
1308         if __V not in self.__algorithm:
1309             raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1311             return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# hasObserver: True if the stored variable __V currently has a data observer.
# NOTE(review): the early "return False" lines (original 1320 and 1322) are
# absent from this extraction.
1316     def hasObserver(self, __V):
1317         if self.__algorithm is None \
1318             or isinstance(self.__algorithm, dict) \
1319             or not hasattr(self.__algorithm,"StoredVariables"):
1321         if __V not in self.__algorithm:
1323         return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# Body of the dict-like keys(): all algorithm variable names plus parameter
# names, with non-public variable names filtered out.
# NOTE(review): the "def keys(self):" line (original 1325) is absent from
# this extraction.
1326         __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1327         for k in self.__variable_names_not_public:
1328             if k in __allvariables: __allvariables.remove(k)
1329         return __allvariables
1331 def __contains__(self, key=None):
1332 "D.__contains__(k) -> True if D has a key k, else False"
1333 return key in self.__algorithm or key in self.__P
# Bodies of __repr__ and __str__: both render the algorithm name (__A)
# followed by the parameter dict (__P).
# NOTE(review): the "def __repr__(self):" and "def __str__(self):" lines
# (original 1335 and 1339) are absent from this extraction.
1336         "x.__repr__() <==> repr(x)"
1337         return repr(self.__A)+", "+repr(self.__P)
1340         "x.__str__() <==> str(x)"
1341         return str(self.__A)+", "+str(self.__P)
# __setAlgorithm: locate the algorithm module "<choice>.py" in a
# "daAlgorithms" directory on sys.path, import it (with a temporary sys.path
# prepend), check it defines ElementaryAlgorithm, and instantiate it. The
# choice is write-once: re-selection raises.
# NOTE(review): the "if choice is None:" guard (1349), the module_path
# initialization (1357, presumably "module_path = None"), and the "try:"
# header (1366) are absent from this extraction.
1343     def __setAlgorithm(self, choice = None ):
1345         Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1346         d'assimilation. L'argument est un champ caractère se rapportant au nom
1347         d'un algorithme réalisant l'opération sur les arguments fixes.
1350             raise ValueError("Error: algorithm choice has to be given")
1351         if self.__algorithmName is not None:
1352             raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1353         daDirectory = "daAlgorithms"
1355         # Recherche explicitement le fichier complet
1356         # ------------------------------------------
1358         for directory in sys.path:
1359             if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1360                 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1361         if module_path is None:
1362             raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1364         # Importe le fichier complet comme un module
1365         # ------------------------------------------
# sys.path is restored after the import, whether it succeeds or not is
# handled by the (missing) try/except around these lines.
1367             sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1368             self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1369             if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1370                 raise ImportError("this module does not define a valid elementary algorithm.")
1371             self.__algorithmName = str(choice)
1372             sys.path = sys_path_tmp ; del sys_path_tmp
1373         except ImportError as e:
1374             raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1376         # Instancie un objet du type élémentaire du fichier
1377         # -------------------------------------------------
1378         self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# __shape_validate: derive a shape tuple for every case object (vectors Xb/Y/U,
# covariances B/R/Q, operators HO/EM/CM — each read through .size, .shape
# attribute or .shape() callable), then cross-check all size compatibilities
# and bounds lengths, raising ValueError/TypeError on mismatch.
# NOTE(review): at least the "else:" at original 1478 (EnsembleBlue special
# case) is absent from this extraction; other numbering gaps appear to be
# blank lines — TODO confirm against the upstream file before editing.
1381     def __shape_validate(self):
1383         Validation de la correspondance correcte des tailles des variables et
1384         des matrices s'il y en a.
1386         if self.__Xb is None: __Xb_shape = (0,)
1387         elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1388         elif hasattr(self.__Xb,"shape"):
1389             if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1390             else: __Xb_shape = self.__Xb.shape()
1391         else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1393         if self.__Y is None: __Y_shape = (0,)
1394         elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1395         elif hasattr(self.__Y,"shape"):
1396             if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1397             else: __Y_shape = self.__Y.shape()
1398         else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1400         if self.__U is None: __U_shape = (0,)
1401         elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1402         elif hasattr(self.__U,"shape"):
1403             if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1404             else: __U_shape = self.__U.shape()
1405         else: raise TypeError("The control (U) has no attribute of shape: problem !")
1407         if self.__B is None: __B_shape = (0,0)
1408         elif hasattr(self.__B,"shape"):
1409             if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1410             else: __B_shape = self.__B.shape()
1411         else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1413         if self.__R is None: __R_shape = (0,0)
1414         elif hasattr(self.__R,"shape"):
1415             if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1416             else: __R_shape = self.__R.shape()
1417         else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1419         if self.__Q is None: __Q_shape = (0,0)
1420         elif hasattr(self.__Q,"shape"):
1421             if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1422             else: __Q_shape = self.__Q.shape()
1423         else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
# Operators are dicts of forms; their "Direct" member carries the shape.
1425         if len(self.__HO) == 0: __HO_shape = (0,0)
1426         elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1427         elif hasattr(self.__HO["Direct"],"shape"):
1428             if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1429             else: __HO_shape = self.__HO["Direct"].shape()
1430         else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1432         if len(self.__EM) == 0: __EM_shape = (0,0)
1433         elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1434         elif hasattr(self.__EM["Direct"],"shape"):
1435             if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1436             else: __EM_shape = self.__EM["Direct"].shape()
1437         else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1439         if len(self.__CM) == 0: __CM_shape = (0,0)
1440         elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1441         elif hasattr(self.__CM["Direct"],"shape"):
1442             if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1443             else: __CM_shape = self.__CM["Direct"].shape()
1444         else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1446         # Vérification des conditions
1447         # ---------------------------
# Vectors must be one-dimensional (or degenerate 2-D with one unit dim).
1448         if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1449             raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1450         if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1451             raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
# Covariance matrices and the evolution operator must be square.
1453         if not( min(__B_shape) == max(__B_shape) ):
1454             raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1455         if not( min(__R_shape) == max(__R_shape) ):
1456             raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1457         if not( min(__Q_shape) == max(__Q_shape) ):
1458             raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1459         if not( min(__EM_shape) == max(__EM_shape) ):
1460             raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
# Observation operator must map the state space to the observation space.
1462         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1463             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1464         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1465             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1466         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1467             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1468         if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1469             raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# EnsembleBlue special case: a background whose size is a multiple of B's
# order is reinterpreted as an ensemble of persistent background members.
1471         if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1472             if self.__algorithmName in ["EnsembleBlue",]:
1473                 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1474                 self.__Xb = Persistence.OneVector("Background")
1475                 for member in asPersistentVector:
1476                     self.__Xb.store( numpy.asarray(member, dtype=float) )
1477                 __Xb_shape = min(__B_shape)
1479                 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1481         if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1482             raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1484         if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1485             raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1487         if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1488             raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
# Bounds lists, when present, must provide one pair per state component.
1490         if ("Bounds" in self.__P) \
1491             and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1492             and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1493             raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1494                 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1496         if ("StateBoundsForQuantiles" in self.__P) \
1497             and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1498             and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1499             raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1500                 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1504 # ==============================================================================
# ==============================================================================
class RegulationAndParameters(object):
    """
    Generic interface class for the regulation action and its parameters.
    """
    # NOTE(review): the listing elides part of the __init__ signature here.
        name = "GenericRegulation",
        self.__name = str(name)
        # Resolve the algorithm name: either loaded from the script, or taken
        # from the explicit argument (the script branch has priority when set).
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        # Resolve the parameter dictionary the same way (script or argument).
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # Fold the resolved values into the internal parameter store self.__P.
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Checks the existence of a variable or parameter key and returns it"
            return self.__P[key]
1543 # ==============================================================================
# ==============================================================================
class DataObserver(object):
    """
    Generic observer-type interface class: attaches a user observation
    function to one or several algorithm variables.
    """
        name = "GenericObserver",
        self.__name = str(name)
        # Normalize the observed variable name(s) into the tuple self.__V and
        # the matching info strings into self.__I (same length as self.__V).
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
                self.__I = (str(withInfo),)
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        # Build the observer callable: either the object given directly, or a
        # function compiled from a user script/template text.
        if asObsObject is not None:
            self.__O = asObsObject
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        # Register the observer on every requested variable of the algorithm.
        for k in range(len(self.__V)):
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1606 # ==============================================================================
# ==============================================================================
class UserScript(object):
    """
    Generic interface class for a user script text: resolves the script body
    self.__F from, in order of priority, an explicit string, a named template,
    or an importable script file.
    """
        name = "GenericUserScript",
        self.__name = str(name)
        if asString is not None:
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()

    "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    "x.__str__() <==> str(x)"
        return str(self.__F)
1640 # ==============================================================================
# ==============================================================================
class ExternalParameters(object):
    """
    Generic interface class for externally supplied parameters, stored as a
    dict-like object self.__P.
    (NOTE(review): the original docstring was a copy-paste of the UserScript
    one; corrected here.)
    """
        name = "GenericExternalParameters",
        self.__name = str(name)
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
        "Update of the parameters"
        # The script branch has priority: load the dict from the script when
        # no explicit dictionary is given.
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
            return self.__P[key]
            return list(self.__P.keys())
        return list(self.__P.keys())

    def pop(self, k, d):
        return self.__P.pop(k, d)

        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1689 # ==============================================================================
# ==============================================================================
class State(object):
    """
    Generic state-type interface class: wraps either a single vector or a
    series of vectors (Persistence object).
    """
        name = "GenericVector",
        asPersistentVector = None,
        toBeChecked = False,
        """
        Defines a vector:
        - asVector : data input, as a vector compatible with the
          numpy.matrix constructor, or "True" if input through a script.
        - asPersistentVector : data input, as a series of vectors
          compatible with the numpy.matrix constructor, or as a Persistence
          object, or "True" if input through a script.
        - asScript : if a valid script is given containing a variable
          named "name", the variable is of type "asVector" (default) or
          "asPersistentVector" depending on which of these variables is set
        - asDataFile : if one or more valid files are given,
          containing values in columns, themselves named "colNames"
          (if no column name is given, a column named "name" is searched
          for), the columns are retrieved and stored row after
          row (colMajor=False, default) or column after column
          (colMajor=True). The resulting variable is of type "asVector" (by
          default) or "asPersistentVector" depending on which of these
          variables is set
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # Exactly one of these flags becomes True once the input is resolved.
        self.__is_vector = False
        self.__is_series = False
        # Resolve the raw input into __Vector (single state) or __Series.
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # NPZ files are already stored column-major, hence the
                # asymmetric transpose conditions below.
                if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                if colNames is not None:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                    __Vector = numpy.ravel(__Vector, order = "F")
                    __Vector = numpy.ravel(__Vector, order = "C")
            __Vector, __Series = asVector, asPersistentVector
        # Store the resolved data as a column vector or a Persistence series.
        if __Vector is not None:
            self.__is_vector = True
            if isinstance(__Vector, str):
                __Vector = PlatformInfo.strvect2liststr( __Vector )
            self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                #~ self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                self.__V = Persistence.OneVector(self.__name)
                if isinstance(__Series, str):
                    __Series = PlatformInfo.strmatrix2liststr(__Series)
                for member in __Series:
                    if isinstance(member, str):
                        member = PlatformInfo.strvect2liststr( member )
                    self.__V.store(numpy.asarray( member, dtype=float ))
            # "shape" may be an attribute (ndarray-like) or a method
            # (Persistence object); normalize to a (rows, cols) tuple.
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        # Optional scheduling information attached to the state.
        if scheduledBy is not None:
            self.__T = scheduledBy

    def getO(self, withScheduler=False):
            return self.__V, self.__T
        elif self.__T is None:

        "Internal type check"
        return self.__is_vector

        "Internal type check"
        return self.__is_series

        "x.__repr__() <==> repr(x)"
        return repr(self.__V)

        "x.__str__() <==> str(x)"
        return str(self.__V)
1819 # ==============================================================================
# ==============================================================================
class Covariance(object):
    """
    Generic covariance-type interface class. The matrix may be stored in one
    of four internal representations: full matrix, diagonal vector, scalar
    multiple of the identity, or an external user object.
    """
        name = "GenericCovariance",
        asCovariance = None,
        asEyeByScalar = None,
        asEyeByVector = None,
        toBeChecked = False,
        """
        Defines a covariance:
        - asCovariance : data input, as a matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar : data input as a single variance scalar,
          multiplying an identity correlation matrix, so that no matrix
          has to be explicitly given
        - asEyeByVector : data input as a single variance vector, to be
          put on the diagonal of a correlation matrix, so that no matrix
          has to be explicitly given
        - asCovObject : data input as a python object, which has the
          mandatory methods "getT", "getI", "diag", "trace", "__add__",
          "__sub__", "__neg__", "__mul__", "__rmul__" and the optional ones
          "shape", "size", "cholesky", "choleskyI", "asfullmatrix",
          "__repr__", "__str__"
        - toBeChecked : boolean indicating whether the SPD character of the
          full matrix has to be checked
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # Exactly one of these flags becomes True once the input is resolved.
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        # Resolve the raw input (script has priority over direct arguments).
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
                __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        # Store in the cheapest matching representation.
        if __Scalar is not None:
            if isinstance(__Scalar, str):
                __Scalar = PlatformInfo.strvect2liststr( __Scalar )
                if len(__Scalar) > 0: __Scalar = __Scalar[0]
            if numpy.array(__Scalar).size != 1:
                raise ValueError('  The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n  Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
        elif __Vector is not None:
            if isinstance(__Vector, str):
                __Vector = PlatformInfo.strvect2liststr( __Vector )
            self.__is_vector = True
            self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            # The user object must provide the full algebraic protocol.
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
            # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)

    def __validate(self):
        # Internal sanity checks: definedness, squareness, positivity; the
        # (expensive) SPD check is only run on demand or at verbose levels.
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
                # A successful Cholesky factorization proves the matrix is SPD.
                L = numpy.linalg.cholesky( self.__C )
                raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
                L = self.__C.cholesky()
                raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))

        "Internal type check"
        return self.__is_scalar

        "Internal type check"
        return self.__is_vector

        "Internal type check"
        return self.__is_matrix

        "Internal type check"
        return self.__is_object

            return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject() and hasattr(self.__C,"getI"):
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
            return None # Required

            return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            # Diagonal/scalar representations are symmetric: transpose is self.
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject() and hasattr(self.__C,"getT"):
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
            raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))

        "Cholesky decomposition"
            return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
            raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))

    def choleskyI(self):
        "Inverse of the Cholesky decomposition"
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
            raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))

        "Matrix square root"
            # scipy.sqrtm may return a complex result for ill-conditioned
            # input; only the real part is kept.
            return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"sqrtm"):
            return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
            raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))

        "Inverse of the matrix square root"
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"sqrtmI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
            raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))

    def diag(self, msize=None):
        "Diagonal of the matrix"
            return numpy.diag(self.__C)
        elif self.isvector():
        elif self.isscalar():
                # For a scalar covariance the size cannot be inferred.
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return self.__C * numpy.ones(int(msize))
        elif self.isobject() and hasattr(self.__C,"diag"):
            return self.__C.diag()
            raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))

    def trace(self, msize=None):
        "Trace of the matrix"
            return numpy.trace(self.__C)
        elif self.isvector():
            return float(numpy.sum(self.__C))
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
            raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))

    def asfullmatrix(self, msize=None):
            return numpy.asarray(self.__C, dtype=float)
        elif self.isvector():
            return numpy.asarray( numpy.diag(self.__C), dtype=float )
        elif self.isscalar():
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()
            raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))

    def assparsematrix(self):

        "x.__repr__() <==> repr(x)"
        return repr(self.__C)

        "x.__str__() <==> str(x)"
        return str(self.__C)

    def __add__(self, other):
        "x.__add__(y) <==> x+y"
        if self.ismatrix() or self.isobject():
            return self.__C + numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            _A = numpy.asarray(other)
            # Add the (implicit) diagonal in place through a flat stride view.
            _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
            return numpy.asmatrix(_A)

    def __radd__(self, other):
        "x.__radd__(y) <==> y+x"
        raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))

    def __sub__(self, other):
        "x.__sub__(y) <==> x-y"
        if self.ismatrix() or self.isobject():
            return self.__C - numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            _A = numpy.asarray(other)
            _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
            return numpy.asmatrix(_A)

    def __rsub__(self, other):
        "x.__rsub__(y) <==> y-x"
        raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))

        "x.__neg__() <==> -x"

    def __matmul__(self, other):
        "x.__matmul__(y) <==> x@y"
        if self.ismatrix() and isinstance(other, (int, float)):
            return numpy.asarray(self.__C) * other
        elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.ravel(self.__C @ numpy.ravel(other))
            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asarray(self.__C) @ numpy.asarray(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.ravel(self.__C) * numpy.ravel(other)
            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
                return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return numpy.asarray(self.__C * other)
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.ravel(other)
                return self.__C * numpy.asarray(other)
        elif self.isobject():
            return self.__C.__matmul__(other)
            raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))

    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
                return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
            raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))

    def __rmatmul__(self, other):
        "x.__rmatmul__(y) <==> y@x"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix(other) * self.__C
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmatmul__(other)
            raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))

    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix(other) * self.__C
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isscalar() and isinstance(other,float):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmul__(other)
            raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))

        "x.__len__() <==> len(x)"
        return self.shape[0]
2242 # ==============================================================================
# ==============================================================================
class Observer2Func(object):
    """
    Creation of an observer function from its source text.
    """
    def __init__(self, corps=""):
        # corps: the observer function body as a text string
        self.__corps = corps
    def func(self,var,info):
        "Observation function"
        "Returns the function pointer held in the object"
2256 # ==============================================================================
# ==============================================================================
class CaseLogger(object):
    """
    Keeps the record of the commands used to create a case, and can replay
    (load) or serialize (dump) them through pluggable formatters.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name = str(__name)
        self.__objname = str(__objname)
        self.__logSerie = []          # ordered list of recorded commands
        self.__switchoff = False      # when True, register() ignores commands
        # Built-in output formatters (dump targets)
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
            "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
            "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
        # Built-in input formatters (load sources)
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Recording of an individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            self.__switchoff = True
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized dump of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of the commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
# ==============================================================================
# NOTE(review): the listing elides the enclosing "def MultiFonction(...)" line;
# the lines below are the tail of its signature and its (partial) body.
        _extraArguments = None,
        _sFunction = lambda x: x,
    """
    For an ordered list of input vectors, returns the corresponding list of
    values of the function given in argument.
    """
    # Initial checks and definitions
    # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
    if not PlatformInfo.isIterable( __xserie ):
        # NOTE(review): "unkown" typo in the runtime message — left as-is
        # since changing it would alter behavior-visible text.
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    # Determine the number of multiprocessing workers (None/<1 means default).
    if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
        __mpWorkers = int(_mpWorkers)
        import multiprocessing
        # Parallel evaluation: map the function over the jobs with a pool.
        # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
        import multiprocessing
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
        # logging.debug("MULTF Internal multiprocessing calculation end")
        # Sequential evaluation, dispatching on the extra-arguments type.
        # logging.debug("MULTF Internal monoprocessing calculation begin")
        if _extraArguments is None:
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue ) )
        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
        elif _extraArguments is not None and isinstance(_extraArguments, dict):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
        # logging.debug("MULTF Internal monoprocessing calculation end")
    # logging.debug("MULTF Internal multifonction calculations end")
2368 # ==============================================================================
2369 def CostFunction3D(_x,
2370 _Hm = None, # To simulate Hm(x) : HO["Direct"].appliedTo
2371 _HmX = None, # Simulation of Hm(x) already computed
2372 _arg = None, # Extra arguments for Hm, given as a tuple
2377 _SIV = False, # To be removed for release 8.0
2378 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]  # NOTE(review): mutable default argument, shared across calls — safe only if never mutated; confirm
2379 _nPS = 0, # nbPreviousSteps
2380 _QM = "DA", # QualityMeasure
2381 _SSV = {}, # Input and/or output : self.StoredVariables  # NOTE(review): mutable default argument, shared across calls — safe only if callers always pass their own dict; confirm
2382 _fRt = False, # Whether or not to return the extended output
2383 _sSc = True, # Whether or not to store the SSC
2386 General-purpose cost function for static/3D algorithms: 3DVAR, BLUE
2387 and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO, SA, Tabu,
2388 DFO, QuantileRegression
# For each supplementary-calculation series that exposes a Persistence-style
# "store" method, alias it as "append" so the code below can use a uniform
# list-like interface on _SSV entries.
2394 for k in ["CostFunctionJ",
2400 "SimulatedObservationAtCurrentOptimum",
2401 "SimulatedObservationAtCurrentState",
2405 if hasattr(_SSV[k],"store"):
2406 _SSV[k].append = _SSV[k].store # Use "append" instead of "store"
# Flatten the candidate state into a column matrix.
# NOTE(review): numpy.asmatrix/numpy.matrix is deprecated in modern NumPy —
# kept as-is here to preserve behavior.
2408 _X = numpy.asmatrix(numpy.ravel( _x )).T
2409 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2410 _SSV["CurrentState"].append( _X )
# Reuse a precomputed simulation when provided; otherwise the operator _Hm
# is required to evaluate Hm(X).
2412 if _HmX is not None:
2416 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2420 _HX = _Hm( _X, *_arg )
2421 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2423 if "SimulatedObservationAtCurrentState" in _SSC or \
2424 "SimulatedObservationAtCurrentOptimum" in _SSC:
2425 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# If the simulation produced any NaN, propagate NaN through all three cost
# terms instead of raising.
2427 if numpy.any(numpy.isnan(_HX)):
2428 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2430 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
# Select the quality measure. Only the augmented (DA) family includes the
# background term Jb = 0.5 (X-Xb)' B^-1 (X-Xb); the others use Jo alone.
2431 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2432 if _BI is None or _RI is None:
2433 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2434 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2435 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2436 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2437 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2439 raise ValueError("Observation error covariance matrix has to be properly defined!")
2441 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2442 elif _QM in ["LeastSquares", "LS", "L2"]:
2444 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2445 elif _QM in ["AbsoluteValue", "L1"]:
2447 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2448 elif _QM in ["MaximumError", "ME"]:
2450 Jo = numpy.max( numpy.abs(_Y - _HX) )
2451 elif _QM in ["QR", "Null"]:
2455 raise ValueError("Unknown asked quality measure!")
2457 J = float( Jb ) + float( Jo )
2460 _SSV["CostFunctionJb"].append( Jb )
2461 _SSV["CostFunctionJo"].append( Jo )
2462 _SSV["CostFunctionJ" ].append( J )
# Track the running optimum over the stored J history, skipping the first
# _nPS entries (previous steps that predate this optimization run).
2464 if "IndexOfOptimum" in _SSC or \
2465 "CurrentOptimum" in _SSC or \
2466 "SimulatedObservationAtCurrentOptimum" in _SSC:
2467 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2468 if "IndexOfOptimum" in _SSC:
2469 _SSV["IndexOfOptimum"].append( IndexMin )
2470 if "CurrentOptimum" in _SSC:
2471 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2472 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2473 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2478 if _QM in ["QR"]: # For QuantileRegression
2483 # ==============================================================================
2484 if __name__ == "__main__":
# Self-diagnostic entry point: prints a banner when the module is executed
# directly rather than imported.
2485 print('\n AUTODIAGNOSTIC\n')