1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2022 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
# CacheManager: bounded FIFO cache of operator evaluations, keyed by the
# input vector and the operator name, used to avoid redundant computations.
# NOTE(review): this view of the file is elided — several statements of the
# original method bodies (def lines, else/continue branches, returns) are
# not visible here; confirm against the full source before editing logic.
class CacheManager(object):
    Classe générale de gestion d'un cache de calculs
        toleranceInRedundancy = 1.e-18,
        lenghtOfRedundancy = -1,
        Les caractéristiques de tolérance peuvent être modifiées à la création.
        # Relative tolerance used by the redundancy (identity) test
        self.__tolerBP = float(toleranceInRedundancy)
        # Maximum cache length; "lenght" spelling kept: part of the public API.
        # A negative value means "auto-size on first storeValueInX() call".
        self.__lenghtOR = int(lenghtOfRedundancy)
        # Remember the initial length so enable() can restore it later
        self.__initlnOR = self.__lenghtOR
    def wasCalculatedIn(self, xValue, oName="" ):
        "Checks whether a computation matching the given value already exists"
        # Scan the cache backwards: most recently stored entries first
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if not hasattr(xValue, 'size'):
            # Entry must belong to the same operator name
            elif (str(oName) != self.__listOPCV[i][3]):
            elif (xValue.size != self.__listOPCV[i][0].size):
            # Cheap first-component test rejects most candidates before the
            # full norm computation below
            elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
            # Full test: points closer than the tolerated distance are "equal"
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                # Cache hit: reuse the previously stored H(x) value
                __HxV = self.__listOPCV[i][1]
    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Stores, for an operator o, a computation Hx matching the value x"
        if self.__lenghtOR < 0:
            # First use: auto-size the cache from the state dimension
            self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if the operator is new
            self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR += self.__lenghtOR
            self.__seenNames.append(str(oName))
        # Evict the oldest entries (FIFO) while over capacity
        while len(self.__listOPCV) > self.__lenghtOR:
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)), # 0 Previous point
            copy.copy(HxValue), # 1 Previous value
            numpy.linalg.norm(xValue), # 2 Norm
            str(oName), # 3 Operator name
        # disable(): keep the current length aside and switch caching off
        self.__initlnOR = self.__lenghtOR
        self.__enabled = False
        # enable(): restore the saved cache length and switch caching on
        self.__lenghtOR = self.__initlnOR
        self.__enabled = True
113 # ==============================================================================
# Operator: generic wrapper around a simple operator, given either as a
# Python (multi-)function or as a matrix.  Evaluations are counted locally
# and globally and, when enabled, cached through the class-level
# CacheManager instance (Operator.CM, declared outside this view).
# NOTE(review): this view of the file is elided — several statements of the
# original bodies (else branches, intermediate assignments, returns) are
# not visible here; confirm against the full source before editing logic.
class Operator(object):
    Classe générale d'interface de type opérateur simple
        name = "GenericOperator",
        avoidingRedundancy = True,
        reducingMemoryUse = False,
        inputAsMultiFunction = False,
        enableMultiProcess = False,
        extraArguments = None,
        On construit un objet de ce type en fournissant, à l'aide de l'un des
        deux mots-clé, soit une fonction ou un multi-fonction python, soit une
        - name : nom d'opérateur
        - fromMethod : argument de type fonction Python
        - fromMatrix : argument adapté au constructeur numpy.array/matrix
        - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
        - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
        - inputAsMultiFunction : booléen indiquant une fonction explicitement
          définie (ou pas) en multi-fonction
        - extraArguments : arguments supplémentaires passés à la fonction de
          base et ses dérivées (tuple ou dictionnaire)
        self.__name = str(name)
        # Per-instance call counters: as matrix, as method, served from cache
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__reduceM = bool( reducingMemoryUse )
        self.__avoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # Already a multi-function: use it as given
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Mono-function: wrap it as a multi-function (optionally parallel)
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type = "Method"
        elif fromMatrix is not None:
            # Matrix form; a string matrix description is parsed first
            if isinstance(fromMatrix, str):
                fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
            self.__Type = "Matrix"
    def disableAvoidingRedundancy(self):
        # Switch the shared evaluation cache off
        Operator.CM.disable()
    def enableAvoidingRedundancy(self):
        # NOTE(review): lines elided here — this disable() call is presumably
        # the fallback branch (cache enabled only when self.__avoidRC is
        # true); confirm against the full source.
        Operator.CM.disable()
    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Permet de restituer le résultat de l'application de l'opérateur à une
        série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
        argument devant a priori être du bon type.
        - les arguments par série sont :
          - xValue : argument adapté pour appliquer l'opérateur
          - HValue : valeur précalculée de l'opérateur en ce point
          - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
        if HValue is not None:
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        if _HValue is not None:
            # Precomputed values supplied: store them instead of recomputing
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( _HValue[i] )
                    Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
            for i, xv in enumerate(_xValue):
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                    __alreadyCalculated = False
                if __alreadyCalculated:
                    # Cache hit: count it and reuse the stored value
                    self.__addOneCacheCall()
                    if self.__Matrix is not None:
                        # Linear case: direct matrix-vector product
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix @ numpy.ravel(xv)
                        self.__addOneMethodCall()
                _HxValue.append( _hv )
            # Evaluate all deferred points in one multi-function call
            if len(_xserie)>0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # MF computation
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
                if not hasattr(_hserie, "pop"):
                    raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
                        Operator.CM.storeValueInX(_xv,_hv,self.__name)
        if returnSerieAsArrayMatrix:
            # Pack the series of results as columns of a 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]
    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Permet de restituer le résultat de l'application de l'opérateur à des
        paires (xValue, uValue). Cette méthode se contente d'appliquer, son
        argument devant a priori être du bon type. Si la uValue est None,
        on suppose que l'opérateur ne s'applique qu'à xValue.
        - paires : les arguments par paire sont :
          - xValue : argument X adapté pour appliquer l'opérateur
          - uValue : argument U adapté pour appliquer l'opérateur
        - argsAsSerie : indique si l'argument est une mono ou multi-valeur
        # Normalize the input to a series of (x, u) pairs
        if argsAsSerie: _xuValue = paires
        else: _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        if self.__Matrix is not None:
            # Linear case: the control u is ignored, apply the matrix to x
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    # Keep the full (x, u) pair when a control is present
                    _xuArgs.append( paire )
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # MF computation
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]
    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Permet de restituer le résultat de l'application de l'opérateur à une
        série d'arguments xValue, sachant que l'opérateur est valable en
        xNominal. Cette méthode se contente d'appliquer, son argument devant a
        priori être du bon type. Si l'opérateur est linéaire car c'est une
        matrice, alors il est valable en tout point nominal et xNominal peut
        être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
        permet d'indiquer que l'argument est multi-paires.
        - paires : les arguments par paire sont :
          - xNominal : série d'arguments permettant de donner le point où
            l'opérateur est construit pour être ensuite appliqué
          - xValue : série d'arguments adaptés pour appliquer l'opérateur
        - argsAsSerie : indique si l'argument est une mono ou multi-valeur
        # Normalize the input to a series of (xNominal, xValue) pairs
        if argsAsSerie: _nxValue = paires
        else: _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        if self.__Matrix is not None:
            # Linear operator: valid at any nominal point, xNominal is ignored
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # MF computation
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]
    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        Permet de renvoyer l'opérateur sous la forme d'une matrice
        if self.__Matrix is not None:
            # Already a matrix: return it directly (as a 1-element series)
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
                self.__addOneMethodCall( len(ValueForMethodForm) )
                for _vfmf in ValueForMethodForm:
                    mValue.append( self.__Method(((_vfmf, None),)) )
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        if argsAsSerie: return mValue
        else: return mValue[-1]
        Renvoie la taille sous forme numpy si l'opérateur est disponible sous
        la forme d'une matrice
        if self.__Matrix is not None:
            return self.__Matrix.shape
            raise ValueError("Matrix form of the operator is not available, nor the shape")
    def nbcalls(self, which=None):
        Renvoie les nombres d'évaluations de l'opérateur
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        # Return the whole tuple, or one counter selected by index
        if which is None: return __nbcalls
        else: return __nbcalls[which]
    def __addOneMatrixCall(self):
        "Counts one call"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count
    def __addOneMethodCall(self, nb = 1):
        "Counts one call"
        self.__NbCallsAsMethod += nb # Local count
        Operator.NbCallsAsMethod += nb # Global count
    def __addOneCacheCall(self):
        "Counts one call"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
408 # ==============================================================================
# FullOperator: builds the complete triplet of Operator objects (Direct,
# Tangent, Adjoint) from one of: a matrix, a single Direct function (with
# finite-difference approximated derivatives), three explicit functions,
# or a script providing them.  Results are stored in self.__FO.
# NOTE(review): this view of the file is elided — several statements of the
# original body (the __init__ line, else branches, intermediate
# assignments) are not visible here; confirm before editing logic.
class FullOperator(object):
    Classe générale d'interface de type opérateur complet
    (Direct, Linéaire Tangent, Adjoint)
        name = "GenericFullOperator",
        asOneFunction = None, # 1 Function
        asThreeFunctions = None, # 3 Functions in a dictionary
        asScript = None, # 1 or 3 Function(s) by script
        asDict = None, # Parameters
        extraArguments = None,
        performancePrf = None,
        inputAsMF = False,# Function(s) as Multi-Functions
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            # Parallel derivatives exclude parallel evaluation
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # The default is equivalent to "ReducedOverallRequirements"
        __reduceM, __avoidRC = True, True
        if performancePrf is not None:
            if performancePrf == "ReducedAmountOfCalculation":
                __reduceM, __avoidRC = False, True
            elif performancePrf == "ReducedMemoryFootprint":
                __reduceM, __avoidRC = True, False
            elif performancePrf == "NoSavings":
                __reduceM, __avoidRC = False, False
        if asScript is not None:
            # Operators are loaded from an external script
            __Matrix, __Function = None, None
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                    "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            if asOneFunction is not None:
                # Single Direct function: derivatives will be approximated
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Fall back on the tangent operator for the direct one
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        if scheduledBy is not None:
            self.__T = scheduledBy
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Finite-difference approximation of the Tangent/Adjoint pair
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["CenteredFiniteDifference"],
                increment = __Function["DifferentialIncrement"],
                dX = __Function["withdX"],
                extraArguments = self.__extraArgs,
                reducingMemoryUse = __Function["withReducingMemoryUse"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
                mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers = __Function["NumberOfProcesses"],
                mfEnabled = __Function["withmfEnabled"],
            self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # The three operators are given explicitly as functions
            self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # Linear case: the adjoint is the transpose of the matrix
            if isinstance(__Matrix, str):
                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
            __matrice = numpy.asarray( __Matrix, dtype=float )
            self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
            raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
        if __appliedInX is not None:
            # Normalize the "applied in X" values as column matrices
            self.__FO["AppliedInX"] = {}
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # Case of a true matrix
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Case of a vector stored as a 2D array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
            self.__FO["AppliedInX"] = None
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)
        "x.__str__() <==> str(x)"
        return str(self.__FO)
594 # ==============================================================================
595 class Algorithm(object):
597 Classe générale d'interface de type algorithme
599 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
600 d'assimilation, en fournissant un container (dictionnaire) de variables
601 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
603 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
    # NOTE(review): elided view — the docstring delimiters and some
    # statements of the original __init__ are not visible here.
    def __init__(self, name):
        L'initialisation présente permet de fabriquer des variables de stockage
        disponibles de manière générique dans les algorithmes élémentaires. Ces
        variables de stockage sont ensuite conservées dans un dictionnaire
        interne à l'objet, mais auquel on accède par la méthode "get".
        Les variables prévues sont :
        - APosterioriCorrelations : matrice de corrélations de la matrice A
        - APosterioriCovariance : matrice de covariances a posteriori : A
        - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
        - APosterioriVariances : vecteur des variances de la matrice A
        - Analysis : vecteur d'analyse : Xa
        - BMA : Background moins Analysis : Xa - Xb
        - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
        - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
        - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
        - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
        - CostFunctionJo : partie observations de la fonction-coût : Jo
        - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
        - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
        - CurrentOptimum : état optimal courant lors d'itérations
        - CurrentState : état courant lors d'itérations
        - CurrentStepNumber : numéro courant de pas de mesure dans les algorithmes temporels
        - GradientOfCostFunctionJ : gradient de la fonction-coût globale
        - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
        - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
        - IndexOfOptimum : index de l'état optimal courant lors d'itérations
        - Innovation : l'innovation : d = Y - H(X)
        - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
        - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
        - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
        - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
        - KalmanGainAtOptimum : gain de Kalman à l'optimum
        - MahalanobisConsistency : indicateur de consistance des covariances
        - OMA : Observation moins Analyse : Y - Xa
        - OMB : Observation moins Background : Y - Xb
        - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
        - ForecastState : état prédit courant lors d'itérations
        - Residu : dans le cas des algorithmes de vérification
        - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
        - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
        - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
        - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
        - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
        - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
        - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
        - SimulationQuantiles : états observés H(X) pour les quantiles demandés
        On peut rajouter des variables à stocker dans l'initialisation de
        l'algorithme élémentaire qui va hériter de cette classe
        logging.debug("%s Initialisation", str(name))
        # System resources probe, used to report memory usage in _pre_run
        self._m = PlatformInfo.SystemUsage()
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__internal_state = {}
        self.__required_parameters = {}
        self.__required_inputs = {
            "RequiredInputValues":{"mandatory":(), "optional":()},
            "ClassificationTags":[],
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
        self.__canonical_parameter_name = {} # Mapping "lower"->"correct"
        self.__canonical_stored_name = {} # Mapping "lower"->"correct"
        # One Persistence container per storable variable, keyed by name
        self.StoredVariables = {}
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
        self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
        self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
        self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
        self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
        self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
        self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        # Build the case-insensitive lookup tables used by get()/setParameters()
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k
        for k, v in self.__variable_names_not_public.items():
            self.__canonical_parameter_name[k.lower()] = k
        self.__canonical_parameter_name["algorithm"] = "Algorithm"
        self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
724 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
726 logging.debug("%s Lancement", self._name)
727 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
728 self._getTimeState(reset=True)
730 # Mise a jour des paramètres internes avec le contenu de Parameters, en
731 # reprenant les valeurs par défauts pour toutes celles non définies
732 self.__setParameters(Parameters, reset=True)
733 for k, v in self.__variable_names_not_public.items():
734 if k not in self._parameters: self.__setParameters( {k:v} )
736 # Corrections et compléments des vecteurs
737 def __test_vvalue(argument, variable, argname, symbol=None):
738 if symbol is None: symbol = variable
740 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
741 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
742 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
743 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
745 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
747 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
748 logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
749 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
750 logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
752 logging.debug("%s %s vector %s is set although neither required nor optional, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
754 __test_vvalue( Xb, "Xb", "Background or initial state" )
755 __test_vvalue( Y, "Y", "Observation" )
756 __test_vvalue( U, "U", "Control" )
758 # Corrections et compléments des covariances
759 def __test_cvalue(argument, variable, argname, symbol=None):
760 if symbol is None: symbol = variable
762 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
763 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
764 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
765 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
767 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
769 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
770 logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
771 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
772 logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
774 logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
776 __test_cvalue( B, "B", "Background" )
777 __test_cvalue( R, "R", "Observation" )
778 __test_cvalue( Q, "Q", "Evolution" )
780 # Corrections et compléments des opérateurs
781 def __test_ovalue(argument, variable, argname, symbol=None):
782 if symbol is None: symbol = variable
783 if argument is None or (isinstance(argument,dict) and len(argument)==0):
784 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
785 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
786 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
787 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
789 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
791 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
792 logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
793 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
794 logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
796 logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
798 __test_ovalue( HO, "HO", "Observation", "H" )
799 __test_ovalue( EM, "EM", "Evolution", "M" )
800 __test_ovalue( CM, "CM", "Control Model", "C" )
802 # Corrections et compléments des bornes
803 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
804 logging.debug("%s Bounds taken into account"%(self._name,))
806 self._parameters["Bounds"] = None
807 if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
808 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
809 # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes
811 # Corrections et compléments de l'initialisation en X
812 if "InitializationPoint" in self._parameters:
814 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
815 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
816 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
817 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
818 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
820 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
822 if self._parameters["InitializationPoint"] is None:
823 raise ValueError("Forced initial point can not be set without any given Background or required value")
825 # Correction pour pallier a un bug de TNC sur le retour du Minimum
826 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
827 self.setParameterValue("StoreInternalVariables",True)
829 # Verbosité et logging
830 if logging.getLogger().level < logging.WARNING:
831 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
832 if PlatformInfo.has_scipy:
833 import scipy.optimize
834 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
836 self._parameters["optmessages"] = 15
838 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
839 if PlatformInfo.has_scipy:
840 import scipy.optimize
841 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
843 self._parameters["optmessages"] = 15
    def _post_run(self,_oH=None):
        """
        Post-calculation: derive the a posteriori variances, standard
        deviations and correlations from each stored a posteriori covariance
        when they were requested, then log operator call counters, memory and
        time usage. *_oH* is the observation operator dictionary (keys
        "Direct"/"Tangent"/"Adjoint"), used only for call statistics.
        """
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))  # inverse standard deviations
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))  # normalize covariance into correlation
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
868 def _toStore(self, key):
869 "True if in StoreSupplementaryCalculations, else False"
870 return key in self._parameters["StoreSupplementaryCalculations"]
872 def get(self, key=None):
874 Renvoie l'une des variables stockées identifiée par la clé, ou le
875 dictionnaire de l'ensemble des variables disponibles en l'absence de
876 clé. Ce sont directement les variables sous forme objet qui sont
877 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
878 des classes de persistance.
881 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
883 return self.StoredVariables
885 def __contains__(self, key=None):
886 "D.__contains__(k) -> True if D has a key k, else False"
887 if key is None or key.lower() not in self.__canonical_stored_name:
890 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
893 "D.keys() -> list of D's keys"
894 if hasattr(self, "StoredVariables"):
895 return self.StoredVariables.keys()
900 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
901 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
902 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
907 raise TypeError("pop expected at least 1 arguments, got 0")
908 "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Must implement the elementary assimilation calculation operation in
        its most natural mathematical form. This base version only signals
        the absence of an implementation; every concrete algorithm overrides
        it.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
921 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
923 Permet de définir dans l'algorithme des paramètres requis et leurs
924 caractéristiques par défaut.
927 raise ValueError("A name is mandatory to define a required parameter.")
929 self.__required_parameters[name] = {
931 "typecast" : typecast,
938 self.__canonical_parameter_name[name.lower()] = name
939 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
941 def getRequiredParameters(self, noDetails=True):
943 Renvoie la liste des noms de paramètres requis ou directement le
944 dictionnaire des paramètres requis.
947 return sorted(self.__required_parameters.keys())
949 return self.__required_parameters
951 def setParameterValue(self, name=None, value=None):
953 Renvoie la valeur d'un paramètre requis de manière contrôlée
955 __k = self.__canonical_parameter_name[name.lower()]
956 default = self.__required_parameters[__k]["default"]
957 typecast = self.__required_parameters[__k]["typecast"]
958 minval = self.__required_parameters[__k]["minval"]
959 maxval = self.__required_parameters[__k]["maxval"]
960 listval = self.__required_parameters[__k]["listval"]
961 listadv = self.__required_parameters[__k]["listadv"]
963 if value is None and default is None:
965 elif value is None and default is not None:
966 if typecast is None: __val = default
967 else: __val = typecast( default )
969 if typecast is None: __val = value
972 __val = typecast( value )
974 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
976 if minval is not None and (numpy.array(__val, float) < minval).any():
977 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
978 if maxval is not None and (numpy.array(__val, float) > maxval).any():
979 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
980 if listval is not None or listadv is not None:
981 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
983 if listval is not None and v in listval: continue
984 elif listadv is not None and v in listadv: continue
986 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
987 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
988 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    def requireInputArguments(self, mandatory=(), optional=()):
        """
        Declare which calculation input arguments are mandatory and which
        ones are optional for this algorithm.
        """
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    def getInputArguments(self):
        """
        Return the two tuples (mandatory, optional) of the required
        calculation input argument names.
        """
        return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
    def setAttributes(self, tags=()):
        """
        Append classification tag attributes and return the current full
        list in every case (calling it without argument acts as a getter).
        """
        self.__required_inputs["ClassificationTags"].extend( tags )
        return self.__required_inputs["ClassificationTags"]
1013 def __setParameters(self, fromDico={}, reset=False):
1015 Permet de stocker les paramètres reçus dans le dictionnaire interne.
1017 self._parameters.update( fromDico )
1018 __inverse_fromDico_keys = {}
1019 for k in fromDico.keys():
1020 if k.lower() in self.__canonical_parameter_name:
1021 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1022 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1023 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
1024 for k in self.__required_parameters.keys():
1025 if k in __canonic_fromDico_keys:
1026 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
1028 self._parameters[k] = self.setParameterValue(k)
1031 if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
1032 logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1034 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
    def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
        """
        Store named variables composing the internal state, either as a
        single (key, value) pair or as a whole dictionary *fromDico*.
        """
        if reset: # Empty the dictionary beforehand
            self.__internal_state = {}
        if key is not None and value is not None:
            self.__internal_state[key] = value
        self.__internal_state.update( dict(fromDico) )
1046 def _getInternalState(self, key=None):
1048 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1050 if key is not None and key in self.__internal_state:
1051 return self.__internal_state[key]
1053 return self.__internal_state
1055 def _getTimeState(self, reset=False):
1057 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1060 self.__initial_cpu_time = time.process_time()
1061 self.__initial_elapsed_time = time.perf_counter()
1064 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1065 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1066 return self.__cpu_time, self.__elapsed_time
1068 def _StopOnTimeLimit(self, X=None, withReason=False):
1069 "Stop criteria on time limit: True/False [+ Reason]"
1070 c, e = self._getTimeState()
1071 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1072 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1073 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1074 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1076 __SC, __SR = False, ""
1082 # ==============================================================================
class PartialAlgorithm(object):
    """
    Class mimicking "Algorithm" from the storage point of view, but without
    any advanced action such as checking. For the methods reproduced here,
    the behaviour is identical to the ones of the "Algorithm" class.
    """
    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        #
        self.StoredVariables = {}
        self.StoredVariables["Analysis"]               = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"]          = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"]         = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"]         = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"]      = Persistence.OneIndex(name = "CurrentStepNumber")
        #
        # Map lower-cased names to canonical keys for case-insensitive access
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k

    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]

    def get(self, key=None):
        """
        Return one of the stored variables identified by the key, or the
        dictionary of all the available variables when no key is given. The
        variables are returned directly as objects, so the individual access
        methods are the ones of the persistence classes.
        """
        if key is not None:
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
        else:
            return self.StoredVariables
1122 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action interface class for the algorithm and its parameters.
    """
    def __init__(self,
                 name        = "GenericAlgorithm",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None ):
        """
        Build the interface object from an algorithm name and parameters
        given either directly (*asAlgorithm*, *asDict*) or through a user
        script (*asScript*). NOTE(review): the signature lines dropped by
        the extraction were reconstructed — verify against upstream source.
        """
        self.__name = str(name)
        self.__A    = None
        self.__P    = {}
        #
        self.__algorithm     = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        #
        self.updateParameters( asDict, asScript )
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        #
        self.__setAlgorithm( self.__A )
        #
        self.__variable_names_not_public = {"nextStep":False} # Duplication from Algorithm
1158 def updateParameters(self,
1162 "Mise a jour des parametres"
1163 if asDict is None and asScript is not None:
1164 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1168 if __Dict is not None:
1169 self.__P.update( dict(__Dict) )
1171 def executePythonScheme(self, asDictAO = None):
1172 "Permet de lancer le calcul d'assimilation"
1173 Operator.CM.clearCache()
1175 if not isinstance(asDictAO, dict):
1176 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1177 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1178 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1179 else: self.__Xb = None
1180 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1181 else: self.__Y = asDictAO["Observation"]
1182 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1183 else: self.__U = asDictAO["ControlInput"]
1184 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1185 else: self.__HO = asDictAO["ObservationOperator"]
1186 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1187 else: self.__EM = asDictAO["EvolutionModel"]
1188 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1189 else: self.__CM = asDictAO["ControlModel"]
1190 self.__B = asDictAO["BackgroundError"]
1191 self.__R = asDictAO["ObservationError"]
1192 self.__Q = asDictAO["EvolutionError"]
1194 self.__shape_validate()
1196 self.__algorithm.run(
1206 Parameters = self.__P,
1210 def executeYACSScheme(self, FileName=None):
1211 "Permet de lancer le calcul d'assimilation"
1212 if FileName is None or not os.path.exists(FileName):
1213 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1215 __file = os.path.abspath(FileName)
1216 logging.debug("The YACS file name is \"%s\"."%__file)
1217 if not PlatformInfo.has_salome or \
1218 not PlatformInfo.has_yacs or \
1219 not PlatformInfo.has_adao:
1220 raise ImportError("\n\n"+\
1221 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1222 "Please load the right environnement before trying to use it.\n")
1225 import SALOMERuntime
1227 SALOMERuntime.RuntimeSALOME_setRuntime()
1229 r = pilot.getRuntime()
1230 xmlLoader = loader.YACSLoader()
1231 xmlLoader.registerProcCataLoader()
1233 catalogAd = r.loadCatalog("proc", __file)
1234 r.addCatalog(catalogAd)
1239 p = xmlLoader.load(__file)
1240 except IOError as ex:
1241 print("The YACS XML schema file can not be loaded: %s"%(ex,))
1243 logger = p.getLogger("parser")
1244 if not logger.isEmpty():
1245 print("The imported YACS XML schema has errors on parsing:")
1246 print(logger.getStr())
1249 print("The YACS XML schema is not valid and will not be executed:")
1250 print(p.getErrorReport())
1252 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1253 p.checkConsistency(info)
1254 if info.areWarningsOrErrors():
1255 print("The YACS XML schema is not coherent and will not be executed:")
1256 print(info.getGlobalRepr())
1258 e = pilot.ExecutorSwig()
1260 if p.getEffectiveState() != pilot.DONE:
1261 print(p.getErrorReport())
1265 def get(self, key = None):
1266 "Vérifie l'existence d'une clé de variable ou de paramètres"
1267 if key in self.__algorithm:
1268 return self.__algorithm.get( key )
1269 elif key in self.__P:
1270 return self.__P[key]
1272 allvariables = self.__P
1273 for k in self.__variable_names_not_public: allvariables.pop(k, None)
1276 def pop(self, k, d):
1277 "Necessaire pour le pickling"
1278 return self.__algorithm.pop(k, d)
1280 def getAlgorithmRequiredParameters(self, noDetails=True):
1281 "Renvoie la liste des paramètres requis selon l'algorithme"
1282 return self.__algorithm.getRequiredParameters(noDetails)
1284 def getAlgorithmInputArguments(self):
1285 "Renvoie la liste des entrées requises selon l'algorithme"
1286 return self.__algorithm.getInputArguments()
1288 def getAlgorithmAttributes(self):
1289 "Renvoie la liste des attributs selon l'algorithme"
1290 return self.__algorithm.setAttributes()
1292 def setObserver(self, __V, __O, __I, __S):
1293 if self.__algorithm is None \
1294 or isinstance(self.__algorithm, dict) \
1295 or not hasattr(self.__algorithm,"StoredVariables"):
1296 raise ValueError("No observer can be build before choosing an algorithm.")
1297 if __V not in self.__algorithm:
1298 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1300 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1303 HookParameters = __I,
1306 def removeObserver(self, __V, __O, __A = False):
1307 if self.__algorithm is None \
1308 or isinstance(self.__algorithm, dict) \
1309 or not hasattr(self.__algorithm,"StoredVariables"):
1310 raise ValueError("No observer can be removed before choosing an algorithm.")
1311 if __V not in self.__algorithm:
1312 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1314 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1319 def hasObserver(self, __V):
1320 if self.__algorithm is None \
1321 or isinstance(self.__algorithm, dict) \
1322 or not hasattr(self.__algorithm,"StoredVariables"):
1324 if __V not in self.__algorithm:
1326 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1329 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1330 for k in self.__variable_names_not_public:
1331 if k in __allvariables: __allvariables.remove(k)
1332 return __allvariables
1334 def __contains__(self, key=None):
1335 "D.__contains__(k) -> True if D has a key k, else False"
1336 return key in self.__algorithm or key in self.__P
1339 "x.__repr__() <==> repr(x)"
1340 return repr(self.__A)+", "+repr(self.__P)
1343 "x.__str__() <==> str(x)"
1344 return str(self.__A)+", "+str(self.__P)
1346 def __setAlgorithm(self, choice = None ):
1348 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1349 d'assimilation. L'argument est un champ caractère se rapportant au nom
1350 d'un algorithme réalisant l'opération sur les arguments fixes.
1353 raise ValueError("Error: algorithm choice has to be given")
1354 if self.__algorithmName is not None:
1355 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1356 daDirectory = "daAlgorithms"
1358 # Recherche explicitement le fichier complet
1359 # ------------------------------------------
1361 for directory in sys.path:
1362 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1363 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1364 if module_path is None:
1365 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1367 # Importe le fichier complet comme un module
1368 # ------------------------------------------
1370 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1371 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1372 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1373 raise ImportError("this module does not define a valid elementary algorithm.")
1374 self.__algorithmName = str(choice)
1375 sys.path = sys_path_tmp ; del sys_path_tmp
1376 except ImportError as e:
1377 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1379 # Instancie un objet du type élémentaire du fichier
1380 # -------------------------------------------------
1381 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1384 def __shape_validate(self):
1386 Validation de la correspondance correcte des tailles des variables et
1387 des matrices s'il y en a.
1389 if self.__Xb is None: __Xb_shape = (0,)
1390 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1391 elif hasattr(self.__Xb,"shape"):
1392 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1393 else: __Xb_shape = self.__Xb.shape()
1394 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1396 if self.__Y is None: __Y_shape = (0,)
1397 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1398 elif hasattr(self.__Y,"shape"):
1399 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1400 else: __Y_shape = self.__Y.shape()
1401 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1403 if self.__U is None: __U_shape = (0,)
1404 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1405 elif hasattr(self.__U,"shape"):
1406 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1407 else: __U_shape = self.__U.shape()
1408 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1410 if self.__B is None: __B_shape = (0,0)
1411 elif hasattr(self.__B,"shape"):
1412 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1413 else: __B_shape = self.__B.shape()
1414 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1416 if self.__R is None: __R_shape = (0,0)
1417 elif hasattr(self.__R,"shape"):
1418 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1419 else: __R_shape = self.__R.shape()
1420 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1422 if self.__Q is None: __Q_shape = (0,0)
1423 elif hasattr(self.__Q,"shape"):
1424 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1425 else: __Q_shape = self.__Q.shape()
1426 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1428 if len(self.__HO) == 0: __HO_shape = (0,0)
1429 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1430 elif hasattr(self.__HO["Direct"],"shape"):
1431 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1432 else: __HO_shape = self.__HO["Direct"].shape()
1433 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1435 if len(self.__EM) == 0: __EM_shape = (0,0)
1436 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1437 elif hasattr(self.__EM["Direct"],"shape"):
1438 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1439 else: __EM_shape = self.__EM["Direct"].shape()
1440 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1442 if len(self.__CM) == 0: __CM_shape = (0,0)
1443 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1444 elif hasattr(self.__CM["Direct"],"shape"):
1445 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1446 else: __CM_shape = self.__CM["Direct"].shape()
1447 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1449 # Vérification des conditions
1450 # ---------------------------
1451 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1452 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1453 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1454 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1456 if not( min(__B_shape) == max(__B_shape) ):
1457 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1458 if not( min(__R_shape) == max(__R_shape) ):
1459 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1460 if not( min(__Q_shape) == max(__Q_shape) ):
1461 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1462 if not( min(__EM_shape) == max(__EM_shape) ):
1463 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1465 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1466 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1467 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1468 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1469 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1470 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1471 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1472 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1474 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1475 if self.__algorithmName in ["EnsembleBlue",]:
1476 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1477 self.__Xb = Persistence.OneVector("Background")
1478 for member in asPersistentVector:
1479 self.__Xb.store( numpy.asarray(member, dtype=float) )
1480 __Xb_shape = min(__B_shape)
1482 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1484 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1485 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1487 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1488 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1490 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1491 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1493 if ("Bounds" in self.__P) \
1494 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1495 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1496 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1497 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1499 if ("StateBoundsForQuantiles" in self.__P) \
1500 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1501 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1502 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1503 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1507 # ==============================================================================
# General interface class holding a regulation algorithm name and its parameter
# dict (self.__P). Values may come directly (asAlgorithm/asDict) or be loaded
# from a user script via Interfaces.ImportFromScript.
# NOTE(review): this extract is line-number-prefixed and several short source
# lines (signature tail, docstring delimiters, "else:" branches) are missing;
# code lines below are kept byte-identical, only comments are added.
1508 class RegulationAndParameters(object):
1510 Classe générale d'interface d'action pour la régulation et ses paramètres
1513 name = "GenericRegulation",
# Instance name; also used as the variable name when reading from a script.
1520 self.__name = str(name)
# Resolve the algorithm: from the script if not given explicitly...
1523 if asAlgorithm is None and asScript is not None:
1524 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
# ...otherwise take the explicit argument (the "else:" line is missing here).
1526 __Algo = asAlgorithm
# Resolve the parameters dict the same way: explicit asDict or script value.
1528 if asDict is None and asScript is not None:
1529 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
# Merge whatever was resolved into the internal parameter store.
1533 if __Dict is not None:
1534 self.__P.update( dict(__Dict) )
1536 if __Algo is not None:
1537 self.__P.update( {"Algorithm":str(__Algo)} )
# Accessor for a stored key. The branch for a missing/None key is not visible
# in this extract — presumably the whole dict is returned then; TODO confirm.
1539 def get(self, key = None):
1540 "Vérifie l'existence d'une clé de variable ou de paramètres"
1542 return self.__P[key]
1546 # ==============================================================================
# Generic observer interface: binds an observer callable to one or several
# named algorithm variables (self.__V), with matching info strings (self.__I),
# then registers it on the algorithm object via setObserver.
# NOTE(review): mangled extract — line numbers fused into content, some short
# lines missing; code kept byte-identical, only comments added.
1547 class DataObserver(object):
1549 Classe générale d'interface de type observer
1552 name = "GenericObserver",
1564 self.__name = str(name)
# Normalize the target variable name(s) into a tuple self.__V, and build a
# parallel tuple of info strings self.__I of the same length.
1569 if onVariable is None:
1570 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1571 elif type(onVariable) in (tuple, list):
1572 self.__V = tuple(map( str, onVariable ))
# withInfo None branch body is not visible here (dropped line) — presumably
# each variable's own name is used as its info; TODO confirm.
1573 if withInfo is None:
1576 self.__I = (str(withInfo),)*len(self.__V)
1577 elif isinstance(onVariable, str):
1578 self.__V = (onVariable,)
1579 if withInfo is None:
1580 self.__I = (onVariable,)
1582 self.__I = (str(withInfo),)
1584 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# Resolve the observer callable: either a ready-made object, or a function
# compiled from user-provided text (string/template/script) via Observer2Func.
1586 if asObsObject is not None:
1587 self.__O = asObsObject
1589 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1590 __Function = Observer2Func(__FunctionText)
1591 self.__O = __Function.getfunc()
# Register the observer on each requested variable of the algorithm object.
# The lines binding ename/einfo from self.__V/self.__I are missing here.
1593 for k in range(len(self.__V)):
1596 if ename not in withAlgo:
1597 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1599 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
# Debug/user representations delegate to the variable tuple and the callable.
1602 "x.__repr__() <==> repr(x)"
1603 return repr(self.__V)+"\n"+repr(self.__O)
1606 "x.__str__() <==> str(x)"
1607 return str(self.__V)+"\n"+str(self.__O)
1609 # ==============================================================================
# Generic wrapper around a user-supplied script text (self.__F), resolved in
# priority order: explicit string, then a named template from Templates
# (UserPostAnalysis/Observer), then the content of a script file.
# NOTE(review): mangled extract — code kept byte-identical, comments added.
1610 class UserScript(object):
1612 Classe générale d'interface de type texte de script utilisateur
1615 name = "GenericUserScript",
1622 self.__name = str(name)
# The asString assignment body (line after this test) is missing in this
# extract — presumably self.__F = asString; TODO confirm.
1624 if asString is not None:
1626 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1627 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1628 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1629 self.__F = Templates.ObserverTemplates[asTemplate]
1630 elif asScript is not None:
1631 self.__F = Interfaces.ImportFromScript(asScript).getstring()
# Both representations expose the raw script text.
1636 "x.__repr__() <==> repr(x)"
1637 return repr(self.__F)
1640 "x.__str__() <==> str(x)"
1641 return str(self.__F)
1643 # ==============================================================================
# Dict-like container of named external parameters (self.__P), fillable from
# an explicit dict or from a user script; exposes get/keys/pop/items/contains.
# NOTE(review): mangled extract — code kept byte-identical, comments added.
1644 class ExternalParameters(object):
1646 Classe générale d'interface de type texte de script utilisateur
1649 name = "GenericExternalParameters",
1655 self.__name = str(name)
# Delegate initial filling to the update method below.
1658 self.updateParameters( asDict, asScript )
# Merge new parameters coming from a dict or from a script variable named
# "ExternalParameters" (parameter list lines are missing in this extract).
1660 def updateParameters(self,
1664 "Mise a jour des parametres"
1665 if asDict is None and asScript is not None:
1666 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1670 if __Dict is not None:
1671 self.__P.update( dict(__Dict) )
# Accessor; the fallback for a missing/None key is not visible here —
# presumably the key list is returned then; TODO confirm.
1673 def get(self, key = None):
1675 return self.__P[key]
1677 return list(self.__P.keys())
# keys() — its def line is among the dropped lines of this extract.
1680 return list(self.__P.keys())
# Dict-style pop with default, delegated to the underlying dict.
1682 def pop(self, k, d):
1683 return self.__P.pop(k, d)
# items() — its def line is among the dropped lines of this extract.
1686 return self.__P.items()
1688 def __contains__(self, key=None):
1689 "D.__contains__(k) -> True if D has a key k, else False"
1690 return key in self.__P
1692 # ==============================================================================
# Generic state container: wraps either a single column vector (self.__V as an
# N x 1 numpy array) or a series of vectors (self.__V as Persistence.OneVector),
# fed from an explicit value, a script variable, or data-file columns.
# NOTE(review): mangled extract — line numbers fused into content, short lines
# (else:, docstring delimiters, some def headers) missing; code kept
# byte-identical, only comments added.
1693 class State(object):
1695 Classe générale d'interface de type état
1698 name = "GenericVector",
1700 asPersistentVector = None,
1706 toBeChecked = False,
1709 Permet de définir un vecteur :
1710 - asVector : entrée des données, comme un vecteur compatible avec le
1711 constructeur de numpy.matrix, ou "True" si entrée par script.
1712 - asPersistentVector : entrée des données, comme une série de vecteurs
1713 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1714 type Persistence, ou "True" si entrée par script.
1715 - asScript : si un script valide est donné contenant une variable
1716 nommée "name", la variable est de type "asVector" (par défaut) ou
1717 "asPersistentVector" selon que l'une de ces variables est placée à
1719 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1720 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1721 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1722 nommée "name"), on récupère les colonnes et on les range ligne après
1723 ligne (colMajor=False, par défaut) ou colonne après colonne
1724 (colMajor=True). La variable résultante est de type "asVector" (par
1725 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1728 self.__name = str(name)
1729 self.__check = bool(toBeChecked)
# Exactly one of these two flags ends up True when input is valid.
1733 self.__is_vector = False
1734 self.__is_series = False
# Input source 1: a script file defining a variable named self.__name.
1736 if asScript is not None:
1737 __Vector, __Series = None, None
1738 if asPersistentVector:
1739 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1741 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
# Input source 2: one or several column-oriented data files.
1742 elif asDataFile is not None:
1743 __Vector, __Series = None, None
1744 if asPersistentVector:
1745 if colNames is not None:
1746 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1748 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Transpose to honor colMajor, with npz files having the opposite layout.
1749 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1750 __Series = numpy.transpose(__Series)
1751 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1752 __Series = numpy.transpose(__Series)
1754 if colNames is not None:
1755 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1757 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Flatten column-major ("F") or row-major ("C") depending on colMajor
# (the if/else test lines around these two are missing in this extract).
1759 __Vector = numpy.ravel(__Vector, order = "F")
1761 __Vector = numpy.ravel(__Vector, order = "C")
# Input source 3 (fallback): the values passed directly to the constructor.
1763 __Vector, __Series = asVector, asPersistentVector
# Store a single vector as a float column array, recording shape/size.
1765 if __Vector is not None:
1766 self.__is_vector = True
1767 if isinstance(__Vector, str):
1768 __Vector = PlatformInfo.strvect2liststr( __Vector )
1769 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1770 self.shape = self.__V.shape
1771 self.size = self.__V.size
# Store a series of vectors in a Persistence.OneVector object, member by
# member, with string members parsed into float lists first.
1772 elif __Series is not None:
1773 self.__is_series = True
1774 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1775 #~ self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1776 self.__V = Persistence.OneVector(self.__name)
1777 if isinstance(__Series, str):
1778 __Series = PlatformInfo.strmatrix2liststr(__Series)
1779 for member in __Series:
1780 if isinstance(member, str):
1781 member = PlatformInfo.strvect2liststr( member )
1782 self.__V.store(numpy.asarray( member, dtype=float ))
# shape may be an attribute or a callable depending on the stored object.
1785 if isinstance(self.__V.shape, (tuple, list)):
1786 self.shape = self.__V.shape
1788 self.shape = self.__V.shape()
# Normalize a 1-D shape to an (n, 1) column convention.
1789 if len(self.shape) == 1:
1790 self.shape = (self.shape[0],1)
1791 self.size = self.shape[0] * self.shape[1]
1793 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
# Optional scheduling information kept alongside the value.
1795 if scheduledBy is not None:
1796 self.__T = scheduledBy
# Return the stored object, optionally paired with its scheduler; the
# remaining branches of getO are missing in this extract.
1798 def getO(self, withScheduler=False):
1800 return self.__V, self.__T
1801 elif self.__T is None:
# Internal-type predicates (their def lines are among the dropped lines).
1807 "Vérification du type interne"
1808 return self.__is_vector
1811 "Vérification du type interne"
1812 return self.__is_series
# Representations delegate to the stored value.
1815 "x.__repr__() <==> repr(x)"
1816 return repr(self.__V)
1819 "x.__str__() <==> str(x)"
1820 return str(self.__V)
1822 # ==============================================================================
# Generic covariance-matrix interface. The underlying data self.__C is stored
# in one of four mutually exclusive forms, each with its own flag:
#   scalar  (variance * identity), vector (diagonal), full numpy.matrix,
#   or an arbitrary object implementing the matrix protocol listed below.
# All linear-algebra methods (getI, getT, cholesky, sqrtm, diag, trace,
# arithmetic operators...) dispatch on these flags.
# NOTE(review): mangled extract — line numbers fused into content; many short
# lines (def headers, else:, try:/except, docstring delimiters) are missing.
# Code lines are kept byte-identical; only comments are added. Where a method
# body appears without its "def" line, the method is identified from the
# returned name suffix ("I", "T", "C", "H") and should be confirmed against
# the full source.
1823 class Covariance(object):
1825 Classe générale d'interface de type covariance
1828 name = "GenericCovariance",
1829 asCovariance = None,
1830 asEyeByScalar = None,
1831 asEyeByVector = None,
1834 toBeChecked = False,
1837 Permet de définir une covariance :
1838 - asCovariance : entrée des données, comme une matrice compatible avec
1839 le constructeur de numpy.matrix
1840 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1841 multiplicatif d'une matrice de corrélation identité, aucune matrice
1842 n'étant donc explicitement à donner
1843 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1844 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1845 n'étant donc explicitement à donner
1846 - asCovObject : entrée des données comme un objet python, qui a les
1847 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1848 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1849 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1850 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1851 pleine doit être vérifié
1853 self.__name = str(name)
1854 self.__check = bool(toBeChecked)
# Exactly one of these flags becomes True for a valid input form.
1857 self.__is_scalar = False
1858 self.__is_vector = False
1859 self.__is_matrix = False
1860 self.__is_object = False
# When loading from a script, the dispatching tests selecting which of the
# four forms applies are among the dropped lines between these assignments.
1862 if asScript is not None:
1863 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1865 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1867 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1869 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1871 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1873 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar form: C is stored as a plain non-negative float; shape/size
# assignments for this branch are among the dropped lines.
1875 if __Scalar is not None:
1876 if isinstance(__Scalar, str):
1877 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
1878 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1879 if numpy.array(__Scalar).size != 1:
1880 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
1881 self.__is_scalar = True
1882 self.__C = numpy.abs( float(__Scalar) )
# Vector form: C is the diagonal as a 1-D non-negative float array; the
# logical shape/size are those of the implied square matrix.
1885 elif __Vector is not None:
1886 if isinstance(__Vector, str):
1887 __Vector = PlatformInfo.strvect2liststr( __Vector )
1888 self.__is_vector = True
1889 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
1890 self.shape = (self.__C.size,self.__C.size)
1891 self.size = self.__C.size**2
# Full-matrix form, kept as a numpy.matrix (legacy type in this codebase).
1892 elif __Matrix is not None:
1893 self.__is_matrix = True
1894 self.__C = numpy.matrix( __Matrix, float )
1895 self.shape = self.__C.shape
1896 self.size = self.__C.size
# Object form: duck-typed; the mandatory protocol is checked attribute by
# attribute, optional shape/size are mirrored when present.
1897 elif __Object is not None:
1898 self.__is_object = True
1900 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1901 if not hasattr(self.__C,at):
1902 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1903 if hasattr(self.__C,"shape"):
1904 self.shape = self.__C.shape
1907 if hasattr(self.__C,"size"):
1908 self.size = self.__C.size
# Deliberately disabled hard failure for the "no input" case (kept as-is).
1913 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Internal consistency checks: squareness and positive-definiteness. The SPD
# Cholesky probes are wrapped in try/except in the full source (dropped here).
1917 def __validate(self):
1919 if self.__C is None:
1920 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1921 if self.ismatrix() and min(self.shape) != max(self.shape):
1922 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1923 if self.isobject() and min(self.shape) != max(self.shape):
1924 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1925 if self.isscalar() and self.__C <= 0:
1926 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1927 if self.isvector() and (self.__C <= 0).any():
1928 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# Full SPD check only when requested or when logging is verbose (cost).
1929 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1931 numpy.linalg.cholesky( self.__C )
1933 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1934 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1938 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Type predicates isscalar/isvector/ismatrix/isobject (def lines dropped).
1941 "Vérification du type interne"
1942 return self.__is_scalar
1945 "Vérification du type interne"
1946 return self.__is_vector
1949 "Vérification du type interne"
1950 return self.__is_matrix
1953 "Vérification du type interne"
1954 return self.__is_object
# getI(): inverse covariance, dispatched by stored form (def line dropped).
1959 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
1960 elif self.isvector():
1961 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1962 elif self.isscalar():
1963 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1964 elif self.isobject() and hasattr(self.__C,"getI"):
1965 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
# Explicit None fallback, flagged as required by callers in the original.
1967 return None # Indispensable
# getT(): transpose; diagonal/scalar forms are their own transpose.
1972 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1973 elif self.isvector():
1974 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1975 elif self.isscalar():
1976 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1977 elif self.isobject() and hasattr(self.__C,"getT"):
1978 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1980 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
# cholesky(): lower-triangular factor; element-wise sqrt for diag/scalar.
1983 "Décomposition de Cholesky"
1985 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1986 elif self.isvector():
1987 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1988 elif self.isscalar():
1989 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1990 elif self.isobject() and hasattr(self.__C,"cholesky"):
1991 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1993 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
# choleskyI(): inverse of the Cholesky factor.
1995 def choleskyI(self):
1996 "Inversion de la décomposition de Cholesky"
1998 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1999 elif self.isvector():
2000 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2001 elif self.isscalar():
2002 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2003 elif self.isobject() and hasattr(self.__C,"choleskyI"):
2004 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
2006 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
# sqrtm(): matrix square root via scipy for the full-matrix form, keeping
# only the real part of the result.
2009 "Racine carrée matricielle"
2012 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2013 elif self.isvector():
2014 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2015 elif self.isscalar():
2016 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2017 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2018 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2020 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
# sqrtmI(): inverse of the matrix square root.
2023 "Inversion de la racine carrée matricielle"
2026 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2027 elif self.isvector():
2028 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2029 elif self.isscalar():
2030 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2031 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2032 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2034 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
# diag(): diagonal as an array; scalar form needs an explicit size msize.
2036 def diag(self, msize=None):
2037 "Diagonale de la matrice"
2039 return numpy.diag(self.__C)
2040 elif self.isvector():
2042 elif self.isscalar():
2044 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2046 return self.__C * numpy.ones(int(msize))
2047 elif self.isobject() and hasattr(self.__C,"diag"):
2048 return self.__C.diag()
2050 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
# trace(): sum of the diagonal; scalar form needs msize for the same reason.
2052 def trace(self, msize=None):
2053 "Trace de la matrice"
2055 return numpy.trace(self.__C)
2056 elif self.isvector():
2057 return float(numpy.sum(self.__C))
2058 elif self.isscalar():
2060 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2062 return self.__C * int(msize)
2063 elif self.isobject():
2064 return self.__C.trace()
2066 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
# asfullmatrix(): densify to a plain float ndarray whatever the stored form.
2068 def asfullmatrix(self, msize=None):
2071 return numpy.asarray(self.__C, dtype=float)
2072 elif self.isvector():
2073 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2074 elif self.isscalar():
2076 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2078 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2079 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2080 return self.__C.asfullmatrix()
2082 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
# assparsematrix(): body not visible in this extract (lines dropped).
2084 def assparsematrix(self):
# Representations delegate to the stored data.
2092 "x.__repr__() <==> repr(x)"
2093 return repr(self.__C)
2096 "x.__str__() <==> str(x)"
2097 return str(self.__C)
# __add__: for diag/scalar forms, add C onto the diagonal of a copy of the
# other operand; the "else:" between the two reshape lines is dropped here.
2099 def __add__(self, other):
2100 "x.__add__(y) <==> x+y"
2101 if self.ismatrix() or self.isobject():
2102 return self.__C + numpy.asmatrix(other)
2103 elif self.isvector() or self.isscalar():
2104 _A = numpy.asarray(other)
2105 if len(_A.shape) == 1:
2106 _A.reshape((-1,1))[::2] += self.__C
# Strided write hits the diagonal of the flattened square matrix.
2108 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2109 return numpy.asmatrix(_A)
# Reflected operations are explicitly unsupported.
2111 def __radd__(self, other):
2112 "x.__radd__(y) <==> y+x"
2113 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2115 def __sub__(self, other):
2116 "x.__sub__(y) <==> x-y"
2117 if self.ismatrix() or self.isobject():
2118 return self.__C - numpy.asmatrix(other)
2119 elif self.isvector() or self.isscalar():
2120 _A = numpy.asarray(other)
2121 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2122 return numpy.asmatrix(_A)
2124 def __rsub__(self, other):
2125 "x.__rsub__(y) <==> y-x"
2126 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
# __neg__ body line is dropped in this extract.
2129 "x.__neg__() <==> -x"
# __matmul__: modern operator returning plain ndarrays; distinguishes a
# vector operand (size match) from a matrix operand (leading-dim match).
2132 def __matmul__(self, other):
2133 "x.__mul__(y) <==> x@y"
2134 if self.ismatrix() and isinstance(other, (int, float)):
2135 return numpy.asarray(self.__C) * other
2136 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2137 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2138 return numpy.ravel(self.__C @ numpy.ravel(other))
2139 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2140 return numpy.asarray(self.__C) @ numpy.asarray(other)
2142 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2143 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2144 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2145 return numpy.ravel(self.__C) * numpy.ravel(other)
2146 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2147 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2149 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2150 elif self.isscalar() and isinstance(other,numpy.matrix):
2151 return numpy.asarray(self.__C * other)
2152 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2153 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2154 return self.__C * numpy.ravel(other)
2156 return self.__C * numpy.asarray(other)
2157 elif self.isobject():
2158 return self.__C.__matmul__(other)
2160 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
# __mul__: legacy numpy.matrix counterpart of __matmul__ (returns matrices).
2162 def __mul__(self, other):
2163 "x.__mul__(y) <==> x*y"
2164 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2165 return self.__C * other
2166 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2167 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2168 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2169 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2170 return self.__C * numpy.asmatrix(other)
2172 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2173 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2174 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2175 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2176 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2177 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2179 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2180 elif self.isscalar() and isinstance(other,numpy.matrix):
2181 return self.__C * other
2182 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2183 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2184 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2186 return self.__C * numpy.asmatrix(other)
2187 elif self.isobject():
2188 return self.__C.__mul__(other)
2190 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# __rmatmul__: reflected y@x dispatch, mirroring __matmul__.
2192 def __rmatmul__(self, other):
2193 "x.__rmul__(y) <==> y@x"
2194 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2195 return other * self.__C
2196 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2197 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2198 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2199 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2200 return numpy.asmatrix(other) * self.__C
2202 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2203 elif self.isvector() and isinstance(other,numpy.matrix):
2204 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2205 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2206 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2207 return numpy.asmatrix(numpy.array(other) * self.__C)
2209 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2210 elif self.isscalar() and isinstance(other,numpy.matrix):
2211 return other * self.__C
2212 elif self.isobject():
2213 return self.__C.__rmatmul__(other)
2215 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
# __rmul__: reflected y*x dispatch, mirroring __mul__ (plus a float case).
2217 def __rmul__(self, other):
2218 "x.__rmul__(y) <==> y*x"
2219 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2220 return other * self.__C
2221 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2222 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2223 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2224 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2225 return numpy.asmatrix(other) * self.__C
2227 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2228 elif self.isvector() and isinstance(other,numpy.matrix):
2229 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2230 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2231 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2232 return numpy.asmatrix(numpy.array(other) * self.__C)
2234 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2235 elif self.isscalar() and isinstance(other,numpy.matrix):
2236 return other * self.__C
2237 elif self.isscalar() and isinstance(other,float):
2238 return other * self.__C
2239 elif self.isobject():
2240 return self.__C.__rmul__(other)
2242 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# __len__: number of rows of the (square) covariance (def line dropped).
2245 "x.__len__() <==> len(x)"
2246 return self.shape[0]
2248 # ==============================================================================
# Builds an observer function from its source text: the text (self.__corps) is
# executed inside func via exec in the full source (those lines are dropped
# here), and getfunc returns the bound callable.
# NOTE(review): mangled extract — code kept byte-identical, comments added.
2249 class Observer2Func(object):
2251 Création d'une fonction d'observateur a partir de son texte
2253 def __init__(self, corps=""):
2254 self.__corps = corps
2255 def func(self,var,info):
2256 "Fonction d'observation"
# getfunc docstring; its def line and return are among the dropped lines.
2259 "Restitution du pointeur de fonction dans l'objet"
2262 # ==============================================================================
# Records the sequence of commands that built a case (self.__logSerie) and can
# replay it through pluggable viewers (dump) and loaders (load), keyed by
# format name; extra viewers/loaders may be injected at construction.
# NOTE(review): mangled extract — the dict-literal opening lines for
# self.__viewers / self.__loaders are among the dropped lines; code kept
# byte-identical, only comments added.
2263 class CaseLogger(object):
2265 Conservation des commandes de création d'un cas
2267 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2268 self.__name = str(__name)
2269 self.__objname = str(__objname)
# Ordered list of (command, keys, locals, pre, switchoff) tuples.
2270 self.__logSerie = []
2271 self.__switchoff = False
# Built-in output formatters (entries of the self.__viewers dict).
2273 "TUI" :Interfaces._TUIViewer,
2274 "SCD" :Interfaces._SCDViewer,
2275 "YACS":Interfaces._YACSViewer,
2276 "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2277 "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2278 "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
# Built-in input parsers (entries of the self.__loaders dict).
2281 "TUI" :Interfaces._TUIViewer,
2282 "COM" :Interfaces._COMViewer,
# User-supplied formats extend/override the built-ins.
2284 if __addViewers is not None:
2285 self.__viewers.update(dict(__addViewers))
2286 if __addLoaders is not None:
2287 self.__loaders.update(dict(__addLoaders))
# Record one command unless logging is currently switched off; "self" is
# stripped from the captured keys, and __switchoff toggles future logging.
2289 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2290 "Enregistrement d'une commande individuelle"
2291 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2292 if "self" in __keys: __keys.remove("self")
2293 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2295 self.__switchoff = True
2297 self.__switchoff = False
# Serialize the recorded command series with the requested viewer.
2299 def dump(self, __filename=None, __format="TUI", __upa=""):
2300 "Restitution normalisée des commandes"
2301 if __format in self.__viewers:
2302 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2304 raise ValueError("Dumping as \"%s\" is not available"%__format)
2305 return __formater.dump(__filename, __upa)
# Parse commands back from a file/content/object with the requested loader.
2307 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2308 "Chargement normalisé des commandes"
2309 if __format in self.__loaders:
2310 __formater = self.__loaders[__format]()
2312 raise ValueError("Loading as \"%s\" is not available"%__format)
2313 return __formater.load(__filename, __content, __object)
2315 # ==============================================================================
# Body of MultiFonction: maps _sFunction over an ordered series of input
# vectors, either through a multiprocessing.Pool or sequentially, optionally
# forwarding extra positional (tuple/list/map) or keyword (dict) arguments.
# NOTE(review): mangled extract — the "def MultiFonction(__xserie," header,
# the multiprocessing on/off test and the _jobs construction lines are among
# the dropped lines; code kept byte-identical, only comments added.
2318 _extraArguments = None,
2319 _sFunction = lambda x: x,
2324 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2325 correspondante de valeurs de la fonction en argument
2327 # Initial checks and definitions
2328 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2329 if not PlatformInfo.isIterable( __xserie ):
2330 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Normalize the worker count; non-positive/None means the default (the branch
# body line is dropped here — presumably __mpWorkers = None; TODO confirm).
2332 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2335 __mpWorkers = int(_mpWorkers)
# Parallel path: evaluate all jobs in a process pool.
2337 import multiprocessing
2348 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2349 with multiprocessing.Pool(__mpWorkers) as pool:
2350 __multiHX = pool.map( _sFunction, _jobs )
2353 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential path: three variants depending on how extras are passed.
2355 # logging.debug("MULTF Internal monoprocessing calculation begin")
2357 if _extraArguments is None:
2358 for __xvalue in __xserie:
2359 __multiHX.append( _sFunction( __xvalue ) )
2360 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2361 for __xvalue in __xserie:
2362 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2363 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2364 for __xvalue in __xserie:
2365 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2367 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2368 # logging.debug("MULTF Internal monoprocessing calculation end")
2370 # logging.debug("MULTF Internal multifonction calculations end")
2373 # ==============================================================================
# NOTE(review): this excerpt is a non-contiguous sampling of the original
# function — several parameters (the observation vector _Y, background _Xb,
# covariance inverses _BI/_RI, ...), some "else:" lines and the final return
# lines are missing from this view; indentation is a best-effort
# reconstruction and no code token has been altered.
def CostFunction3D(_x,
        _Hm  = None,    # To simulate Hm(x): HO["Direct"].appliedTo
        _HmX = None,    # Already available simulation of Hm(x)
        _arg = None,    # Extra arguments for Hm, as a tuple
        _SIV = False,   # To be removed for version 8.0
        _SSC = [],      # self._parameters["StoreSupplementaryCalculations"] (NB: mutable default, shared across calls)
        _nPS = 0,       # nbPreviousSteps
        _QM  = "DA",    # QualityMeasure
        _SSV = {},      # Input and/or output: self.StoredVariables (NB: mutable default)
        _fRt = False,   # Whether to return the extended output
        _sSc = True,    # Whether to store the SSC
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO,
    SA, Tabu, DFO, QuantileRegression
    """
    # Give the storage objects an "append" alias so they can be used like lists
    for k in ["CostFunctionJ",
              "SimulatedObservationAtCurrentOptimum",
              "SimulatedObservationAtCurrentState",
        if hasattr(_SSV[k],"store"):
            _SSV[k].append = _SSV[k].store  # To use "append" instead of "store"
    # Normalize the state to a column matrix
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    # Obtain the simulation: reuse _HmX when provided, otherwise call _Hm
    # (the branches around the lines below are on missing lines).
    if _HmX is not None:
        raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    # Any NaN in the simulation invalidates the whole cost evaluation
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
        # NOTE(review): the "else:" introducing the nominal evaluation below
        # is on a missing line.
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        # Select the quality measure; Jb is the background term, Jo the
        # observation term.
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            # (the guard "if _RI is None:" is on a missing line)
            raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            # (the Jb/Jo assignments for this case and the final "else:" are on
            # missing lines)
            raise ValueError("Unknown asked quality measure!")
        # Total cost: sum of background and observation terms
        J = float( Jb ) + float( Jo )
        # Store the three cost components (storage guard on a missing line)
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    # Track the optimum over the stored history when requested
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        # Search only among steps produced by the current run (skip _nPS)
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
        if "IndexOfOptimum" in _SSC:
            _SSV["IndexOfOptimum"].append( IndexMin )
        if "CurrentOptimum" in _SSC:
            _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
        if "SimulatedObservationAtCurrentOptimum" in _SSC:
            _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    # Return value (return lines are missing from this excerpt)
    if _QM in ["QR"]:  # For QuantileRegression
2488 # ==============================================================================
if __name__ == "__main__":
    # Minimal self-diagnostic banner shown when the module is run directly.
    print("\n AUTODIAGNOSTIC\n")