logging.basicConfig(level=logging.INFO)
#----------- Templates Part ---------------#
-begin_catalog_file = """
-# -*- coding: utf-8 -*-
+begin_catalog_file = """# -*- coding: utf-8 -*-
# --------------------------------------------------------
# generated by AdaoCatalogGenerator at ${date}
JdC = JDC_CATA (code = 'ADAO',
execmodul = None,
- regles = ( AU_MOINS_UN ('ASSIMILATION_STUDY'), AU_PLUS_UN ('ASSIMILATION_STUDY')),
+ regles = ( AU_MOINS_UN ('ASSIMILATION_STUDY','CHECKING_STUDY'), AU_PLUS_UN ('ASSIMILATION_STUDY','CHECKING_STUDY')),
)
"""
OutputVariables = F_variables("f"),
Observers = F_Observers("f")
)
+
+CHECKING_STUDY = PROC(nom="CHECKING_STUDY",
+ op=None,
+ repetable = "n",
+ Study_name = SIMP(statut="o", typ = "TXM"),
+ Study_repertory = SIMP(statut="f", typ = "TXM"),
+ Debug = SIMP(statut="o", typ = "I", into=(0, 1), defaut=0),
+ Algorithm = SIMP(statut="o", typ = "TXM", into=(${check_names})),
+ CheckingPoint = F_CheckingPoint("o"),
+ ObservationOperator = F_ObservationOperator("o"),
+ AlgorithmParameters = F_AlgorithmParameters("f"),
+ UserDataInit = F_Init("f"),
+ )
"""
begin_catalog_file = string.Template(begin_catalog_file)
# Final step: Add algorithm and assim_study
algos_names = ""
-decl_algos = ""
+check_names = ""
+decl_algos = ""
assim_study_object = daCore.AssimilationStudy.AssimilationStudy()
algos_list = assim_study_object.get_available_algorithms()
for algo_name in algos_list:
- logging.debug("An assimilation algorithm is found: " + algo_name)
- algos_names += "\"" + algo_name + "\", "
+ if algo_name in infos.AssimAlgos:
+ logging.debug("An assimilation algorithm is found: " + algo_name)
+ algos_names += "\"" + algo_name + "\", "
+ elif algo_name in infos.CheckAlgos:
+ logging.debug("A checking algorithm is found: " + algo_name)
+ check_names += "\"" + algo_name + "\", "
+ else:
+ logging.debug("This algorithm is not considered: " + algo_name)
mem_file.write(assim_study.substitute(algos_names=algos_names,
+ check_names=check_names,
decl_algos=decl_algos))
# Write file
final_file = open(catalog_path + "/" + catalog_name, "w")
# Data
# print "Data entered are:"
+
# Background
try:
Background
assim_study.setBackgroundType(BackgroundType)
assim_study.setBackground(Background)
+# CheckingPoint
+try:
+ CheckingPoint
+except NameError:
+ pass
+else:
+ logging.debug("CREATE CheckingPoint is %s"%CheckingPoint)
+ logging.debug("CREATE CheckingPointType is %s"%CheckingPointType)
+ assim_study.setCheckingPointType(CheckingPointType)
+ assim_study.setCheckingPoint(CheckingPoint)
+
# BackgroundError
try:
BackgroundError
--- /dev/null
+#-*-coding:iso-8859-1-*-
+#
+# Copyright (C) 2008-2012 EDF R&D
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
+#
+
+import logging
+from daCore import BasicObjects, PlatformInfo
+m = PlatformInfo.SystemUsage()
+
+import numpy
+
+# ==============================================================================
+class ElementaryAlgorithm(BasicObjects.Algorithm):
+ def __init__(self):
+ BasicObjects.Algorithm.__init__(self, "GRADIENTTEST")
+ self.defineRequiredParameter(
+ name = "ResiduFormula",
+ default = "Taylor",
+ typecast = str,
+ message = "Formule de résidu utilisée",
+ listval = ["Norm", "Taylor"],
+ )
+ self.defineRequiredParameter(
+ name = "EpsilonMinimumExponent",
+ default = -8,
+ typecast = int,
+ message = "Exposant minimal en puissance de 10 pour le multiplicateur d'incrément",
+ minval = -20,
+ maxval = 0,
+ )
+ self.defineRequiredParameter(
+ name = "InitialDirection",
+ default = [],
+ typecast = list,
+ message = "Direction initiale de la dérivée directionnelle autour du point nominal",
+ )
+ self.defineRequiredParameter(
+ name = "AmplitudeOfInitialDirection",
+ default = 1.,
+ typecast = float,
+ message = "Amplitude de la direction initiale de la dérivée directionnelle autour du point nominal",
+ )
+ self.defineRequiredParameter(
+ name = "SetSeed",
+ typecast = numpy.random.seed,
+ message = "Graine fixée pour le générateur aléatoire",
+ )
+ self.defineRequiredParameter(
+ name = "PlotAndSave",
+ default = False,
+ typecast = bool,
+ message = "Trace et sauve les résultats",
+ )
+ self.defineRequiredParameter(
+ name = "ResultFile",
+ default = "",
+ typecast = str,
+ message = "Nom de base (hors extension) des fichiers de sauvegarde des résultats",
+ )
+ self.defineRequiredParameter(
+ name = "ResultTitle",
+ default = "",
+ typecast = str,
+ message = "Titre du tableau et de la figure",
+ )
+ self.defineRequiredParameter(
+ name = "ResultLabel",
+ default = "",
+ typecast = str,
+ message = "Label de la courbe tracée dans la figure",
+ )
+
+ def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
+ logging.debug("%s Lancement"%self._name)
+ logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo")))
+ #
+ # Paramètres de pilotage
+ # ----------------------
+ self.setParameters(Parameters)
+ #
+ # Opérateur d'observation
+ # -----------------------
+ Hm = H["Direct"].appliedTo
+        if self._parameters["ResiduFormula"] == "Taylor":
+ Ht = H["Tangent"].appliedInXTo
+ #
+ # Construction des perturbations
+ # ------------------------------
+ Perturbations = [ 10**i for i in xrange(self._parameters["EpsilonMinimumExponent"],1) ]
+ Perturbations.reverse()
+ #
+ # Calcul du point courant
+ # -----------------------
+ X = numpy.asmatrix(Xb).flatten().T
+ FX = numpy.asmatrix( Hm( X ) ).flatten().T
+ FX = numpy.asmatrix(FX).flatten().T
+ NormeX = numpy.linalg.norm( X )
+ NormeFX = numpy.linalg.norm( FX )
+ #
+ # Fabrication de la direction de l'incrément dX
+ # ----------------------------------------------
+ if len(self._parameters["InitialDirection"]) == 0:
+ dX0 = []
+ for v in X.A1:
+ if abs(v) > 1.e-8:
+ dX0.append( numpy.random.normal(0.,abs(v)) )
+ else:
+ dX0.append( numpy.random.normal(0.,X.mean()) )
+ else:
+ dX0 = numpy.asmatrix(self._parameters["InitialDirection"]).flatten()
+ #
+ dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T
+ #
+ # Calcul du gradient au point courant X pour l'incrément dX
+ # ---------------------------------------------------------
+        if self._parameters["ResiduFormula"] == "Taylor":
+ GradFxdX = Ht( (X, dX0) )
+ GradFxdX = numpy.asmatrix(GradFxdX).flatten().T
+ #
+ # Entete des resultats
+ # --------------------
+        if self._parameters["ResiduFormula"] == "Taylor":
+ __doc__ = """
+ On observe le residu issu du développement de Taylor de la fonction H :
+
+ R(Alpha) = || H(x+Alpha*dx) - H(x) - Alpha * TangentH_x(dx) ||
+
+ Ce résidu doit décroître en Alpha**2 selon Alpha.
+ On prend dX0 = Normal(0,X) et dX = Alpha*dX0. H est le code de calcul.
+ """
+        elif self._parameters["ResiduFormula"] == "Norm":
+ __doc__ = """
+ On observe le residu, qui est une approximation du gradient :
+
+ || H(X+Alpha*dX) - H(X) ||
+ R(Alpha) = ---------------------------
+ Alpha
+
+ qui doit rester constant jusqu'à ce qu'on atteigne la précision du calcul.
+ On prend dX0 = Normal(0,X) et dX = Alpha*dX0. H est le code de calcul.
+ """
+ else:
+ __doc__ = ""
+ #
+ msgs = " ====" + "="*len(self._parameters["ResultTitle"]) + "====\n"
+ msgs += " " + self._parameters["ResultTitle"] + "\n"
+ msgs += " ====" + "="*len(self._parameters["ResultTitle"]) + "====\n"
+ msgs += __doc__
+ #
+ msg = " i Alpha ||X|| ||H(X)|| ||H(X+dX)|| ||dX|| ||H(X+dX)-H(X)|| ||H(X+dX)-H(X)||/||dX|| R(Alpha) "
+ nbtirets = len(msg)
+ msgs += "\n" + "-"*nbtirets
+ msgs += "\n" + msg
+ msgs += "\n" + "-"*nbtirets
+ #
+ Normalisation= -1
+ NormesdX = []
+ NormesFXdX = []
+ NormesdFX = []
+ NormesdFXsdX = []
+ NormesdFXsAm = []
+ NormesdFXGdX = []
+ #
+ # Boucle sur les perturbations
+ # ----------------------------
+ for i,amplitude in enumerate(Perturbations):
+ logging.debug("%s Etape de calcul numéro %i, avec la perturbation %8.3e"%(self._name, i, amplitude))
+ #
+ dX = amplitude * dX0
+ #
+ FX_plus_dX = Hm( X + dX )
+ FX_plus_dX = numpy.asmatrix(FX_plus_dX).flatten().T
+ #
+ NormedX = numpy.linalg.norm( dX )
+ NormeFXdX = numpy.linalg.norm( FX_plus_dX )
+ NormedFX = numpy.linalg.norm( FX_plus_dX - FX )
+ NormedFXsdX = NormedFX/NormedX
+ # Residu Taylor
+            if self._parameters["ResiduFormula"] == "Taylor":
+ NormedFXGdX = numpy.linalg.norm( FX_plus_dX - FX - amplitude * GradFxdX )
+ # Residu Norm
+ NormedFXsAm = NormedFX/amplitude
+ #
+ # if numpy.abs(NormedFX) < 1.e-20:
+ # break
+ #
+ NormesdX.append( NormedX )
+ NormesFXdX.append( NormeFXdX )
+ NormesdFX.append( NormedFX )
+            if self._parameters["ResiduFormula"] == "Taylor":
+ NormesdFXGdX.append( NormedFXGdX )
+ NormesdFXsdX.append( NormedFXsdX )
+ NormesdFXsAm.append( NormedFXsAm )
+ #
+            if self._parameters["ResiduFormula"] == "Taylor":
+ Residu = NormedFXGdX
+            elif self._parameters["ResiduFormula"] == "Norm":
+ Residu = NormedFXsAm
+ if Normalisation < 0 : Normalisation = Residu
+ #
+ msg = " %2i %5.0e %8.3e %8.3e %8.3e %8.3e %8.3e | %8.3e | %8.3e"%(i,amplitude,NormeX,NormeFX,NormeFXdX,NormedX,NormedFX,NormedFXsdX,Residu)
+ msgs += "\n" + msg
+ #
+ self.StoredVariables["CostFunctionJ"].store( Residu )
+ msgs += "\n" + "-"*nbtirets
+ msgs += "\n"
+ #
+ # Sorties eventuelles
+ # -------------------
+ logging.debug("%s Résultats :\n%s"%(self._name, msgs))
+ print
+ print "Results of gradient stability check:"
+ print msgs
+ #
+ if self._parameters["PlotAndSave"]:
+ f = open(str(self._parameters["ResultFile"])+".txt",'a')
+ f.write(msgs)
+ f.close()
+ #
+ Residus = self.StoredVariables["CostFunctionJ"].valueserie()[-len(Perturbations):]
+        if self._parameters["ResiduFormula"] == "Taylor":
+ PerturbationsCarre = [ 10**(2*i) for i in xrange(-len(NormesdFXGdX)+1,1) ]
+ PerturbationsCarre.reverse()
+ dessiner(
+ Perturbations,
+ Residus,
+ titre = self._parameters["ResultTitle"],
+ label = self._parameters["ResultLabel"],
+ logX = True,
+ logY = True,
+ filename = str(self._parameters["ResultFile"])+".ps",
+ YRef = PerturbationsCarre,
+ normdY0 = numpy.log10( NormesdFX[0] ),
+ )
+        elif self._parameters["ResiduFormula"] == "Norm":
+ dessiner(
+ Perturbations,
+ Residus,
+ titre = self._parameters["ResultTitle"],
+ label = self._parameters["ResultLabel"],
+ logX = True,
+ logY = True,
+ filename = str(self._parameters["ResultFile"])+".ps",
+ )
+ #
+ logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo")))
+ logging.debug("%s Terminé"%self._name)
+ #
+ return 0
+
+# ==============================================================================
+
+def dessiner(
+ X,
+ Y,
+ titre = "",
+ label = "",
+ logX = False,
+ logY = False,
+ filename = "",
+ pause = False,
+ YRef = None, # Vecteur de reference a comparer a Y
+ recalYRef = True, # Decalage du point 0 de YRef à Y[0]
+ normdY0 = 0., # Norme de DeltaY[0]
+ ):
+ import Gnuplot
+ __gnuplot = Gnuplot
+ __g = __gnuplot.Gnuplot(persist=1) # persist=1
+ # __g('set terminal '+__gnuplot.GnuplotOpts.default_term)
+ __g('set style data lines')
+ __g('set grid')
+ __g('set autoscale')
+ __g('set title "'+titre+'"')
+ # __g('set xrange [] reverse')
+ # __g('set yrange [0:2]')
+ #
+ if logX:
+ steps = numpy.log10( X )
+ __g('set xlabel "Facteur multiplicatif de dX, en echelle log10"')
+ else:
+ steps = X
+ __g('set xlabel "Facteur multiplicatif de dX"')
+ #
+ if logY:
+ values = numpy.log10( Y )
+ __g('set ylabel "Amplitude du residu, en echelle log10"')
+ else:
+ values = Y
+ __g('set ylabel "Amplitude du residu"')
+ #
+ __g.plot( __gnuplot.Data( steps, values, title=label, with_='lines lw 3' ) )
+ if YRef is not None:
+ if logY:
+ valuesRef = numpy.log10( YRef )
+ else:
+ valuesRef = YRef
+ if recalYRef and not numpy.all(values < -8):
+ valuesRef = valuesRef + values[0]
+ elif recalYRef and numpy.all(values < -8):
+ valuesRef = valuesRef + normdY0
+ else:
+ pass
+ __g.replot( __gnuplot.Data( steps, valuesRef, title="Reference", with_='lines lw 1' ) )
+ #
+    if filename != "":
+ __g.hardcopy( filename, color=1)
+ if pause:
+ raw_input('Please press return to continue...\n')
+
+# ==============================================================================
+if __name__ == "__main__":
+ print '\n AUTODIAGNOSTIC \n'
return s
def generate_da(self):
-
+
+ if "__CHECKING_STUDY__Study_name" in self.dictMCVal.keys():
+ self.type_of_study = "CHECKING_STUDY"
+ else:
+ self.type_of_study = "ASSIMILATION_STUDY"
+
self.text_da += "#-*-coding:iso-8859-1-*- \n"
self.text_da += "study_config = {} \n"
+ # Extraction de Study_type
+ self.text_da += "study_config['StudyType'] = '" + self.type_of_study + "'\n"
# Extraction de Study_name
- self.text_da += "study_config['Name'] = '" + self.dictMCVal["__ASSIMILATION_STUDY__Study_name"] + "'\n"
+ self.text_da += "study_config['Name'] = '" + self.dictMCVal["__"+self.type_of_study+"__Study_name"] + "'\n"
# Extraction de Debug
- self.text_da += "study_config['Debug'] = '" + str(self.dictMCVal["__ASSIMILATION_STUDY__Debug"]) + "'\n"
+ self.text_da += "study_config['Debug'] = '" + str(self.dictMCVal["__"+self.type_of_study+"__Debug"]) + "'\n"
# Extraction de Algorithm
- self.text_da += "study_config['Algorithm'] = '" + self.dictMCVal["__ASSIMILATION_STUDY__Algorithm"] + "'\n"
-
- self.add_data("Background")
- self.add_data("BackgroundError")
- self.add_data("Observation")
- self.add_data("ObservationError")
+ self.text_da += "study_config['Algorithm'] = '" + self.dictMCVal["__"+self.type_of_study+"__Algorithm"] + "'\n"
+
+ if "__"+self.type_of_study+"__Background__INPUT_TYPE" in self.dictMCVal.keys():
+ self.add_data("Background")
+ if "__"+self.type_of_study+"__BackgroundError__INPUT_TYPE" in self.dictMCVal.keys():
+ self.add_data("BackgroundError")
+ if "__"+self.type_of_study+"__Observation__INPUT_TYPE" in self.dictMCVal.keys():
+ self.add_data("Observation")
+ if "__"+self.type_of_study+"__ObservationError__INPUT_TYPE" in self.dictMCVal.keys():
+ self.add_data("ObservationError")
+ if "__"+self.type_of_study+"__CheckingPoint__INPUT_TYPE" in self.dictMCVal.keys():
+ self.add_data("CheckingPoint")
self.add_data("ObservationOperator")
self.add_variables()
# Parametres optionnels
# Extraction du Study_repertory
- if "__ASSIMILATION_STUDY__Study_repertory" in self.dictMCVal.keys():
- self.text_da += "study_config['Repertory'] = '" + self.dictMCVal["__ASSIMILATION_STUDY__Study_repertory"] + "'\n"
+ if "__"+self.type_of_study+"__Study_repertory" in self.dictMCVal.keys():
+ self.text_da += "study_config['Repertory'] = '" + self.dictMCVal["__"+self.type_of_study+"__Study_repertory"] + "'\n"
# Extraction de AlgorithmParameters
- if "__ASSIMILATION_STUDY__AlgorithmParameters__INPUT_TYPE" in self.dictMCVal.keys():
+ if "__"+self.type_of_study+"__AlgorithmParameters__INPUT_TYPE" in self.dictMCVal.keys():
self.add_algorithm_parameters()
# Extraction de UserPostAnalysis
- if "__ASSIMILATION_STUDY__UserPostAnalysis__FROM" in self.dictMCVal.keys():
+ if "__"+self.type_of_study+"__UserPostAnalysis__FROM" in self.dictMCVal.keys():
self.add_UserPostAnalysis()
- if "__ASSIMILATION_STUDY__UserDataInit__INIT_FILE" in self.dictMCVal.keys():
+ if "__"+self.type_of_study+"__UserDataInit__INIT_FILE" in self.dictMCVal.keys():
self.add_init()
- if "__ASSIMILATION_STUDY__Observers__SELECTION" in self.dictMCVal.keys():
+ if "__"+self.type_of_study+"__Observers__SELECTION" in self.dictMCVal.keys():
self.add_observers()
def add_data(self, data_name):
# Extraction des données
- search_text = "__ASSIMILATION_STUDY__" + data_name + "__"
+ search_text = "__"+self.type_of_study+"__" + data_name + "__"
data_type = self.dictMCVal[search_text + "INPUT_TYPE"]
search_type = search_text + data_type + "__data__"
from_type = self.dictMCVal[search_type + "FROM"]
data_name = "AlgorithmParameters"
data_type = "Dict"
from_type = "Script"
- data = self.dictMCVal["__ASSIMILATION_STUDY__AlgorithmParameters__Dict__data__SCRIPT_DATA__SCRIPT_FILE"]
+ data = self.dictMCVal["__"+self.type_of_study+"__AlgorithmParameters__Dict__data__SCRIPT_DATA__SCRIPT_FILE"]
self.text_da += data_name + "_config = {} \n"
self.text_da += data_name + "_config['Type'] = '" + data_type + "'\n"
def add_init(self):
- init_file_data = self.dictMCVal["__ASSIMILATION_STUDY__UserDataInit__INIT_FILE"]
- init_target_list = self.dictMCVal["__ASSIMILATION_STUDY__UserDataInit__TARGET_LIST"]
+ init_file_data = self.dictMCVal["__"+self.type_of_study+"__UserDataInit__INIT_FILE"]
+ init_target_list = self.dictMCVal["__"+self.type_of_study+"__UserDataInit__TARGET_LIST"]
self.text_da += "Init_config = {}\n"
self.text_da += "Init_config['Type'] = 'Dict'\n"
def add_UserPostAnalysis(self):
- from_type = self.dictMCVal["__ASSIMILATION_STUDY__UserPostAnalysis__FROM"]
+ from_type = self.dictMCVal["__"+self.type_of_study+"__UserPostAnalysis__FROM"]
data = ""
if from_type == "String":
- data = self.dictMCVal["__ASSIMILATION_STUDY__UserPostAnalysis__STRING_DATA__STRING"]
+ data = self.dictMCVal["__"+self.type_of_study+"__UserPostAnalysis__STRING_DATA__STRING"]
self.text_da += "Analysis_config = {}\n"
self.text_da += "Analysis_config['From'] = 'String'\n"
self.text_da += "Analysis_config['Data'] = \"\"\"" + data + "\"\"\"\n"
self.text_da += "study_config['UserPostAnalysis'] = Analysis_config\n"
elif from_type == "Script":
- data = self.dictMCVal["__ASSIMILATION_STUDY__UserPostAnalysis__SCRIPT_DATA__SCRIPT_FILE"]
+ data = self.dictMCVal["__"+self.type_of_study+"__UserPostAnalysis__SCRIPT_DATA__SCRIPT_FILE"]
self.text_da += "Analysis_config = {}\n"
self.text_da += "Analysis_config['From'] = 'Script'\n"
self.text_da += "Analysis_config['Data'] = '" + data + "'\n"
def add_variables(self):
# Input variables
- if "__ASSIMILATION_STUDY__InputVariables__NAMES" in self.dictMCVal.keys():
+ if "__"+self.type_of_study+"__InputVariables__NAMES" in self.dictMCVal.keys():
names = []
sizes = []
- if isinstance(self.dictMCVal["__ASSIMILATION_STUDY__InputVariables__NAMES"], type("")):
- names.append(self.dictMCVal["__ASSIMILATION_STUDY__InputVariables__NAMES"])
+ if isinstance(self.dictMCVal["__"+self.type_of_study+"__InputVariables__NAMES"], type("")):
+ names.append(self.dictMCVal["__"+self.type_of_study+"__InputVariables__NAMES"])
else:
- names = self.dictMCVal["__ASSIMILATION_STUDY__InputVariables__NAMES"]
- if isinstance(self.dictMCVal["__ASSIMILATION_STUDY__InputVariables__SIZES"], type(1)):
- sizes.append(self.dictMCVal["__ASSIMILATION_STUDY__InputVariables__SIZES"])
+ names = self.dictMCVal["__"+self.type_of_study+"__InputVariables__NAMES"]
+ if isinstance(self.dictMCVal["__"+self.type_of_study+"__InputVariables__SIZES"], type(1)):
+ sizes.append(self.dictMCVal["__"+self.type_of_study+"__InputVariables__SIZES"])
else:
- sizes = self.dictMCVal["__ASSIMILATION_STUDY__InputVariables__SIZES"]
+ sizes = self.dictMCVal["__"+self.type_of_study+"__InputVariables__SIZES"]
self.text_da += "inputvariables_config = {}\n"
self.text_da += "inputvariables_config['Order'] = %s\n" % list(names)
self.text_da += "study_config['InputVariables'] = inputvariables_config\n"
# Output variables
- if "__ASSIMILATION_STUDY__OutputVariables__NAMES" in self.dictMCVal.keys():
+ if "__"+self.type_of_study+"__OutputVariables__NAMES" in self.dictMCVal.keys():
names = []
sizes = []
- if isinstance(self.dictMCVal["__ASSIMILATION_STUDY__OutputVariables__NAMES"], type("")):
- names.append(self.dictMCVal["__ASSIMILATION_STUDY__OutputVariables__NAMES"])
+ if isinstance(self.dictMCVal["__"+self.type_of_study+"__OutputVariables__NAMES"], type("")):
+ names.append(self.dictMCVal["__"+self.type_of_study+"__OutputVariables__NAMES"])
else:
- names = self.dictMCVal["__ASSIMILATION_STUDY__OutputVariables__NAMES"]
- if isinstance(self.dictMCVal["__ASSIMILATION_STUDY__OutputVariables__SIZES"], type(1)):
- sizes.append(self.dictMCVal["__ASSIMILATION_STUDY__OutputVariables__SIZES"])
+ names = self.dictMCVal["__"+self.type_of_study+"__OutputVariables__NAMES"]
+ if isinstance(self.dictMCVal["__"+self.type_of_study+"__OutputVariables__SIZES"], type(1)):
+ sizes.append(self.dictMCVal["__"+self.type_of_study+"__OutputVariables__SIZES"])
else:
- sizes = self.dictMCVal["__ASSIMILATION_STUDY__OutputVariables__SIZES"]
+ sizes = self.dictMCVal["__"+self.type_of_study+"__OutputVariables__SIZES"]
self.text_da += "outputvariables_config = {}\n"
self.text_da += "outputvariables_config['Order'] = %s\n" % list(names)
def add_observers(self):
observers = {}
- observer = self.dictMCVal["__ASSIMILATION_STUDY__Observers__SELECTION"]
+ observer = self.dictMCVal["__"+self.type_of_study+"__Observers__SELECTION"]
if isinstance(observer, type("")):
self.add_observer_in_dict(observer, observers)
else:
- for observer in self.dictMCVal["__ASSIMILATION_STUDY__Observers__SELECTION"]:
+ for observer in self.dictMCVal["__"+self.type_of_study+"__Observers__SELECTION"]:
self.add_observer_in_dict(observer, observers)
# Write observers in the python command file
"""
observers[observer] = {}
observers[observer]["name"] = observer
- observer_eficas_name = "__ASSIMILATION_STUDY__Observers__" + observer + "__" + observer + "_data__"
+ observer_eficas_name = "__"+self.type_of_study+"__Observers__" + observer + "__" + observer + "_data__"
# NodeType
node_type_key_name = observer_eficas_name + "NodeType"
observers[observer]["nodetype"] = self.dictMCVal[node_type_key_name]
self.algorithm = algorithm
self.algorithm_dict = None
self.Background = None
+ self.CheckingPoint = None
self.InputVariables = {}
self.OutputVariables = {}
self.InputVariablesOrder = []
#--------------------------------------
+ def setCheckingPointType(self, Type):
+ if Type == "Vector":
+ self.CheckingPointType = Type
+ else:
+            raise daError("[daStudy::setCheckingPointType] Type is unknown : " + Type + " Types are : Vector")
+
+ def setCheckingPoint(self, CheckingPoint):
+ try:
+ self.CheckingPointType
+ except AttributeError:
+ raise daError("[daStudy::setCheckingPoint] Type is not defined !")
+ self.CheckingPoint = CheckingPoint
+ if self.CheckingPointType == "Vector":
+ self.ADD.setBackground(asVector = CheckingPoint)
+
+ #--------------------------------------
+
def setBackgroundError(self, BackgroundError):
self.ADD.setBackgroundError(asCovariance = BackgroundError)
logging.fatal("Cannot found Algorithm in the study configuration")
sys.exit(1)
else:
- if study_config["Algorithm"] not in AssimAlgos:
+ if not (study_config["Algorithm"] in AssimAlgos or study_config["Algorithm"] in CheckAlgos):
logging.fatal("Algorithm provided is unknow : " + str(study_config["Algorithm"]) +
- "\n You can choose between : " + str(AssimAlgos))
+ "\n You can choose between : " + str(AssimAlgos)+" "+str(CheckAlgos))
sys.exit(1)
# Debug
"Observation", "ObservationError",
"ObservationOperator",
"EvolutionModel", "EvolutionError",
- "AlgorithmParameters"]
+ "AlgorithmParameters",
+ "CheckingPoint",
+ ]
AssimType = {}
AssimType["Background"] = ["Vector"]
AssimType["EvolutionError"] = ["Matrix"]
AssimType["AlgorithmParameters"] = ["Dict"]
AssimType["UserDataInit"] = ["Dict"]
+AssimType["CheckingPoint"] = ["Vector"]
FromNumpyList = {}
FromNumpyList["Vector"] = ["String", "Script"]
"NonLinearLeastSquares",
"QuantileRegression",
]
+CheckAlgos = [
+ "GradientTest",
+ "AdjointTest",
+ ]
AlgoDataRequirements = {}
AlgoDataRequirements["3DVAR"] = [
"Observation",
"ObservationOperator",
]
+AlgoDataRequirements["GradientTest"] = [
+ "CheckingPoint",
+ "ObservationOperator",
+ ]
AlgoType = {}
AlgoType["3DVAR"] = "Optim"
AssimDataDict["EvolutionError"] = ["Matrix"]
AssimDataDict["AlgorithmParameters"] = ["Dict"]
AssimDataDict["UserDataInit"] = ["Dict"]
+AssimDataDict["CheckingPoint"] = ["Vector"]
AssimDataDefaultDict = {}
AssimDataDefaultDict["Background"] = "Vector"
AssimDataDefaultDict["EvolutionError"] = "Matrix"
AssimDataDefaultDict["AlgorithmParameters"] = "Dict"
AssimDataDefaultDict["UserDataInit"] = "Dict"
+AssimDataDefaultDict["CheckingPoint"] = "Vector"
# Assimilation optional nodes
OptDict = {}