# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2008-2013 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
import logging
import numpy
from daCore import BasicObjects, PlatformInfo
m = PlatformInfo.SystemUsage()
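# m is used below to report the memory footprint of the process in the debug log.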
# ==============================================================================
class ElementaryAlgorithm(BasicObjects.Algorithm):
    def __init__(self):
        BasicObjects.Algorithm.__init__(self, "ADJOINTTEST")
        self.defineRequiredParameter(
            name     = "ResiduFormula",
            default  = "ScalarProduct",
            typecast = str,
            message  = "Residue formula used",
            listval  = ["ScalarProduct"],
            )
        self.defineRequiredParameter(
            name     = "EpsilonMinimumExponent",
            default  = -8,
            typecast = int,
            message  = "Minimum exponent, as a power of 10, of the increment multiplier",
            minval   = -20,
            maxval   = 0,
            )
        self.defineRequiredParameter(
            name     = "InitialDirection",
            default  = [],
            typecast = list,
            message  = "Initial direction of the directional derivative around the nominal point",
            )
        self.defineRequiredParameter(
            name     = "AmplitudeOfInitialDirection",
            default  = 1.,
            typecast = float,
            message  = "Amplitude of the initial direction of the directional derivative around the nominal point",
            )
        self.defineRequiredParameter(
            name     = "SetSeed",
            typecast = numpy.random.seed,
            message  = "Fixed seed for the random number generator",
            )
        self.defineRequiredParameter(
            name     = "ResultTitle",
            default  = "",
            message  = "Title of the table and of the figure",
            )
    def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
        logging.debug("%s Launching"%self._name)
        logging.debug("%s Memory used: %.1f MB"%(self._name, m.getUsedMemory("M")))
        self.setParameters(Parameters)
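        # Shortcuts to the direct, tangent and adjoint forms of the observation
        # operator; the tangent and adjoint forms are evaluated at a given point X.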
        Hm = HO["Direct"].appliedTo
        Ht = HO["Tangent"].appliedInXTo
        Ha = HO["Adjoint"].appliedInXTo
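        # Decreasing perturbation amplitudes Alpha = 1, 0.1, ..., 10**EpsilonMinimumExponent,
        # each one applied to the initial direction dX0 built below.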
        Perturbations = [ 10**i for i in xrange(self._parameters["EpsilonMinimumExponent"],1) ]
        Perturbations.reverse()
        X      = numpy.asmatrix(numpy.ravel( Xb )).T
        NormeX = numpy.linalg.norm( X )
        if Y is None:
            Y = numpy.asmatrix(numpy.ravel( Hm( X ) )).T
        Y      = numpy.asmatrix(numpy.ravel( Y )).T
        NormeY = numpy.linalg.norm( Y )
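        # Initial perturbation direction dX0: either the user-supplied direction,
        # or a random one drawn component-wise from Normal(0,|X|), then scaled by
        # AmplitudeOfInitialDirection.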
        if len(self._parameters["InitialDirection"]) == 0:
            dX0 = []
            for v in X.A1:
                if abs(v) > 1.e-8: dX0.append( numpy.random.normal(0.,abs(v)) )
                else:              dX0.append( numpy.random.normal(0.,X.mean()) )
        else:
            dX0 = numpy.asmatrix(numpy.ravel( self._parameters["InitialDirection"] ))
        dX0 = float(self._parameters["AmplitudeOfInitialDirection"]) * numpy.matrix( dX0 ).T
        # Results header
        # --------------
        __marge = 12*" "
        if self._parameters["ResiduFormula"] == "ScalarProduct":
            __entete = "  i   Alpha     ||X||       ||Y||       ||dX||        R(Alpha)  "
            __msgdoc = """
            The residue is the difference between two scalar products:

              R(Alpha) = | < TangentF_X(dX) , Y > - < dX , AdjointF_X(Y) > |

            which must remain constantly equal to zero, up to the calculation precision.
            One takes dX0 = Normal(0,X) and dX = Alpha*dX0. F is the calculation code.
            Y must be in the image of F. If it is not given, one takes Y = F(X).
            """
        if len(self._parameters["ResultTitle"]) > 0:
            msgs  = "\n"
            msgs += __marge + "====" + "="*len(self._parameters["ResultTitle"]) + "====\n"
            msgs += __marge + "    " + self._parameters["ResultTitle"] + "\n"
            msgs += __marge + "====" + "="*len(self._parameters["ResultTitle"]) + "====\n"
        else:
            msgs  = ""
        msgs += __msgdoc
        __nbtirets = len(__entete)
        msgs += "\n" + __marge + "-"*__nbtirets
        msgs += "\n" + __marge + __entete
        msgs += "\n" + __marge + "-"*__nbtirets
        for i,amplitude in enumerate(Perturbations):
            dX          = amplitude * dX0
            NormedX     = numpy.linalg.norm( dX )
            TangentFXdX = numpy.asmatrix( Ht( (X,dX) ) )
            AdjointFXY  = numpy.asmatrix( Ha( (X,Y)  ) )
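            # Adjoint consistency: for an exact tangent/adjoint pair the scalar
            # products < Ht_X(dX) , Y > and < dX , Ha_X(Y) > are equal, so the
            # residue below should remain at machine precision for every Alpha.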
            Residu = abs(float(numpy.dot( TangentFXdX.A1 , Y.A1 ) - numpy.dot( dX.A1 , AdjointFXY.A1 )))
            msg = "  %2i  %5.0e   %9.3e   %9.3e   %9.3e   |   %9.3e"%(i,amplitude,NormeX,NormeY,NormedX,Residu)
            msgs += "\n" + __marge + msg
            self.StoredVariables["CostFunctionJ"].store( Residu )
        msgs += "\n" + __marge + "-"*__nbtirets
        # Optional outputs
        # ----------------
        print "Results of adjoint check by \"%s\" formula:"%self._parameters["ResiduFormula"]
        print msgs
        logging.debug("%s Number of evaluations of the direct/tangent/adjoint observation operator: %i/%i/%i"%(self._name, HO["Direct"].nbcalls()[0],HO["Tangent"].nbcalls()[0],HO["Adjoint"].nbcalls()[0]))
        logging.debug("%s Memory used: %.1f MB"%(self._name, m.getUsedMemory("M")))
        logging.debug("%s Finished"%self._name)
# ==============================================================================
if __name__ == "__main__":
    print '\n AUTODIAGNOSTIC \n'
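
    # Minimal illustrative sketch (not part of the ADAO algorithm above): the
    # "ScalarProduct" residue checked by this test, written with plain numpy for
    # a linear operator given by a matrix F, whose tangent is F itself and whose
    # adjoint is its transpose. The names F, X, dX and Y are local to this sketch.
    import numpy
    numpy.random.seed(1000)
    F  = numpy.random.rand(3, 3)                 # linear operator: tangent = F, adjoint = F.T
    X  = numpy.random.rand(3)                    # nominal point
    dX = numpy.random.rand(3)                    # perturbation direction
    Y  = F.dot(X)                                # observation taken in the image of F
    Residu = abs(numpy.dot(F.dot(dX), Y) - numpy.dot(dX, F.T.dot(Y)))
    print "Illustrative scalar product residue (should be near machine precision):", Residu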