1 #@ MODIF reca_algo Macro DATE 26/05/2010 AUTEUR ASSIRE A.ASSIRE
2 # -*- coding: iso-8859-1 -*-
3 # RESPONSABLE ASSIRE A.ASSIRE
4 # CONFIGURATION MANAGEMENT OF EDF VERSION
5 # ======================================================================
6 # COPYRIGHT (C) 1991 - 2002 EDF R&D WWW.CODE-ASTER.ORG
7 # THIS PROGRAM IS FREE SOFTWARE; YOU CAN REDISTRIBUTE IT AND/OR MODIFY
8 # IT UNDER THE TERMS OF THE GNU GENERAL PUBLIC LICENSE AS PUBLISHED BY
9 # THE FREE SOFTWARE FOUNDATION; EITHER VERSION 2 OF THE LICENSE, OR
10 # (AT YOUR OPTION) ANY LATER VERSION.
12 # THIS PROGRAM IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, BUT
13 # WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF
14 # MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE GNU
15 # GENERAL PUBLIC LICENSE FOR MORE DETAILS.
17 # YOU SHOULD HAVE RECEIVED A COPY OF THE GNU GENERAL PUBLIC LICENSE
18 # ALONG WITH THIS PROGRAM; IF NOT, WRITE TO EDF R&D CODE_ASTER,
19 # 1 AVENUE DU GENERAL DE GAULLE, 92141 CLAMART CEDEX, FRANCE.
20 # ======================================================================
import os
import copy

import numpy as NP
import numpy.linalg as linalg

from Cata.cata import INFO_EXEC_ASTER
from Cata.cata import DETRUIRE
from Utilitai.Utmess import UTMESS
37 # ------------------------------------------------------------------------------
def calcul_gradient(A,erreur):
    """Return the gradient A^T . erreur of the least-squares functional.

    A      : sensitivity (Jacobian) matrix.
    erreur : residual vector.
    """
    grad = NP.dot(NP.transpose(A),erreur)
    # NOTE(review): the original extract computed `grad` without returning it;
    # callers (e.g. test_convergence) use the returned value.
    return grad
42 # ------------------------------------------------------------------------------
45 return NP.dot(a,NP.transpose(a))**0.5
48 # ------------------------------------------------------------------------------
49 # ------------------------------------------------------------------------------
52 Classe gérant l'adimensionnement et le dimensionnement
55 def __init__(self,val_initiales):
57 Le constructeur calcul la matrice D et son inverse
59 self.val_init = val_initiales
60 dim =len(self.val_init)
61 self.D = NP.zeros((dim,dim), float)
63 self.D[i][i] = self.val_init[i]
64 self.inv_D=linalg.inv(self.D)
67 # ------------------------------------------------------------------------------
68 def adim_sensi(self, A):
69 for i in range(A.shape[0]):
70 for j in range(A.shape[1]):
71 A[i,j] = A[i,j] * self.val_init[j]
75 # ------------------------------------------------------------------------------
76 def redim_sensi(self, A):
77 for i in range(A.shape[0]):
78 for j in range(A.shape[1]):
79 A[i,j] = A[i,j] / self.val_init[j]
83 # ------------------------------------------------------------------------------
85 tab_adim = NP.dot(self.inv_D,copy.copy(tab))
89 # ------------------------------------------------------------------------------
91 def redim(self, tab_adim):
92 tab = NP.dot(self.D,tab_adim)
95 # ------------------------------------------------------------------------------
96 # ------------------------------------------------------------------------------
102 # ------------------------------------------------------------------------------
103 # ------------------------------------------------------------------------------
105 e1=linalg.eigvals(matrix)
109 if NP.all(e[0] != 0):
113 return condi,e[size-1],e[0]
117 # ------------------------------------------------------------------------------
118 # ------------------------------------------------------------------------------
120 e=linalg.eigvalsh(matrix)
127 # ------------------------------------------------------------------------------
128 # ------------------------------------------------------------------------------
def lambda_init(matrix):
    """Compute the initial value of the regularisation (damping) parameter
    l for the Levenberg-Marquardt iteration, from the conditioning of
    *matrix* (normally A^T.A)."""
    # Condition number and extreme eigenvalues of the matrix.
    condi,emax,emin=cond(matrix)
    id=NP.identity(matrix.shape[0])
    # NOTE(review): the branching between the two formulas below, and the
    # final `return l`, are truncated in this extract; as written the second
    # assignment always overwrites the first -- confirm against the full file.
    l=1.e-16*norm(matrix)
    l=abs(10000.*emin-emax)/10001.
144 # ------------------------------------------------------------------------------
145 # ------------------------------------------------------------------------------
def Levenberg_bornes(val, Dim, val_init, borne_inf, borne_sup, A, erreur, l, ul_out):
    """Solve the damped least-squares step by an active-set method.

    The system solved (in dimensionless variables) is
        (A^T.A + l.Id) . dval = -A^T.erreur
    under the box constraints
        borne_inf < dval < borne_sup
    with complementarity on the constraint multipliers s:
        s.(borne_inf - dval)=0
        s.(borne_sup - dval)=0

    val        : current parameter values (physical units).
    Dim        : scaling object providing adim()/redim().
    val_init   : initial parameter values (used to re-dimension the solution).
    borne_inf, borne_sup : lower/upper parameter bounds.
    A, erreur  : sensitivity matrix and residual vector.
    l          : Levenberg-Marquardt damping parameter.
    ul_out     : logical unit of the result file written on failure.

    Returns (newval, s, l, Act).

    NOTE(review): several lines of the original are missing from this
    extract -- in particular the bindings of `dim`, `dval` and `s`, and the
    enclosing iteration loop -- so the body below is not runnable as shown;
    confirm against the full file.
    """
    id = NP.identity(dim)
    Q=NP.dot(NP.transpose(A),A) +l*id
    # Right-hand side of the system
    d=NP.dot(NP.transpose(A),erreur)
    # Set of active constraints
    Act=NP.array([], dtype=int)
    # Parameter increments
    I=NP.ones(dim, dtype=int)
    # Indices of the currently free (inactive) parameters
    I=NP.nonzero(NP.greater(I,0))[0]
    # Bounds test (record whether we sit on the upper or the lower bound)
    # NOTE(review): the `for i in ...` loop enclosing the two tests below is
    # truncated in this extract.
    if (val[i]+dval[i]>=borne_sup[i]):
        dval[i]=borne_sup[i]-val[i]
    if (val[i]+dval[i]<=borne_inf[i]):
        dval[i]=borne_inf[i]-val[i]
    # xi=-Q(I)-1.(d(I)+Q(I,Act).dval(Act))
    t_QI = NP.take(Q, I, axis=0)
    t_tQI_Act = NP.take(t_QI, Act, axis=1)
    t_adim_Act = NP.take(Dim.adim(dval), Act)
    if NP.size(t_tQI_Act) > 0 and NP.size(t_adim_Act) > 0:
        smemb = NP.take(d, I) + NP.dot(t_tQI_Act, t_adim_Act)
    # NOTE(review): an `else:` line appears to be missing above this
    # fallback right-hand side.
    smemb = NP.take(d, I)
    # Solve the reduced system on the free parameters only
    xi=-linalg.solve(NP.take(t_QI, I, axis=1), smemb)
    # Re-dimension the solution back to physical units
    for i in NP.arange(len(I)):
        dval[I[i]]=xi[i]*val_init[I[i]]
    # s(Av)=-d(Act)-Q(Act,:).dval
    sa=-NP.take(d,Act)-NP.dot(NP.take(Q,Act,axis=0),Dim.adim(dval))
    # NOTE(review): the body of the loop below is truncated in this extract
    # (presumably scattering `sa` into `s` at the active indices).
    for i in range(len(Act)):
    # New set of active constraints
    Act=NP.concatenate((NP.nonzero(NP.greater(dval,borne_sup-val))[0],
                        NP.nonzero(NP.less(dval,borne_inf-val))[0],
                        NP.nonzero(NP.greater(s,0.))[0])).astype(int)
    # Converged when the bounds and the multiplier signs are all satisfied
    done=(max(val+dval-borne_sup)<=0)&(min(val+dval-borne_inf)>=0)&(min(s)>=0.0)
    # To avoid cycling
    # NOTE(review): the anti-cycling increase of `l` is truncated; only the
    # rebuild of Q is visible here.
    Q=NP.dot(NP.transpose(A),A) +l*id
    # Failure path: dump the system state to the output unit, then raise a
    # fatal message (RECAL0_18).
    res=open(os.getcwd()+'/fort.'+str(ul_out),'a')
    res.write('\n\nQ = \n'+NP.array2string(Q-l*id,array_output=1,separator=','))
    res.write('\n\nd = '+NP.array2string(d,array_output=1,separator=','))
    res.write('\n\nval = '+NP.array2string(val,array_output=1,separator=','))
    res.write('\n\nval_ini= '+NP.array2string(val_init,array_output=1,separator=','))
    res.write('\n\nborne_inf= '+NP.array2string(borne_inf,array_output=1,separator=','))
    res.write('\n\nborne_sup= '+NP.array2string(borne_sup,array_output=1,separator=','))
    UTMESS('F','RECAL0_18')
    newval=copy.copy(val+dval)
    return newval,s,l,Act
228 # ------------------------------------------------------------------------------
229 # ------------------------------------------------------------------------------
def actualise_lambda(l, val, new_val, A, erreur, new_J, old_J):
    """Update the Levenberg-Marquardt damping parameter l.

    Compares the actual decrease of the functional (old_J - new_J) with the
    decrease predicted by the local quadratic model, and adjusts l from the
    ratio R.

    NOTE(review): the bindings of `dim` and `old_Q`, and the tail of the
    function (the update of `l` from R and the return), are missing from
    this extract -- confirm against the full file.
    """
    id = NP.identity(dim)
    Q=NP.dot(NP.transpose(A),A) +l*id
    # Right-hand side of the system
    d=NP.dot(NP.transpose(A),erreur)
    # Quadratic-model prediction of the functional value at new_val
    new_Q=old_J+0.5*NP.dot(NP.transpose(new_val-val),NP.dot(Q,new_val-val))+NP.dot(NP.transpose(new_val-val),d)
    # Ratio of the actual decrease to the quadratic approximation
    if NP.all((old_Q-new_Q) != 0.):
        R=(old_J-new_J)/(old_Q-new_Q)
254 # ------------------------------------------------------------------------------
255 # ------------------------------------------------------------------------------
def test_convergence(gradient_init, erreur, A, s):
    """Convergence indicator: relative norm of the current (constrained)
    gradient with respect to the initial gradient.

    gradient_init : gradient at the starting point.
    erreur        : current residual vector.
    A             : sensitivity matrix.
    s             : active-set multipliers added to the gradient.
    """
    gradient = calcul_gradient(A,erreur)+s
    epsilon = NP.dot(gradient,gradient)/NP.dot(gradient_init,gradient_init)
    # NOTE(review): in this extract the fatal message below appears
    # unconditional, and the guard around it (presumably a zero initial
    # gradient) plus the final `return epsilon` are missing -- confirm
    # against the full file.
    UTMESS('F', "RECAL0_19")
    epsilon = epsilon**0.5
270 # ------------------------------------------------------------------------------
271 # ------------------------------------------------------------------------------
def calcul_etat_final(para, A, iter, max_iter, prec, residu, Messg):
    """Called once convergence is reached: build the Hessian A^T.A, compute
    its eigenvalues and eigenvectors, classify the parameter directions as
    sensitive/insensitive, and hand everything to the report object.

    para     : parameter names (passed through to the report).
    A        : final sensitivity matrix.
    iter, max_iter, prec, residu : iteration status (only reported here).
    Messg    : message/report object exposing affiche_calcul_etat_final.
    """
    # if ((iter < max_iter) or (residu < prec)):
    Hessien = NP.dot(NP.transpose(A),A)
    # Temporarily disables the FPEs that could be (wrongly!) raised by blas
    # NOTE(review): the actual FPE enable/disable calls around eig() are
    # truncated in this extract.
    valeurs_propres,vecteurs_propres = linalg.eig(Hessien)
    vecteurs_propres=NP.transpose(vecteurs_propres) # numpy and Numeric do not share the same convention
    # Directions whose eigenvalue is within 10% of the largest one
    sensible=NP.nonzero(NP.greater(abs(valeurs_propres/max(abs(valeurs_propres))),1.E-1))[0]
    # Directions whose eigenvalue is below 1% of the largest one
    insensible=NP.nonzero(NP.less(abs(valeurs_propres/max(abs(valeurs_propres))),1.E-2))[0]
    Messg.affiche_calcul_etat_final(para,Hessien,valeurs_propres,vecteurs_propres,sensible,insensible)