From 9d4d9f9b741e692c9307f523a70e29b05385fa8d Mon Sep 17 00:00:00 2001 From: Jean-Philippe ARGAUD Date: Wed, 26 Apr 2023 17:29:29 +0200 Subject: [PATCH] Documentation and code update for PSO --- ...ef_algorithm_ParticleSwarmOptimization.rst | 4 +- doc/en/snippets/BoxBounds.rst | 11 +- doc/en/snippets/CognitiveAcceleration.rst | 7 +- doc/en/snippets/InertiaWeight.rst | 6 +- doc/en/snippets/ModuleCompatibility.rst | 2 +- doc/en/snippets/SocialAcceleration.rst | 6 +- doc/en/snippets/VelocityClampingFactor.rst | 4 +- ...ef_algorithm_ParticleSwarmOptimization.rst | 1 - doc/fr/snippets/BoxBounds.rst | 16 +- doc/fr/snippets/CognitiveAcceleration.rst | 7 +- doc/fr/snippets/InertiaWeight.rst | 5 +- doc/fr/snippets/ModuleCompatibility.rst | 2 +- doc/fr/snippets/SocialAcceleration.rst | 6 +- doc/fr/snippets/VelocityClampingFactor.rst | 4 +- src/daComposant/daAlgorithms/Atoms/ecwnpso.py | 26 +- src/daComposant/daAlgorithms/Atoms/ecwopso.py | 24 +- src/daComposant/daAlgorithms/Atoms/ecwspso.py | 247 ++++++++++++++++++ .../daAlgorithms/ParticleSwarmOptimization.py | 45 +++- src/daComposant/daCore/BasicObjects.py | 9 + src/daComposant/daCore/NumericObjects.py | 34 ++- 20 files changed, 406 insertions(+), 60 deletions(-) create mode 100644 src/daComposant/daAlgorithms/Atoms/ecwspso.py diff --git a/doc/en/ref_algorithm_ParticleSwarmOptimization.rst b/doc/en/ref_algorithm_ParticleSwarmOptimization.rst index 11ed087..86844a4 100644 --- a/doc/en/ref_algorithm_ParticleSwarmOptimization.rst +++ b/doc/en/ref_algorithm_ParticleSwarmOptimization.rst @@ -47,12 +47,10 @@ error function :math:`J` of type :math:`L^1`, :math:`L^2` or :ref:`section_theory_optimization`. The default error function is the augmented weighted least squares function, classically used in data assimilation. -.There exists various variants of this algorithm. The following stable and +There exists various variants of this algorithm. The following stable and robust formulations are proposed here: .. index:: -.. 
index:: - pair: Variant ; PSO pair: Variant ; CanonicalPSO pair: Variant ; OGCR diff --git a/doc/en/snippets/BoxBounds.rst b/doc/en/snippets/BoxBounds.rst index 03731e8..c902806 100644 --- a/doc/en/snippets/BoxBounds.rst +++ b/doc/en/snippets/BoxBounds.rst @@ -3,10 +3,11 @@ BoxBounds *List of pairs of real values*. This key allows to define pairs of upper and lower bounds for *increments* on every state variable being optimized (and - not on state variables themselves). Bounds have to be given by a list of list - of pairs of lower/upper bounds for each increment on variable, with extreme - values every time there is no bound (``None`` is not allowed when there is no - bound). This key is required and there is no default values. + not on state variables themselves, whose bounds can be indicated by the + "*Bounds*" variable). Increment bounds have to be given by a list of list of + pairs of lower/upper bounds for each increment on variable, with a value of + ``None`` each time there is no bound. This key is only required if there are + no variable bounds, and there are no default values. Example : - ``{"BoxBounds":[[-0.5,0.5], [0.01,2.], [0.,1.e99], [-1.e99,1.e99]]}`` + ``{"BoxBounds":[[-0.5,0.5], [0.01,2.], [0.,None], [None,None]]}`` diff --git a/doc/en/snippets/CognitiveAcceleration.rst b/doc/en/snippets/CognitiveAcceleration.rst index 082b52d..2dbe15d 100644 --- a/doc/en/snippets/CognitiveAcceleration.rst +++ b/doc/en/snippets/CognitiveAcceleration.rst @@ -2,8 +2,9 @@ CognitiveAcceleration *Real value*. This key indicates the recall rate at the best previously known - value of the current insect. It is a floating point value between 0 and 1. - The default value is 0.5. + value of the current insect. It is a floating point positive value. The + default value is about :math:`1/2+ln(2)`, and it is recommended to adapt it, + rather by reducing it, to the physical case that is being treated. 
Example : - ``{"CognitiveAcceleration":0.5}`` + ``{"CognitiveAcceleration":1.19315}`` diff --git a/doc/en/snippets/InertiaWeight.rst b/doc/en/snippets/InertiaWeight.rst index eb9bc99..5c9c71b 100644 --- a/doc/en/snippets/InertiaWeight.rst +++ b/doc/en/snippets/InertiaWeight.rst @@ -3,7 +3,9 @@ InertiaWeight *Real value*. This key indicates the part of the insect velocity which is imposed by the swarm, named "inertia weight". It is a positive floating point - value. It is a floating point value between 0 and 1. The default value is 1. + value. It is a floating point value between 0 and 1. The default value is + about :math:`1/(2*ln(2))` and it is recommended to adapt it to the physical + case that is being treated. Example : - ``{"InertiaWeight":1.}`` + ``{"InertiaWeight":0.72135}`` diff --git a/doc/en/snippets/ModuleCompatibility.rst b/doc/en/snippets/ModuleCompatibility.rst index f138840..747d7ea 100644 --- a/doc/en/snippets/ModuleCompatibility.rst +++ b/doc/en/snippets/ModuleCompatibility.rst @@ -15,7 +15,7 @@ versions within the range described below. :widths: 20, 10, 10 Python, 3.6.5, 3.10.10 - Numpy, 1.14.3, 1.24.2 + Numpy, 1.14.3, 1.24.3 Scipy, 0.19.1, 1.10.1 MatplotLib, 2.2.2, 3.7.1 GnuplotPy, 1.8, 1.8 diff --git a/doc/en/snippets/SocialAcceleration.rst b/doc/en/snippets/SocialAcceleration.rst index 034908c..b1114bc 100644 --- a/doc/en/snippets/SocialAcceleration.rst +++ b/doc/en/snippets/SocialAcceleration.rst @@ -3,7 +3,9 @@ SocialAcceleration *Real value*. This key indicates the recall rate at the best swarm insect in the neighbourhood of the current insect, which is by default the whole swarm. - It is a floating point value between 0 and 1. The default value is 0.5. + It is a floating point positive value. The default value is about + :math:`1/2+ln(2)` and it is recommended to adapt it, rather by reducing it, + to the physical case that is being treated. 
Example : - ``{"SocialAcceleration":0.5}`` + ``{"SocialAcceleration":1.19315}`` diff --git a/doc/en/snippets/VelocityClampingFactor.rst b/doc/en/snippets/VelocityClampingFactor.rst index cac44d9..dc20815 100644 --- a/doc/en/snippets/VelocityClampingFactor.rst +++ b/doc/en/snippets/VelocityClampingFactor.rst @@ -4,7 +4,7 @@ VelocityClampingFactor *Real value*. This key indicates the rate of group velocity attenuation in the update for each insect, useful to avoid swarm explosion, i.e. uncontrolled growth of insect velocity. It is a floating point value between - 0+ and 1. The default value is 1.0. + 0+ and 1. The default value is 0.3. Example : - ``{"VelocityClampingFactor":1.0}`` + ``{"VelocityClampingFactor":0.3}`` diff --git a/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst b/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst index 973e242..f8331e2 100644 --- a/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst +++ b/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst @@ -53,7 +53,6 @@ Il existe diverses variantes de cet algorithme. On propose ici les formulations stables et robustes suivantes : .. index:: - pair: Variant ; PSO pair: Variant ; CanonicalPSO pair: Variant ; OGCR diff --git a/doc/fr/snippets/BoxBounds.rst b/doc/fr/snippets/BoxBounds.rst index 45c4ef0..d0f4e1b 100644 --- a/doc/fr/snippets/BoxBounds.rst +++ b/doc/fr/snippets/BoxBounds.rst @@ -2,12 +2,14 @@ BoxBounds *Liste de paires de valeurs réelles*. Cette clé permet de définir des paires - de bornes supérieure et inférieure pour chaque incrément de variable d'état - optimisée (et non pas chaque variable d'état elle-même). Les bornes doivent - être données par une liste de liste de paires de bornes inférieure/supérieure - pour chaque incrément de variable, avec une valeur extrême chaque fois qu'il - n'y a pas de borne (``None`` n'est pas une valeur autorisée lorsqu'il n'y a - pas de borne). Cette clé est requise et il n'y a pas de valeurs par défaut. 
+ de bornes supérieure et inférieure pour chaque *incrément* de variable d'état + optimisée (et non pas chaque variable d'état elle-même, dont les bornes + peuvent être indiquées par la variable "*Bounds*"). Les bornes d'incréments + doivent être données par une liste de liste de paires de bornes + inférieure/supérieure pour chaque incrément de variable, avec une valeur + ``None`` chaque fois qu'il n'y a pas de borne. Cette clé est requise + uniquement s'il n'y a pas de bornes de paramètres, et il n'y a pas de valeurs + par défaut. Exemple : - ``{"BoxBounds":[[-0.5,0.5], [0.01,2.], [0.,1.e99], [-1.e99,1.e99]]}`` + ``{"BoxBounds":[[-0.5,0.5], [0.01,2.], [0.,None], [None,None]]}`` diff --git a/doc/fr/snippets/CognitiveAcceleration.rst b/doc/fr/snippets/CognitiveAcceleration.rst index 3a97886..3cb01ae 100644 --- a/doc/fr/snippets/CognitiveAcceleration.rst +++ b/doc/fr/snippets/CognitiveAcceleration.rst @@ -2,9 +2,10 @@ CognitiveAcceleration *Valeur réelle*. Cette clé indique le taux de rappel vers la meilleure valeur - connue précédemment de l'insecte courant. C'est une valeur réelle comprise - entre 0 et 1. Le défaut est de 0.5. + connue précédemment de l'insecte courant. C'est une valeur réelle positive. + Le défaut est à peu près de :math:`1/2+ln(2)` et il est recommandé de + l'adapter, plutôt en le réduisant, au cas physique qui est en traitement. Exemple : - ``{"CognitiveAcceleration":0.5}`` + ``{"CognitiveAcceleration":1.19315}`` diff --git a/doc/fr/snippets/InertiaWeight.rst b/doc/fr/snippets/InertiaWeight.rst index 8b5291e..a9778b7 100644 --- a/doc/fr/snippets/InertiaWeight.rst +++ b/doc/fr/snippets/InertiaWeight.rst @@ -3,7 +3,8 @@ InertiaWeight *Valeur réelle*. Cette clé indique la part de la vitesse de l'essaim qui est imposée à l'insecte, dite "poids de l'inertie". C'est une valeur réelle - comprise entre 0 et 1. Le défaut est de 1. + comprise entre 0 et 1. 
Le défaut est de à peu près :math:`1/(2*ln(2))` et il + est recommandé de l'adapter au cas physique qui est en traitement. Exemple : - ``{"InertiaWeight":1.}`` + ``{"InertiaWeight":0.72135}`` diff --git a/doc/fr/snippets/ModuleCompatibility.rst b/doc/fr/snippets/ModuleCompatibility.rst index a6a40fd..245acd9 100644 --- a/doc/fr/snippets/ModuleCompatibility.rst +++ b/doc/fr/snippets/ModuleCompatibility.rst @@ -16,7 +16,7 @@ l'étendue décrite ci-dessous. :widths: 20, 10, 10 Python, 3.6.5, 3.10.10 - Numpy, 1.14.3, 1.24.2 + Numpy, 1.14.3, 1.24.3 Scipy, 0.19.1, 1.10.1 MatplotLib, 2.2.2, 3.7.1 GnuplotPy, 1.8, 1.8 diff --git a/doc/fr/snippets/SocialAcceleration.rst b/doc/fr/snippets/SocialAcceleration.rst index 6e6e041..6353103 100644 --- a/doc/fr/snippets/SocialAcceleration.rst +++ b/doc/fr/snippets/SocialAcceleration.rst @@ -3,7 +3,9 @@ SocialAcceleration *Valeur réelle*. Cette clé indique le taux de rappel vers le meilleur insecte du voisinage de l'insecte courant, qui est par défaut l'essaim complet. C'est - une valeur réelle comprise entre 0 et 1. Le défaut est de 0.5. + une valeur réelle positive. Le défaut est à peu près de :math:`1/2+ln(2)` et + il est recommandé de l'adapter, plutôt en le réduisant, au cas physique qui + est en traitement. Exemple : - ``{"SocialAcceleration":0.5}`` + ``{"SocialAcceleration":1.19315}`` diff --git a/doc/fr/snippets/VelocityClampingFactor.rst b/doc/fr/snippets/VelocityClampingFactor.rst index e2ea625..d7f6512 100644 --- a/doc/fr/snippets/VelocityClampingFactor.rst +++ b/doc/fr/snippets/VelocityClampingFactor.rst @@ -5,8 +5,8 @@ VelocityClampingFactor groupe dans la mise à jour pour chaque insecte, utile pour éviter l'explosion de l'essaim, c'est-à-dire une croissance incontrôlée de la vitesse des insectes. C'est une valeur réelle comprise entre 0+ et 1. Le défaut est de - 1.0. + 0.3. 
Exemple : - ``{"VelocityClampingFactor":1.0}`` + ``{"VelocityClampingFactor":0.3}`` diff --git a/src/daComposant/daAlgorithms/Atoms/ecwnpso.py b/src/daComposant/daAlgorithms/Atoms/ecwnpso.py index 7fc546a..886ce54 100644 --- a/src/daComposant/daAlgorithms/Atoms/ecwnpso.py +++ b/src/daComposant/daAlgorithms/Atoms/ecwnpso.py @@ -103,10 +103,10 @@ def ecwnpso(selfA, Xb, Y, HO, R, B): LimitPlace = Bounds LimitSpeed = 0.5 * BoxBounds # "1/2*([Xmin,Xmax]-Xini)" # - NumberOfFunctionEvaluations = 1 + nbfct = 1 # Nb d'évaluations JXini, JbXini, JoXini = CostFunction(Xini,selfA._parameters["QualityCriterion"]) # - Swarm = numpy.zeros((__nbI,3,__nbP)) # 3 car (x,v,xbest) + Swarm = numpy.zeros((__nbI,4,__nbP)) # 4 car (x,v,xbest,lbest) for __p in range(__nbP) : Swarm[:,0,__p] = rand( low=LimitPlace[__p,0], high=LimitPlace[__p,1], size=__nbI) # Position Swarm[:,1,__p] = rand( low=LimitSpeed[__p,0], high=LimitSpeed[__p,1], size=__nbI) # Velocity @@ -114,7 +114,7 @@ def ecwnpso(selfA, Xb, Y, HO, R, B): # qSwarm = JXini * numpy.ones((__nbI,3)) # Qualité (J, Jb, Jo) par insecte for __i in range(__nbI): - NumberOfFunctionEvaluations += 1 + nbfct += 1 JTest, JbTest, JoTest = CostFunction(Swarm[__i,0,:],selfA._parameters["QualityCriterion"]) if JTest < JXini: Swarm[__i,2,:] = Swarm[__i,0,:] # xBest @@ -131,13 +131,21 @@ def ecwnpso(selfA, Xb, Y, HO, R, B): selfA.StoredVariables["CostFunctionJ" ].store( qSwarm[iBest,0] ) selfA.StoredVariables["CostFunctionJb"].store( qSwarm[iBest,1] ) selfA.StoredVariables["CostFunctionJo"].store( qSwarm[iBest,2] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalStates"): + selfA.StoredVariables["InternalStates"].store( Swarm[:,0,:].T ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJ"): + selfA.StoredVariables["InternalCostFunctionJ"].store( qSwarm[:,0] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJb"): + 
selfA.StoredVariables["InternalCostFunctionJb"].store( qSwarm[:,1] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJo"): + selfA.StoredVariables["InternalCostFunctionJo"].store( qSwarm[:,2] ) # selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) # # Minimisation de la fonctionnelle # -------------------------------- step = 0 - while KeepRunningCondition(step, NumberOfFunctionEvaluations): + while KeepRunningCondition(step, nbfct): step += 1 for __i in range(__nbI): rct = rand(size=__nbP) @@ -151,7 +159,7 @@ def ecwnpso(selfA, Xb, Y, HO, R, B): __velins = Swarm[__i,0,:] + Swarm[__i,1,:] Swarm[__i,0,:] = ApplyBounds( __velins, LimitPlace ) # - NumberOfFunctionEvaluations += 1 + nbfct += 1 JTest, JbTest, JoTest = CostFunction(Swarm[__i,0,:],selfA._parameters["QualityCriterion"]) if JTest < qSwarm[__i,0]: Swarm[__i,2,:] = Swarm[__i,0,:] # xBest @@ -167,6 +175,14 @@ def ecwnpso(selfA, Xb, Y, HO, R, B): selfA.StoredVariables["CostFunctionJ" ].store( qSwarm[iBest,0] ) selfA.StoredVariables["CostFunctionJb"].store( qSwarm[iBest,1] ) selfA.StoredVariables["CostFunctionJo"].store( qSwarm[iBest,2] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalStates"): + selfA.StoredVariables["InternalStates"].store( Swarm[:,0,:].T ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJ"): + selfA.StoredVariables["InternalCostFunctionJ"].store( qSwarm[:,0] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJb"): + selfA.StoredVariables["InternalCostFunctionJb"].store( qSwarm[:,1] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJo"): + selfA.StoredVariables["InternalCostFunctionJo"].store( qSwarm[:,2] ) logging.debug("%s Step %i: insect %i is the better one with J =%.7f"%(selfA._name,step,iBest,qSwarm[iBest,0])) # # Obtention de l'analyse diff --git 
a/src/daComposant/daAlgorithms/Atoms/ecwopso.py b/src/daComposant/daAlgorithms/Atoms/ecwopso.py index a1c7d16..e14f24b 100644 --- a/src/daComposant/daAlgorithms/Atoms/ecwopso.py +++ b/src/daComposant/daAlgorithms/Atoms/ecwopso.py @@ -103,7 +103,7 @@ def ecwopso(selfA, Xb, Y, HO, R, B): LimitPlace = Bounds LimitSpeed = 0.5 * BoxBounds # "1/2*([Xmin,Xmax]-Xini)" # - NumberOfFunctionEvaluations = 1 + nbfct = 1 # Nb d'évaluations JXini, JbXini, JoXini = CostFunction(Xini,selfA._parameters["QualityCriterion"]) # Swarm = numpy.zeros((__nbI,3,__nbP)) # 3 car (x,v,xbest) @@ -114,7 +114,7 @@ def ecwopso(selfA, Xb, Y, HO, R, B): # qSwarm = JXini * numpy.ones((__nbI,3)) # Qualité (J, Jb, Jo) par insecte for __i in range(__nbI): - NumberOfFunctionEvaluations += 1 + nbfct += 1 JTest, JbTest, JoTest = CostFunction(Swarm[__i,0,:],selfA._parameters["QualityCriterion"]) if JTest < JXini: Swarm[__i,2,:] = Swarm[__i,0,:] # xBest @@ -131,13 +131,21 @@ def ecwopso(selfA, Xb, Y, HO, R, B): selfA.StoredVariables["CostFunctionJ" ].store( qSwarm[iBest,0] ) selfA.StoredVariables["CostFunctionJb"].store( qSwarm[iBest,1] ) selfA.StoredVariables["CostFunctionJo"].store( qSwarm[iBest,2] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalStates"): + selfA.StoredVariables["InternalStates"].store( Swarm[:,0,:].T ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJ"): + selfA.StoredVariables["InternalCostFunctionJ"].store( qSwarm[:,0] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJb"): + selfA.StoredVariables["InternalCostFunctionJb"].store( qSwarm[:,1] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJo"): + selfA.StoredVariables["InternalCostFunctionJo"].store( qSwarm[:,2] ) # selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) # # Minimisation de la fonctionnelle # -------------------------------- 
step = 0 - while KeepRunningCondition(step, NumberOfFunctionEvaluations): + while KeepRunningCondition(step, nbfct): step += 1 for __i in range(__nbI): for __p in range(__nbP): @@ -148,7 +156,7 @@ def ecwopso(selfA, Xb, Y, HO, R, B): # Position Swarm[__i,0,__p] = Swarm[__i,0,__p] + Swarm[__i,1,__p] # - NumberOfFunctionEvaluations += 1 + nbfct += 1 JTest, JbTest, JoTest = CostFunction(Swarm[__i,0,:],selfA._parameters["QualityCriterion"]) if JTest < qSwarm[__i,0]: Swarm[__i,2,:] = Swarm[__i,0,:] # xBest @@ -164,6 +172,14 @@ def ecwopso(selfA, Xb, Y, HO, R, B): selfA.StoredVariables["CostFunctionJ" ].store( qSwarm[iBest,0] ) selfA.StoredVariables["CostFunctionJb"].store( qSwarm[iBest,1] ) selfA.StoredVariables["CostFunctionJo"].store( qSwarm[iBest,2] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalStates"): + selfA.StoredVariables["InternalStates"].store( Swarm[:,0,:].T ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJ"): + selfA.StoredVariables["InternalCostFunctionJ"].store( qSwarm[:,0] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJb"): + selfA.StoredVariables["InternalCostFunctionJb"].store( qSwarm[:,1] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJo"): + selfA.StoredVariables["InternalCostFunctionJo"].store( qSwarm[:,2] ) logging.debug("%s Step %i: insect %i is the better one with J =%.7f"%(selfA._name,step,iBest,qSwarm[iBest,0])) # # Obtention de l'analyse diff --git a/src/daComposant/daAlgorithms/Atoms/ecwspso.py b/src/daComposant/daAlgorithms/Atoms/ecwspso.py new file mode 100644 index 0000000..c23a907 --- /dev/null +++ b/src/daComposant/daAlgorithms/Atoms/ecwspso.py @@ -0,0 +1,247 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2008-2023 EDF R&D +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by 
the Free Software Foundation; either +# version 2.1 of the License. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# +# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com +# +# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D + +__doc__ = """ + SPSO-2011 Particle Swarm Optimization +""" +__author__ = "Jean-Philippe ARGAUD" + +import numpy, logging, copy, math +from daCore.NumericObjects import ApplyBounds, VariablesAndIncrementsBounds +from daCore.NumericObjects import GenerateRandomPointInHyperSphere +from daCore.NumericObjects import GetNeighborhoodTopology +from numpy.random import uniform as rand + +# ============================================================================== +def ecwspso(selfA, Xb, Y, HO, R, B): + # + Hm = HO["Direct"].appliedTo + # + BI = B.getI() + RI = R.getI() + # + Xini = selfA._parameters["InitializationPoint"] + # + Bounds, BoxBounds = VariablesAndIncrementsBounds( + selfA._parameters["Bounds"], + selfA._parameters["BoxBounds"], + Xini, + selfA._name, + 0.5, + ) + # + def CostFunction(x, QualityMeasure="AugmentedWeightedLeastSquares"): + _X = numpy.asarray( x ).reshape((-1,1)) + _HX = numpy.asarray( Hm( _X ) ).reshape((-1,1)) + _Innovation = Y - _HX + # + if QualityMeasure in ["AugmentedWeightedLeastSquares","AWLS","DA"]: + if BI is None or RI is None: + raise ValueError("Background and Observation error covariance matrices has to be properly defined!") + Jb = 0.5 * (_X - Xb).T @ (BI @ (_X - Xb)) + Jo = 0.5 * _Innovation.T @ (RI @ _Innovation) + elif QualityMeasure in 
["WeightedLeastSquares","WLS"]: + if RI is None: + raise ValueError("Observation error covariance matrix has to be properly defined!") + Jb = 0. + Jo = 0.5 * _Innovation.T @ (RI @ _Innovation) + elif QualityMeasure in ["LeastSquares","LS","L2"]: + Jb = 0. + Jo = 0.5 * _Innovation.T @ _Innovation + elif QualityMeasure in ["AbsoluteValue","L1"]: + Jb = 0. + Jo = numpy.sum( numpy.abs(_Innovation) ) + elif QualityMeasure in ["MaximumError","ME", "Linf"]: + Jb = 0. + Jo = numpy.max( numpy.abs(_Innovation) ) + # + J = float( Jb ) + float( Jo ) + # + return J, float( Jb ), float( Jo ) + # + def KeepRunningCondition(__step, __nbfct): + if __step >= selfA._parameters["MaximumNumberOfIterations"]: + logging.debug("%s Stopping search because the number %i of evolving iterations is exceeding the maximum %i."%(selfA._name, __step, selfA._parameters["MaximumNumberOfIterations"])) + return False + elif __nbfct >= selfA._parameters["MaximumNumberOfFunctionEvaluations"]: + logging.debug("%s Stopping search because the number %i of function evaluations is exceeding the maximum %i."%(selfA._name, __nbfct, selfA._parameters["MaximumNumberOfFunctionEvaluations"])) + return False + else: + return True + # + # Paramètres internes + # ------------------- + __nbI = selfA._parameters["NumberOfInsects"] + __nbP = len(Xini) # Dimension ou nombre de paramètres + # + __iw = float( selfA._parameters["InertiaWeight"] ) + __sa = float( selfA._parameters["SocialAcceleration"] ) + __ca = float( selfA._parameters["CognitiveAcceleration"] ) + __vc = float( selfA._parameters["VelocityClampingFactor"] ) + logging.debug("%s Cognitive acceleration (recall to the best previously known value of the insect) = %s"%(selfA._name, str(__ca))) + logging.debug("%s Social acceleration (recall to the best insect value of the group) = %s"%(selfA._name, str(__sa))) + logging.debug("%s Velocity clamping factor = %s"%(selfA._name, str(__vc))) + # + # Initialisation de l'essaim + # -------------------------- + LimitPlace 
= Bounds + LimitSpeed = BoxBounds + # + nbfct = 1 # Nb d'évaluations + JXini, JbXini, JoXini = CostFunction(Xini,selfA._parameters["QualityCriterion"]) + # + Swarm = numpy.zeros((__nbI,4,__nbP)) # 4 car (x,v,gbest,lbest) + for __p in range(__nbP) : + Swarm[:,0,__p] = rand( low=LimitPlace[__p,0], high=LimitPlace[__p,1], size=__nbI) # Position + Swarm[:,1,__p] = rand( low=LimitSpeed[__p,0], high=LimitSpeed[__p,1], size=__nbI) # Velocity + logging.debug("%s Initialisation of the swarm with %i insects of size %i "%(selfA._name,Swarm.shape[0],Swarm.shape[2])) + # + __nbh = GetNeighborhoodTopology( selfA._parameters["SwarmTopology"], list(range(__nbI)) ) + # + qSwarm = JXini * numpy.ones((__nbI,6)) # Qualités (J, Jb, Jo) par insecte + par voisinage + for __i in range(__nbI): + nbfct += 1 + JTest, JbTest, JoTest = CostFunction(Swarm[__i,0,:],selfA._parameters["QualityCriterion"]) + if JTest < JXini: + Swarm[__i,2,:] = Swarm[__i,0,:] # xBest + qSwarm[__i,:3] = (JTest, JbTest, JoTest) + else: + Swarm[__i,2,:] = Xini # xBest + qSwarm[__i,:3] = (JXini, JbXini, JoXini) + logging.debug("%s Initialisation of the best previous insects"%selfA._name) + # + iBest = numpy.argmin(qSwarm[:,0]) + xBest = Swarm[iBest,2,:] + for __i in range(__nbI): + Swarm[__i,3,:] = xBest # lBest + qSwarm[__i,3:] = qSwarm[iBest,:3] + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"): + selfA.StoredVariables["CurrentState"].store( xBest ) + selfA.StoredVariables["CostFunctionJ" ].store( qSwarm[iBest,0] ) + selfA.StoredVariables["CostFunctionJb"].store( qSwarm[iBest,1] ) + selfA.StoredVariables["CostFunctionJo"].store( qSwarm[iBest,2] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalStates"): + selfA.StoredVariables["InternalStates"].store( Swarm[:,0,:].T ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJ"): + selfA.StoredVariables["InternalCostFunctionJ"].store( qSwarm[:,0] ) + if 
selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJb"): + selfA.StoredVariables["InternalCostFunctionJb"].store( qSwarm[:,1] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJo"): + selfA.StoredVariables["InternalCostFunctionJo"].store( qSwarm[:,2] ) + # + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) + # + # Minimisation de la fonctionnelle + # -------------------------------- + step = 0 + while KeepRunningCondition(step, nbfct): + step += 1 + for __i in range(__nbI): + rct = rand(size=__nbP) + rst = rand(size=__nbP) + rrt = rand(size=__nbP) + # Points + __xPoint = Swarm[__i,0,:] + __pPoint = __xPoint \ + + __ca * rct * (Swarm[__i,2,:] - Swarm[__i,0,:]) + __lPoint = __xPoint \ + + __sa * rst * (Swarm[__i,3,:] - Swarm[__i,0,:]) + __gPoint = (__xPoint + __pPoint + __lPoint) / 3 + __radius = numpy.linalg.norm(__gPoint - __xPoint) + __rPoint = GenerateRandomPointInHyperSphere( __gPoint, __radius ) + # Vitesse + __value = __iw * Swarm[__i,1,:] + __rPoint - __xPoint + Swarm[__i,1,:] = ApplyBounds( __value, LimitSpeed ) + # Position + __value = Swarm[__i,0,:] + Swarm[__i,1,:] + Swarm[__i,0,:] = ApplyBounds( __value, LimitPlace ) + # + nbfct += 1 + # Update gbest + JTest, JbTest, JoTest = CostFunction(Swarm[__i,0,:],selfA._parameters["QualityCriterion"]) + if JTest < qSwarm[__i,0]: + Swarm[__i,2,:] = Swarm[__i,0,:] # xBest + qSwarm[__i,:3] = (JTest, JbTest, JoTest) + # + # Update lbest + for __i in range(__nbI): + __im = numpy.argmin( [qSwarm[__v,0] for __v in __nbh[__i]] ) + __il = __nbh[__i][__im] # Best in NB + if qSwarm[__il,0] < qSwarm[__i,3]: + Swarm[__i,3,:] = Swarm[__il,2,:] # lBest + qSwarm[__i,3:] = qSwarm[__il,:3] + # + iBest = numpy.argmin(qSwarm[:,0]) + xBest = Swarm[iBest,2,:] + selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) ) + if selfA._parameters["StoreInternalVariables"] or 
selfA._toStore("CurrentState"): + selfA.StoredVariables["CurrentState"].store( xBest ) + if selfA._toStore("SimulatedObservationAtCurrentState"): + selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( xBest ) ) + selfA.StoredVariables["CostFunctionJ" ].store( qSwarm[iBest,0] ) + selfA.StoredVariables["CostFunctionJb"].store( qSwarm[iBest,1] ) + selfA.StoredVariables["CostFunctionJo"].store( qSwarm[iBest,2] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalStates"): + selfA.StoredVariables["InternalStates"].store( Swarm[:,0,:].T ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJ"): + selfA.StoredVariables["InternalCostFunctionJ"].store( qSwarm[:,0] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJb"): + selfA.StoredVariables["InternalCostFunctionJb"].store( qSwarm[:,1] ) + if selfA._parameters["StoreInternalVariables"] or selfA._toStore("InternalCostFunctionJo"): + selfA.StoredVariables["InternalCostFunctionJo"].store( qSwarm[:,2] ) + logging.debug("%s Step %i: insect %i is the better one with J =%.7f"%(selfA._name,step,iBest,qSwarm[iBest,0])) + # + # Obtention de l'analyse + # ---------------------- + Xa = xBest + # + selfA.StoredVariables["Analysis"].store( Xa ) + # + # Calculs et/ou stockages supplémentaires + # --------------------------------------- + if selfA._toStore("OMA") or \ + selfA._toStore("SimulatedObservationAtOptimum"): + HXa = Hm(Xa) + if selfA._toStore("Innovation") or \ + selfA._toStore("OMB") or \ + selfA._toStore("SimulatedObservationAtBackground"): + HXb = Hm(Xb) + Innovation = Y - HXb + if selfA._toStore("Innovation"): + selfA.StoredVariables["Innovation"].store( Innovation ) + if selfA._toStore("OMB"): + selfA.StoredVariables["OMB"].store( Innovation ) + if selfA._toStore("BMA"): + selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) ) + if selfA._toStore("OMA"): + 
selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) ) + if selfA._toStore("SimulatedObservationAtBackground"): + selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb ) + if selfA._toStore("SimulatedObservationAtOptimum"): + selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa ) + # + selfA._post_run(HO) + return 0 + +# ============================================================================== +if __name__ == "__main__": + print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py b/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py index 3579096..f6b31aa 100644 --- a/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py +++ b/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py @@ -22,7 +22,7 @@ import numpy, logging, copy from daCore import BasicObjects -from daAlgorithms.Atoms import ecwnpso, ecwopso +from daAlgorithms.Atoms import ecwnpso, ecwopso, ecwspso # ============================================================================== class ElementaryAlgorithm(BasicObjects.Algorithm): @@ -30,16 +30,16 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): BasicObjects.Algorithm.__init__(self, "PARTICLESWARMOPTIMIZATION") self.defineRequiredParameter( name = "Variant", - default = "PSO", + default = "CanonicalPSO", typecast = str, message = "Variant ou formulation de la méthode", listval = [ - "PSO", + "CanonicalPSO", "OGCR", + "SPSO-2011", ], listadv = [ - "CanonicalPSO", - "SPSO-2011", + "PSO", ], ) self.defineRequiredParameter( @@ -69,9 +69,25 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): message = "Nombre d'insectes dans l'essaim", minval = -1, ) + self.defineRequiredParameter( + name = "SwarmTopology", + default = "FullyConnectedNeighborhood", + typecast = str, + message = "Mode de définition du voisinage de chaque particule", + listval = [ + "FullyConnectedNeighborhood", "FullyConnectedNeighbourhood", "gbest", + "RingNeighborhoodWithRadius1", 
"RingNeighbourhoodWithRadius1", "lbest", + "RingNeighborhoodWithRadius2", "RingNeighbourhoodWithRadius2", + "AdaptativeRandomWith3Neighbors", "AdaptativeRandomWith3Neighbours", "abest", + "AdaptativeRandomWith5Neighbors", "AdaptativeRandomWith5Neighbours", + ], + listadv = [ + "VonNeumannNeighborhood", "VonNeumannNeighbourhood", + ], + ) self.defineRequiredParameter( name = "InertiaWeight", - default = 1., + default = 0.72135, # 1/(2*ln(2)) typecast = float, message = "Part de la vitesse de l'essaim qui est imposée à l'insecte, ou poids de l'inertie (entre 0 et 1)", minval = 0., @@ -80,19 +96,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): ) self.defineRequiredParameter( name = "CognitiveAcceleration", - default = 0.5, + default = 1.19315, # 1/2+ln(2) typecast = float, message = "Taux de rappel à la meilleure position de l'insecte précédemment connue (entre 0 et 1)", minval = 0., - maxval = 1., ) self.defineRequiredParameter( name = "SocialAcceleration", - default = 0.5, + default = 1.19315, # 1/2+ln(2) typecast = float, message = "Taux de rappel au meilleur insecte du groupe local (entre 0 et 1)", minval = 0., - maxval = 1., oldname = "GroupRecallRate", ) self.defineRequiredParameter( @@ -143,14 +157,14 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): "SimulatedObservationAtOptimum", ] ) - self.defineRequiredParameter( # Pas de type - name = "BoxBounds", - message = "Liste des valeurs de bornes d'incréments de paramètres", - ) self.defineRequiredParameter( # Pas de type name = "Bounds", message = "Liste des paires de bornes", ) + self.defineRequiredParameter( # Pas de type + name = "BoxBounds", + message = "Liste des paires de bornes d'incréments", + ) self.defineRequiredParameter( name = "InitializationPoint", typecast = numpy.ravel, @@ -176,6 +190,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): elif self._parameters["Variant"] in ["OGCR"]: ecwopso.ecwopso(self, Xb, Y, HO, R, B) # + elif self._parameters["Variant"] in ["SPSO-2011"]: + 
ecwspso.ecwspso(self, Xb, Y, HO, R, B) + # #-------------------------- else: raise ValueError("Error in Variant name: %s"%self._parameters["Variant"]) diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py index 2b20ae6..1550b99 100644 --- a/src/daComposant/daCore/BasicObjects.py +++ b/src/daComposant/daCore/BasicObjects.py @@ -713,7 +713,12 @@ class Algorithm(object): - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût - IndexOfOptimum : index de l'état optimal courant lors d'itérations - Innovation : l'innovation : d = Y - H(X) + - InnovationAtCurrentAnalysis : l'innovation à l'état analysé : da = Y - H(Xa) - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn) + - InternalCostFunctionJ : ensemble de valeurs internes de fonction-coût J dans un vecteur + - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jb dans un vecteur + - InternalCostFunctionJo : ensemble de valeurs internes de fonction-coût Jo dans un vecteur + - InternalStates : ensemble d'états internes rangés par colonne dans une matrice (=EnsembleOfSnapshots) - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum @@ -780,6 +785,10 @@ class Algorithm(object): self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation") self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis") self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState") + self.StoredVariables["InternalCostFunctionJ"] = Persistence.OneVector(name = "InternalCostFunctionJ") + self.StoredVariables["InternalCostFunctionJb"] = Persistence.OneVector(name = "InternalCostFunctionJb") + self.StoredVariables["InternalCostFunctionJo"] = Persistence.OneVector(name = 
"InternalCostFunctionJo") + self.StoredVariables["InternalStates"] = Persistence.OneMatrix(name = "InternalStates") self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground") self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState") self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum") diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py index 2f14503..428f42d 100644 --- a/src/daComposant/daCore/NumericObjects.py +++ b/src/daComposant/daCore/NumericObjects.py @@ -25,7 +25,7 @@ __doc__ = """ """ __author__ = "Jean-Philippe ARGAUD" -import os, copy, types, sys, logging, numpy, itertools +import os, copy, types, sys, logging, math, numpy, itertools from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm from daCore.PlatformInfo import PlatformInfo mpr = PlatformInfo().MachinePrecision() @@ -936,6 +936,38 @@ def Apply3DVarRecentringOnEnsemble( __EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Su # return Xa + EnsembleOfAnomalies( __EnXn ) +# ============================================================================== +def GenerateRandomPointInHyperSphere( __Center, __Radius ): + "Génère un point aléatoire uniformément à l'intérieur d'une hyper-sphère" + __Dimension = numpy.asarray( __Center ).size + __GaussDelta = numpy.random.normal( 0, 1, size=__Center.shape ) + __VectorNorm = numpy.linalg.norm( __GaussDelta ) + __PointOnHS = __Radius * (__GaussDelta / __VectorNorm) + __MoveInHS = math.exp( math.log(numpy.random.uniform()) / __Dimension) # rand()**1/n + __PointInHS = __MoveInHS * __PointOnHS + return __Center + __PointInHS + +# ============================================================================== +def GetNeighborhoodTopology( __ntype, __ipop ): + "Renvoi une topologie de connexion pour une population de points" + if __ntype in 
["FullyConnectedNeighborhood", "FullyConnectedNeighbourhood", "gbest"]: + __topology = [__ipop for __i in __ipop] + elif __ntype in ["RingNeighborhoodWithRadius1", "RingNeighbourhoodWithRadius1", "lbest"]: + __cpop = list(__ipop[-1:]) + list(__ipop) + list(__ipop[:1]) + __topology = [__cpop[__n:__n+3] for __n in range(len(__ipop))] + elif __ntype in ["RingNeighborhoodWithRadius2", "RingNeighbourhoodWithRadius2"]: + __cpop = list(__ipop[-2:]) + list(__ipop) + list(__ipop[:2]) + __topology = [__cpop[__n:__n+5] for __n in range(len(__ipop))] + elif __ntype in ["AdaptativeRandomWith3Neighbors", "AdaptativeRandomWith3Neighbours", "abest"]: + __cpop = 3*list(__ipop) + __topology = [[__i]+list(numpy.random.choice(__cpop,3)) for __i in __ipop] + elif __ntype in ["AdaptativeRandomWith5Neighbors", "AdaptativeRandomWith5Neighbours"]: + __cpop = 5*list(__ipop) + __topology = [[__i]+list(numpy.random.choice(__cpop,5)) for __i in __ipop] + else: + raise ValueError("Swarm topology type unavailable because name \"%s\" is unknown."%__ntype) + return __topology + # ============================================================================== def FindIndexesFromNames( __NameOfLocations = None, __ExcludeLocations = None, ForceArray = False ): "Exprime les indices des noms exclus, en ignorant les absents" -- 2.39.2