├── .gitignore ├── GoldsteinPrice.m ├── Himmelblau.m ├── PSO.m ├── PSO_Image.png ├── README.md ├── StyblinskiTang.m ├── StyblinskiTangNoise.m ├── TEST_PSO_1.m ├── TEST_PSO_2.m ├── TEST_PSO_3.m ├── TEST_PSO_4.m ├── TEST_PSO_5.m ├── TEST_PSO_6.m ├── TEST_PSO_7.m ├── license.txt ├── makeStruct.m ├── mergeOptions.m ├── plotBowl.m ├── plotGoldsteinPrice.m ├── plotHimmelblau.m ├── plotPsoHistory.m └── plotStyblinskiTang.m /.gitignore: -------------------------------------------------------------------------------- 1 | #Matlab autosave files 2 | **/*.m~ 3 | **/*.asv 4 | 5 | 6 | -------------------------------------------------------------------------------- /GoldsteinPrice.m: -------------------------------------------------------------------------------- 1 | function f = GoldsteinPrice(z) 2 | % f = GoldsteinPrice(z) 3 | % 4 | % Compute the Goldstein-Price function, used for testing optimization 5 | % 6 | % Reference: 7 | % Wikipedia: Test Functions for Optimization 8 | % 9 | % Global Minimum: 10 | % f(0, -1) = 0; 11 | % 12 | % NOTES: 13 | % The true GoldsteinPrice function has a global minimum of f(0,-1) = 3, 14 | % but I've shifted the entire function by 3 to make the plotting nice. 15 | % 16 | 17 | x = z(1,:); 18 | y = z(2,:); 19 | 20 | f = (1+((x+y+1).^2).*(19-14*x+3*x.^2-14*y+6*x.*y + 3*y.^2)).*... 21 | (30+((2*x-3*y).^2).*(18-32*x+12*x.^2+48*y-36*x.*y+27*y.^2)); 22 | 23 | f = f - 3; % Shift objective to have optimal value of 0.0 24 | 25 | end -------------------------------------------------------------------------------- /Himmelblau.m: -------------------------------------------------------------------------------- 1 | function f = Himmelblau(z) 2 | % f = Himmelblau(z) 3 | % 4 | % Compute Himmelblau's function: 5 | % f(x,y) = (x^2 + y - 11)^2 + (x+y^2-7)^2 6 | % 7 | % Used as a test function for optimization 8 | % 9 | % INPUTS: 10 | % z = [x;y] = [2,n] = evaluation poitn ion state space 11 | % 12 | % OUTPUTS: 13 | % f = f(z) = f(x,y) = (x.^2 + y - 11).^2 + (x+y.^2-7).^2 14 | % 15 | % Local Maximum: 16 | % f(-0.270845, -0.923039) = 181.617 17 | % 18 | % Local Minima: 19 | % f(3.0, 2.0) = 0.0 20 | % f(-2.805118, 3.131312) = 0.0 21 | % f(-3.779310, -3.283186) = 0.0 22 | % f(3.584428, -1.848126) = 0.0 23 | % 24 | % Reference: 25 | % --> Wikipedia: Himmelblau's Function 26 | % 27 | 28 | x = z(1,:); 29 | y = z(2,:); 30 | f = (x.^2 + y - 11).^2 + (x+y.^2-7).^2; 31 | 32 | end -------------------------------------------------------------------------------- /PSO.m: -------------------------------------------------------------------------------- 1 | function [xBest, fBest, info, dataLog] = PSO(objFun, x0, xLow, xUpp, options) 2 | % [xBest, fBest, info, dataLog] = PSO(objFun, x0, xLow, xUpp, options) 3 | % 4 | % Particle Swarm Optimization 5 | % 6 | % This function minimizes OBJFUN using a variant of particle swarm 7 | % optimization. The optimization uses an initial guess X0, and searches 8 | % over a search space bounded by XLOW and XUPP. 9 | % 10 | % INPUTS: 11 | % objFun = objective function handle: 12 | % f = objFun(x) 13 | % x = [n, m] = search point in n-dimensional space (for m points) 14 | % f = [1, m] = objective function value, for each of m points 15 | % x0 = [n, 1] = initial search location 16 | % --> Optional input. Set x0 = [] to ignore. 17 | % xLow = [n, 1] = lower bounds on search space 18 | % xUpp = [n, 1] = upper bounds on search space 19 | % options = option struct. All fields are optional, with defaults: 20 | % .alpha = 0.6 = search weight on current search direction 21 | % .beta = 0.9 = search weight on global best 22 | % .gamma = 0.9 = search weight on local best 23 | % .nPopulation = m = 3*n = population count 24 | % .maxIter = 100 = maximum number of generations 25 | % .tolFun = 1e-6 = exit when variance in objective is < tolFun 26 | % .tolX = 1e-10 = exit when norm of variance in state < tolX 27 | % .flagVectorize = false = is the objective function vectorized? 28 | % .flagMinimize = true = minimize objective 29 | % --> Set to false to maximize objective 30 | % .flagWarmStart = false = directly use initial guess? 31 | % --> true: first particle starts at x0 32 | % --> false: all particles are randomly selected 33 | % .guessWeight = 0.2; trade-off for initialization; range [0, 0.9) 34 | % --> 0.0 ignore x0; use random initialization [xLow, xUpp] 35 | % --> 0.9 heavy weight on initial guess (x0) 36 | % .plotFun = function handle for plotting progress 37 | % plotFun( dataLog(iter), iter ) 38 | % --> See OUTPUTS for details about dataLog 39 | % --> Leave empty to omit plotting (faster) 40 | % .display = 'iter'; 41 | % --> 'iter' = print out info for each iteration 42 | % --> 'final' = print out some info on exit 43 | % --> 'off' = disable printing 44 | % .printMod = 1 (only used if display == 'iter') 45 | % 46 | % OUTPUTS: 47 | % xBest = [n, 1] = best point ever found 48 | % fBest = [1, 1] = value of best point found 49 | % 50 | % info = output struct with solver info 51 | % .input = copy of solver inputs: 52 | % .objFun 53 | % .x0 54 | % .xLow 55 | % .xUpp 56 | % .options 57 | % .exitFlag = how did optimization finish 58 | % 0 = objective variance < tolFun 59 | % 1 = reached max iteration count 60 | % 2 = norm of state variance < tolX 61 | % .fEvalCount = how many calls to the objective function? 62 | % .X_Global = [n,iter] = best point in each generation 63 | % .F_Global = [1,iter] = value of the best point ever 64 | % .I_Global = [1,iter] = index of the best point ever 65 | % .X_Best_Var = [n,iter] = variance in best point along each dim 66 | % .X_Var = [n,iter] = variance in current search along each dim 67 | % .X_Best_Mean = [n,iter] = mean in best point along each dim 68 | % .X_Mean = [n,iter] = mean in current search along each dim 69 | % .F_Best_Var = [1,iter] = variance in the best val at each gen 70 | % .F_Var = [1,iter] = variance in the current val at each gen 71 | % .F_Best_Mean = [1,iter] = mean of the population best value 72 | % .F_Mean = [1,iter] = mean of the current population value 73 | % 74 | % dataLog(iter) = struct array with data from each iteration 75 | % .X = [n,m] = current position of each particle 76 | % .V = [n,m] = current "velocity" of each particle 77 | % .F = [1,m] = value of each particle 78 | % .X_Best = [n,m] = best point for each particle 79 | % .F_Best = [1,m] = value of the best point for each particle 80 | % .X_Global = [n,1] = best point ever (over all particles) 81 | % .F_Global = [1,1] = value of the best point ever 82 | % .I_Global = [1,1] = index of the best point ever 83 | % 84 | % NOTES: 85 | % This function uses a slightly different algorithm based on whether or 86 | % not the objective function is vectorized. If the objective is 87 | % vectorized, then the new global best point is only computed once per 88 | % iteration (generation). If the objective is not vectorized, then the 89 | % global best is updated after each particle is updated. 90 | % 91 | % 92 | % DEPENDENCIES 93 | % --> mergeOptions() 94 | % --> makeStruct() 95 | % 96 | % 97 | % REFERENCES: 98 | % 99 | % http://www.scholarpedia.org/article/Particle_swarm_optimization 100 | % 101 | % Clerc and Kennedy (2002) 102 | % 103 | % 104 | % CHANGE LOG: 105 | % 106 | % February 21, 2016 107 | % --> Changed default and bounds for options.guessWeight 108 | % --> Added options.flagWarmStart 109 | % --> Made x0 argument optional (pass x0 as [] to ignore) 110 | % 111 | 112 | 113 | %%%% Basic input validation: 114 | [n, m] = size(xLow); 115 | if m ~= 1 116 | error('x0 is not a valid size! Must be a column vector.') 117 | end 118 | [nRow, nCol] = size(xLow); 119 | if nRow ~= n || nCol ~= 1 120 | error(['xLow is not a valid size! Must be [' num2str(n) ', 1]']); 121 | end 122 | [nRow, nCol] = size(xUpp); 123 | if nRow ~= n || nCol ~= 1 124 | error(['xUpp is not a valid size! Must be [' num2str(n) ', 1]']); 125 | end 126 | 127 | 128 | %%%% Options Struct: 129 | default.alpha = 0.6; %search weight on current search direction 130 | default.beta = 0.9; %search weight on global best 131 | default.gamma = 0.9; %search weight on local best 132 | default.nPopulation = 3*n; % 3*n = population count 133 | default.maxIter = 100; % maximum number of generations 134 | default.tolFun = 1e-6; % exit when variance in objective is < tolFun 135 | default.tolX = 1e-10; % exit when norm of variance in state < tolX 136 | default.flagVectorize = false; % is the objective function vectorized? 137 | default.flagMinimize = true; %true for minimization, false for maximization 138 | default.xDelMax = xUpp - xLow; %Maximnum position update; 139 | default.flagWarmStart = false; %Directly use the initial point? 140 | default.guessWeight = 0.2; % on range [0, 0.9); 0 = ignore guess, 1 = start at guess 141 | default.plotFun = []; % Handle to a function for plotting the progress 142 | default.display = 'iter'; % Print out progress to user 143 | default.printMod = 1; % Print out every [printMod] iterations 144 | if nargin == 5 % user provided options struct! 145 | options = mergeOptions(default,options); 146 | else % no user-defined options. Use defaults. 147 | options = default; 148 | end 149 | 150 | 151 | %%% Options validation: 152 | if options.guessWeight < 0 153 | options.guessWeight = 0; 154 | disp('WARNING: options.guessWeight must be on range [0, 0.9)'); 155 | elseif options.guessWeight > 0.9 156 | options.guessWeight = 0.9; 157 | disp('WARNING: options.guessWeight must be on range [0, 0.9)'); 158 | end 159 | 160 | 161 | %%%% Minimize vs Maximize: 162 | if options.flagMinimize 163 | optFun = @min; 164 | else 165 | optFun = @max; 166 | end 167 | 168 | %%%% Check to see if user defined x0. If not, force defaults 169 | if isempty(x0) 170 | x0 = 0.5*xLow + 0.5*xUpp; 171 | options.guessWeight = 0.0; 172 | options.flagWarmStart = false; 173 | end 174 | 175 | 176 | %%%% Initialize the population 177 | 178 | % Sample two random points in the search space for each particle 179 | m = options.nPopulation; %population size 180 | X1 = xLow*ones(1,m) + ((xUpp-xLow)*ones(1,m)).*rand(n,m); 181 | X2 = xLow*ones(1,m) + ((xUpp-xLow)*ones(1,m)).*rand(n,m); 182 | 183 | % Move initial points towards initial guess, by convex combination 184 | w = options.guessWeight; %for initialization 185 | X0 = x0*ones(1,m); 186 | X1 = w*X0 + (1-w)*X1; 187 | X2 = w*X0 + (1-w)*X2; 188 | 189 | % Initialize population: 190 | X = X1; % Initial position of the population 191 | V = X2-X1; % Initial "velocity" of the population 192 | 193 | % Check for warm start. If so, override random initial point with x0 194 | if options.flagWarmStart 195 | X(:,1) = x0; 196 | V(:,1) = zeros(size(x0)); 197 | end 198 | 199 | if options.flagVectorize % Batch process objective 200 | X_Low = xLow*ones(1,m); 201 | X_Upp = xUpp*ones(1,m); 202 | F = objFun(X); % Function value at each particle in the population 203 | else % Objective not vectorized 204 | F = zeros(1,m); 205 | for idx = 1:m % Loop over particles 206 | F(1,idx) = objFun(X(:,idx)); 207 | end 208 | end 209 | 210 | X_Best = X; % Best point, for each particle in the population 211 | F_Best = F; % Value of best point, for each particle in the population 212 | 213 | [F_Global, I_Global] = optFun(F_Best); % Value of best point ever, over all points 214 | X_Global = X(:, I_Global); % Best point ever, over all points 215 | 216 | 217 | %%%% Allocate memory for the dataLog 218 | maxIter = options.maxIter; 219 | dataLog(maxIter) = makeStruct(X, V, F, X_Best, F_Best, X_Global, F_Global, I_Global); 220 | 221 | 222 | %%%% Allocate memory for info 223 | info.X_Global = zeros(n,maxIter); 224 | info.F_Global = zeros(1,maxIter); 225 | info.I_Global = zeros(1,maxIter); 226 | info.X_Best_Var = zeros(n,maxIter); 227 | info.F_Best_Var = zeros(1,maxIter); 228 | info.X_Best_Mean = zeros(n,maxIter); 229 | info.F_Best_Mean = zeros(1,maxIter); 230 | info.X_Var = zeros(n,maxIter); 231 | info.F_Var = zeros(1,maxIter); 232 | info.X_Mean = zeros(n,maxIter); 233 | info.F_Mean = zeros(1,maxIter); 234 | info.iter = 1:maxIter; 235 | 236 | 237 | %%%% MAIN LOOP: 238 | info.exitFlag = 1; %Assume that we will reach maximum iteration 239 | for iter = 1:maxIter 240 | 241 | %%% Compute new generation of points: 242 | if iter > 1 % Then do an update on each particle 243 | r1 = rand(n,m); 244 | r2 = rand(n,m); 245 | 246 | if options.flagVectorize % Batch process objective 247 | 248 | V = ... %Update equations 249 | options.alpha*V + ... % Current search direction 250 | options.beta*r1.*((X_Global*ones(1,m))-X) + ... % Global direction 251 | options.gamma*r2.*(X_Best-X); % Local best direction 252 | X_New = X + V; % Update position 253 | X = max(min(X_New, X_Upp), X_Low); % Clamp position to bounds 254 | 255 | F = objFun(X); %Evaluate 256 | 257 | F_Best_New = optFun(F_Best, F); %Compute the best point 258 | idxUpdate = F_Best_New ~= F_Best; % Which indicies updated? 259 | X_Best(:,idxUpdate) = X(:,idxUpdate); %Copy over new best points 260 | F_Best = F_Best_New; 261 | [F_Global, I_Global] = optFun(F_Best); % Value of best point ever, over all points 262 | X_Global = X(:, I_Global); % Best point ever, over all points 263 | 264 | 265 | else %Objective is not vectorized. 266 | 267 | for idx = 1:m %%%%%%% Loop over particles %%%%%%%%%%%%%% 268 | 269 | V(:,idx) = ... %Update equations 270 | options.alpha*V(:,idx) + ... % Current search direction 271 | options.beta*r1(:,idx).*(X_Global-X(:,idx)) + ... % Global direction 272 | options.gamma*r2(:,idx).*(X_Best(:,idx)-X(:,idx)); % Local best direction 273 | X_New = X(:,idx) + V(:,idx); % Update position 274 | X(:,idx) = max(min(X_New, xUpp), xLow); % Clamp position to bounds 275 | 276 | F(:,idx) = objFun(X(:,idx)); %Evaluate 277 | 278 | [F_Best(1,idx), iBest] = optFun([F(1,idx), F_Best(1,idx)]); 279 | if iBest == 1 %Then new point is better! 280 | X_Best(:,idx) = X(:,idx); 281 | [F_Global, iBest] = optFun([F_Best(1,idx), F_Global]); 282 | if iBest == 1 %Then new point is the global best! 283 | X_Global = X_Best(:,idx); 284 | end 285 | end 286 | 287 | end %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 288 | 289 | end 290 | end 291 | 292 | %%% Log Data 293 | dataLog(iter) = makeStruct(X, V, F, X_Best, F_Best, X_Global, F_Global, I_Global); 294 | info.X_Global(:,iter) = X_Global; 295 | info.F_Global(iter) = F_Global; 296 | info.I_Global(iter) = I_Global; 297 | info.X_Var(:,iter) = var(X, 0, 2); 298 | info.X_Best_Var(:,iter) = var(X_Best, 0, 2); 299 | info.X_Mean(:,iter) = mean(X, 2); 300 | info.X_Best_Mean(:,iter) = mean(X_Best, 2); 301 | info.F_Var(1,iter) = var(F); 302 | info.F_Best_Var(1,iter) = var(F_Best); 303 | info.F_Mean(1,iter) = mean(F); 304 | info.F_Best_Mean(1,iter) = mean(F_Best); 305 | 306 | %%% Plot 307 | if ~isempty(options.plotFun) 308 | options.plotFun(dataLog(iter), iter); 309 | end 310 | 311 | %%% Print: 312 | xVar = norm(info.X_Var(:,iter)); 313 | if strcmp('iter',options.display) 314 | if mod(iter-1,options.printMod)==0 315 | fprintf('iter: %3d, fBest: %9.3e, fVar: %9.3e xVar: %9.3e \n',... 316 | iter, info.F_Global(iter), info.F_Var(1,iter), xVar); 317 | end 318 | end 319 | 320 | %%% Convergence: 321 | if info.F_Var(1,iter) < options.tolFun 322 | info.exitFlag = 0; 323 | dataLog = dataLog(1:iter); 324 | info = truncateInfo(info,maxIter,iter); 325 | break 326 | elseif xVar < options.tolX 327 | info.exitFlag = 2; 328 | dataLog = dataLog(1:iter); 329 | info = truncateInfo(info,maxIter,iter); 330 | break 331 | end 332 | end 333 | 334 | xBest = info.X_Global(:,end); 335 | fBest = info.F_Global(end); 336 | info.input = makeStruct(objFun, x0, xLow, xUpp, options); %Copy inputs 337 | info.fEvalCount = iter*m; 338 | 339 | %%% Print: 340 | if strcmp('iter',options.display) || strcmp('final',options.display) 341 | switch info.exitFlag 342 | case 0 343 | fprintf('Optimization Converged. Exit: fVar < tolFun \n'); 344 | case 1 345 | fprintf('Maximum iteration reached. \n'); 346 | case 2 347 | fprintf('Optimization Converged. Exit: norm(xVar) < tolX \n'); 348 | end 349 | end 350 | 351 | end 352 | 353 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 354 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 355 | 356 | function info = truncateInfo(info,maxIter,iter) 357 | % 358 | % Removes the empty entries in the info struct 359 | 360 | names = fieldnames(info); 361 | for i=1:length(names) 362 | if (isnumeric(info.(names{i}))) % Check if it's a matrix 363 | if size(info.(names{i}),2) == maxIter % Check if it is iteration data 364 | info.(names{i}) = info.(names{i})(:,1:iter); 365 | end 366 | end 367 | end 368 | 369 | end 370 | 371 | 372 | 373 | 374 | -------------------------------------------------------------------------------- /PSO_Image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MatthewPeterKelly/ParticleSwarmOptimization/14f5123e4154e96f396aea34941e26046f5d2363/PSO_Image.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Particle Swarm Optimization 2 | 3 | This directory contains a simple implementation of particle swarm optimization (PSO.m), as well as scripts that use it to solve standard optimization test problems (TEST_PSO_*.m). 4 | 5 | This implementation of PSO is designed for solving a bounded non-linear paramter optimization problem, with an initial guess. It is fully vectorized. 6 | 7 | There are a variety of options that can be set by the user, but will be initialized to a default value if ommitted. 8 | 9 | The output of the solver contains a full history of the optimization, which can be plotted using plotPsoHistory.m. Additionally, the user can define a plotting function to be called on each iteration.Both of these features are demonstrated in the TEST_PSO_*.m scripts. 10 | 11 | The code supports both vectorized and non-vectorized objective function. If the objective function is vectorized, then the global best is updated synchronously, once per generation. If the objective function is not vectorized, then the optimization uses an asynchronous update, updating the global best after every particle update. 12 | 13 | 14 | ## Test Functions: 15 | 16 | - TEST_PSO_1.m --> 2-D Sphere Function 17 | - TEST_PSO_2.m --> Himmelblau's function 18 | - TEST_PSO_3.m --> Goldstein-Price function 19 | - TEST_PSO_4.m --> 2-D Styblinski-Tang function 20 | - TEST_PSO_5.m --> N-D Styblinski-Tang function 21 | 22 | ## Help file for PSO.m 23 | 24 | [xBest, fBest, info, dataLog] = PSO(objFun, x0, xLow, xUpp, options) 25 | 26 | Particle Swarm Optimization 27 | 28 | This function minimizes OBJFUN using a variant of particle swarm 29 | optimization. The optimization uses an initial guess X0, and searches 30 | over a search space bounded by XLOW and XUPP. 31 | 32 | INPUTS: 33 | objFun = objective function handle: 34 | f = objFun(x) 35 | x = [n, m] = search point in n-dimensional space (for m points) 36 | f = [1, m] = objective function value, for each of m points 37 | x0 = [n, 1] = initial search location 38 | --> Optional input. Set x0 = [] to ignore. 39 | xLow = [n, 1] = lower bounds on search space 40 | xUpp = [n, 1] = upper bounds on search space 41 | options = option struct. All fields are optional, with defaults: 42 | .alpha = 0.6 = search weight on current search direction 43 | .beta = 0.9 = search weight on global best 44 | .gamma = 0.9 = search weight on local best 45 | .nPopulation = m = 3*n = population count 46 | .maxIter = 100 = maximum number of generations 47 | .tolFun = 1e-6 = exit when variance in objective is < tolFun 48 | .tolX = 1e-10 = exit when norm of variance in state < tolX 49 | .flagVectorize = false = is the objective function vectorized? 50 | .flagMinimize = true = minimize objective 51 | --> Set to false to maximize objective 52 | .flagWarmStart = false = directly use initial guess? 53 | --> true: first particle starts at x0 54 | --> false: all particles are randomly selected 55 | .guessWeight = 0.2; trade-off for initialization; range [0, 0.9) 56 | --> 0.0 ignore x0; use random initialization [xLow, xUpp] 57 | --> 0.9 heavy weight on initial guess (x0) 58 | .plotFun = function handle for plotting progress 59 | plotFun( dataLog(iter), iter ) 60 | --> See OUTPUTS for details about dataLog 61 | --> Leave empty to omit plotting (faster) 62 | .display = 'iter'; 63 | --> 'iter' = print out info for each iteration 64 | --> 'final' = print out some info on exit 65 | --> 'off' = disable printing 66 | .printMod = 1 (only used if display == 'iter') 67 | 68 | OUTPUTS: 69 | xBest = [n, 1] = best point ever found 70 | fBest = [1, 1] = value of best point found 71 | 72 | info = output struct with solver info 73 | .input = copy of solver inputs: 74 | .objFun 75 | .x0 76 | .xLow 77 | .xUpp 78 | .options 79 | .exitFlag = how did optimization finish 80 | 0 = objective variance < tolFun 81 | 1 = reached max iteration count 82 | 2 = norm of state variance < tolX 83 | .fEvalCount = how many calls to the objective function? 84 | .X_Global = [n,iter] = best point in each generation 85 | .F_Global = [1,iter] = value of the best point ever 86 | .I_Global = [1,iter] = index of the best point ever 87 | .X_Best_Var = [n,iter] = variance in best point along each dim 88 | .X_Var = [n,iter] = variance in current search along each dim 89 | .X_Best_Mean = [n,iter] = mean in best point along each dim 90 | .X_Mean = [n,iter] = mean in current search along each dim 91 | .F_Best_Var = [1,iter] = variance in the best val at each gen 92 | .F_Var = [1,iter] = variance in the current val at each gen 93 | .F_Best_Mean = [1,iter] = mean of the population best value 94 | .F_Mean = [1,iter] = mean of the current population value 95 | 96 | dataLog(iter) = struct array with data from each iteration 97 | .X = [n,m] = current position of each particle 98 | .V = [n,m] = current "velocity" of each particle 99 | .F = [1,m] = value of each particle 100 | .X_Best = [n,m] = best point for each particle 101 | .F_Best = [1,m] = value of the best point for each particle 102 | .X_Global = [n,1] = best point ever (over all particles) 103 | .F_Global = [1,1] = value of the best point ever 104 | .I_Global = [1,1] = index of the best point ever 105 | 106 | NOTES: 107 | This function uses a slightly different algorithm based on whether or 108 | not the objective function is vectorized. If the objective is 109 | vectorized, then the new global best point is only computed once per 110 | iteration (generation). If the objective is not vectorized, then the 111 | global best is updated after each particle is updated. 112 | 113 | 114 | DEPENDENCIES 115 | --> mergeOptions() 116 | --> makeStruct() 117 | 118 | 119 | REFERENCES: 120 | 121 | http://www.scholarpedia.org/article/Particle_swarm_optimization 122 | 123 | Clerc and Kennedy (2002) 124 | -------------------------------------------------------------------------------- /StyblinskiTang.m: -------------------------------------------------------------------------------- 1 | function f = StyblinskiTang(x) 2 | % f = StyblinskiTang(x) 3 | % 4 | % Compute the Styblinski-Tang(z) function, used for testing optimization 5 | % 6 | % Reference: 7 | % Wikipedia: Test Functions for Optimization 8 | % 9 | % Global Minimum: 10 | % N = size(x,1); %Dimension of search space 11 | % xStar = -2.903534027771178 * ones(N,1); 12 | % f(xStar) = -39.166165703771419 * N; 13 | % 14 | 15 | [n,m] = size(x); 16 | f = zeros(1,m); 17 | for i=1:n 18 | f = f + x(i,:).^4 - 16*x(i,:).^2 + 5*x(i,:); 19 | end 20 | f = 0.5*f; 21 | 22 | end -------------------------------------------------------------------------------- /StyblinskiTangNoise.m: -------------------------------------------------------------------------------- 1 | function f = StyblinskiTangNoise(x,alpha) 2 | % f = StyblinskiTangNoise(x,alpha) 3 | % 4 | % Compute the Styblinski-Tang(z) function, used for testing optimization 5 | % 6 | % Reference: 7 | % Wikipedia: Test Functions for Optimization 8 | % 9 | % Global Minimum: 10 | % N = size(x,1); %Dimension of search space 11 | % xStar = -2.903534027771178 * ones(N,1); 12 | % f(xStar) = -39.166165703771419 * N; 13 | % 14 | 15 | [n,m] = size(x); 16 | f = zeros(1,m); 17 | for i=1:n 18 | f = f + x(i,:).^4 - 16*x(i,:).^2 + 5*x(i,:); 19 | end 20 | f = 0.5*f; 21 | 22 | % Add some noise 23 | f = f + 39.166165703771419*n*alpha*randn(size(f)); 24 | 25 | end -------------------------------------------------------------------------------- /TEST_PSO_1.m: -------------------------------------------------------------------------------- 1 | % TEST -- PSO -- Particle Swarm Optimization 2 | % 3 | % First test, simple quadratic bowl 4 | 5 | clc; clear; 6 | 7 | %%%% Set up problem 8 | 9 | objFun = @(x)( sum(x.^2,1) ); % Minimize this function 10 | 11 | xLow = -ones(2,1); % lower bound on the search space 12 | xUpp = ones(2,1); % upper bound on the search space 13 | x0 = []; % No initial guess 14 | 15 | options.alpha = 0.4; % weight on current search direction 16 | options.beta = 0.9; % weight on local best search direction 17 | options.gamma = 0.9; % weight on global best search direction 18 | 19 | options.nPopulation = 10; 20 | options.maxIter = 20; 21 | 22 | options.plotFun = @plotBowl; % Plots progress 23 | 24 | 25 | %%%% Solve 26 | [xBest, fBest, info, dataLog] = PSO(objFun, x0, xLow, xUpp, options); 27 | 28 | %%%% Analysis 29 | figure(101); clf; 30 | plotPsoHistory(info); 31 | 32 | 33 | -------------------------------------------------------------------------------- /TEST_PSO_2.m: -------------------------------------------------------------------------------- 1 | % TEST -- PSO -- Particle Swarm Optimization 2 | % 3 | % Test 2: Himmelblau's function 4 | % 5 | % 6 | 7 | clc; clear; clear global; figure(200); clf; 8 | global HimmelblauSaveAnimation %Set to true to save the animation 9 | 10 | %%%% Set up problem 11 | 12 | objFun = @Himmelblau; % Minimize this function 13 | 14 | xLow = -5*ones(2,1); % lower bound on the search space 15 | xUpp = 5*ones(2,1); % upper bound on the search space 16 | x0 = [0;0]; % initial guess 17 | 18 | % options.alpha = 0.4; % weight on current search direction 19 | % options.beta = 0.9; % weight on local best search direction 20 | % options.gamma = 0.9; % weight on global best search direction 21 | 22 | options.nPopulation = 15; 23 | options.maxIter = 50; 24 | 25 | options.plotFun = @plotHimmelblau; % Plots progress 26 | HimmelblauSaveAnimation = false; %Save an animation 27 | 28 | 29 | %%%% Solve 30 | [xBest, fBest, info, dataLog] = PSO(objFun, x0, xLow, xUpp, options); 31 | 32 | %%%% Analysis 33 | figure(201); clf; 34 | plotPsoHistory(info); 35 | 36 | 37 | -------------------------------------------------------------------------------- /TEST_PSO_3.m: -------------------------------------------------------------------------------- 1 | % TEST -- PSO -- Particle Swarm Optimization 2 | % 3 | % Test 3: Goldstein-Price function 4 | % 5 | % 6 | 7 | clc; clear; clear global; figure(300); clf; 8 | 9 | %%%% Set up problem 10 | 11 | objFun = @GoldsteinPrice; % Minimize this function 12 | 13 | xLow = -2*ones(2,1); % lower bound on the search space 14 | xUpp = 2*ones(2,1); % upper bound on the search space 15 | x0 = [-1;1]; % initial guess 16 | 17 | % options.alpha = 0.4; % weight on current search direction 18 | % options.beta = 0.9; % weight on local best search direction 19 | % options.gamma = 0.9; % weight on global best search direction 20 | 21 | % options.tolX = 1e-8; 22 | % options.tolFun = 1e-4; 23 | 24 | % options.flagVectorize = true; % Objective function is vectorized 25 | 26 | options.nPopulation = 15; 27 | options.maxIter = 50; 28 | 29 | options.plotFun = @plotGoldsteinPrice; % Plots progress 30 | 31 | %%%% Solve 32 | [xBest, fBest, info, dataLog] = PSO(objFun, x0, xLow, xUpp, options); 33 | 34 | %%%% Analysis 35 | figure(301); clf; 36 | plotPsoHistory(info); 37 | 38 | 39 | -------------------------------------------------------------------------------- /TEST_PSO_4.m: -------------------------------------------------------------------------------- 1 | % TEST -- PSO -- Particle Swarm Optimization 2 | % 3 | % Test 4: Styblinski-Tang function (2D) 4 | % 5 | % 6 | 7 | clc; clear; clear global; figure(400); clf; 8 | 9 | %%%% Set up problem 10 | 11 | objFun = @StyblinskiTang; % Minimize this function 12 | 13 | xLow = -5*ones(2,1); % lower bound on the search space 14 | xUpp = 5*ones(2,1); % upper bound on the search space 15 | x0 = [0;0]; % initial guess 16 | 17 | options.alpha = 0.5; % weight on current search direction 18 | options.beta = 1.1; % weight on local best search direction 19 | options.gamma = 1.1; % weight on global best search direction 20 | 21 | options.nPopulation = 20; 22 | options.maxIter = 50; 23 | 24 | options.plotFun = @plotStyblinskiTang; % Plots progress 25 | 26 | 27 | %%%% Solve 28 | [xBest, fBest, info, dataLog] = PSO(objFun, x0, xLow, xUpp, options); 29 | 30 | %%%% Analysis 31 | figure(401); clf; 32 | plotPsoHistory(info); 33 | 34 | 35 | -------------------------------------------------------------------------------- /TEST_PSO_5.m: -------------------------------------------------------------------------------- 1 | % TEST -- PSO -- Particle Swarm Optimization 2 | % 3 | % Test 5: Styblinski-Tang function (5-D) 4 | % 5 | % There are many local minimums to this problem, but only one global 6 | % minimum. All are of similar value. 7 | % >> help StyblinskiTang % For more details 8 | % 9 | 10 | clc; clear; 11 | 12 | %%%% Set up problem 13 | 14 | objFun = @StyblinskiTang; % Minimize this function 15 | 16 | xLow = -5*ones(5,1); % lower bound on the search space 17 | xUpp = 5*ones(5,1); % upper bound on the search space 18 | x0 = -2*ones(5,1); % initial guess 19 | 20 | options.alpha = 0.5; % weight on current search direction 21 | options.beta = 1.0; % weight on local best search direction 22 | options.gamma = 1.0; % weight on global best search direction 23 | 24 | options.nPopulation = 15; 25 | options.maxIter = 25; 26 | 27 | options.flagVectorize = true; % Use vectorized objective function 28 | options.flagWarmStart = true; % Include x0 in first generation 29 | 30 | %%%% Solve 31 | [xBest, fBest, info, dataLog] = PSO(objFun, x0, xLow, xUpp, options); 32 | 33 | %%%% Analysis 34 | figure(501); clf; 35 | plotPsoHistory(info); 36 | 37 | 38 | -------------------------------------------------------------------------------- /TEST_PSO_6.m: -------------------------------------------------------------------------------- 1 | % TEST -- PSO -- Particle Swarm Optimization 2 | % 3 | % Test 6: Styblinski-Tang function (5-D) 4 | % 5 | % There are many local minimums to this problem, but only one global 6 | % minimum. All are of similar value. 7 | % >> help StyblinskiTang % For more details 8 | % 9 | % This script is a meta-optimization, running a simple grid search to 10 | % determine which set of paramters will cause the optimization to most 11 | % rapidly converge to a local minimum. 12 | % 13 | % Note - the resulting set of parameters are not good in general, since 14 | % they will always find the closest local minimum. 15 | % 16 | % This script will take some time to execute, since it runs many 17 | % optimizations one after the other. 18 | % 19 | 20 | clc; clear; 21 | 22 | %%%% Set up problem 23 | 24 | objFun = @StyblinskiTang; % Minimize this function 25 | 26 | xLow = -5*ones(5,1); % lower bound on the search space 27 | xUpp = 5*ones(5,1); % upper bound on the search space 28 | x0 = zeros(5,1); % initial guess 29 | 30 | options.maxIter = 100; 31 | options.tolFun = 1e-6; 32 | options.tolX = 1e-6; 33 | options.flagVectorize = false; 34 | options.guessWeight = 0.11; 35 | options.display = 'off'; 36 | 37 | %%%% Select the grid search parameters: 38 | Z_alpha = linspace(0.1, 0.7, 9); 39 | Z_betaGamma = linspace(0.7, 1.6, 9); 40 | Z_nPopulation = [8, 12, 16, 20, 24]; 41 | 42 | N_REPEAT = 5; %Run optimization this many times for each set of params 43 | 44 | nAlpha = length(Z_alpha); 45 | nBetaGamma = length(Z_betaGamma); 46 | nPopIter = length(Z_nPopulation); 47 | 48 | nTrial = nAlpha*nBetaGamma*nPopIter; 49 | Alpha = zeros(nTrial,1); 50 | BetaGamma = zeros(nTrial,1); 51 | Pop = zeros(nTrial,1); 52 | 53 | F_Eval_Count = zeros(nTrial,1); 54 | F_Best = zeros(nTrial,1); 55 | 56 | 57 | nEval = zeros(1,N_REPEAT); 58 | fVal = zeros(1,N_REPEAT); 59 | 60 | idx = 0; 61 | for i=1:nAlpha 62 | for j=1:nBetaGamma 63 | for k=1:nPopIter 64 | idx = idx + 1; 65 | 66 | %%%% Unpack parameters: 67 | options.alpha = Z_alpha(i); 68 | options.beta = Z_betaGamma(j); 69 | options.gamma = Z_betaGamma(j); 70 | options.nPopulation = Z_nPopulation(k); 71 | 72 | %%%% Log parameters (lazy way) 73 | Alpha(idx) = Z_alpha(i); 74 | BetaGamma(idx) = Z_betaGamma(j); 75 | Pop(idx) = Z_nPopulation(k); 76 | 77 | %%%% Solve 78 | for rr = 1:N_REPEAT 79 | [~, fBest, info] = PSO(objFun, x0, xLow, xUpp, options); 80 | nEval(rr) = info.fEvalCount; 81 | fVal(rr) = fBest; 82 | end 83 | F_Eval_Count(idx) = mean(nEval(rr)); 84 | F_Best(idx) = mean(fVal); 85 | 86 | %%%% User Read-Out: 87 | fprintf('Iter: %d / %d \n', idx, nTrial); 88 | 89 | end 90 | end 91 | end 92 | 93 | %%%% Data Analysis 94 | 95 | FF = [F_Eval_Count, F_Best]; 96 | 97 | [~,IDX] = sortrows(FF,[-1,-2]); %[worst --> best] 98 | 99 | % for i=1:nTrial 100 | % fprintf('nEval: %4d, fVal: %6.3e, Alpha: %4.2f, Beta: %4.2f, nPop: %d \n',... 101 | % F_Eval_Count(IDX(i)), F_Best(IDX(i)), Alpha(IDX(i)), BetaGamma(IDX(i)), Pop(IDX(i))); 102 | % end 103 | 104 | 105 | %%%% Agregate the top N parameter runs: 106 | N = 10; ii = length(IDX) + 1 - (1:N); 107 | disp('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'); 108 | disp('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'); 109 | for i=1:N 110 | fprintf('nEval: %4d, fVal: %6.3e, Alpha: %4.2f, Beta: %4.2f, nPop: %d \n',... 111 | F_Eval_Count(IDX(ii(i))), F_Best(IDX(ii(i))), Alpha(IDX(ii(i))), BetaGamma(IDX(ii(i))), Pop(IDX(ii(i)))); 112 | end 113 | fprintf('nEval: mean = %6.1f, median = %6.1f, range = %6.1f \n', mean(F_Eval_Count(IDX(ii))), median(F_Eval_Count(IDX(ii))),range(F_Eval_Count(IDX(ii)))); 114 | fprintf('fVal: mean = %6.1f, median = %6.1f, range = %6.1f \n', mean(F_Best(IDX(ii))), median(F_Best(IDX(ii))),range(F_Best(IDX(ii)))); 115 | fprintf('Alpha: mean = %6.1f, median = %6.1f, range = %6.1f \n', mean(Alpha(IDX(ii))), median(Alpha(IDX(ii))), range(Alpha(IDX(ii)))); 116 | fprintf('Beta: mean = %6.1f, median = %6.1f, range = %6.1f \n', mean(BetaGamma(IDX(ii))), median(BetaGamma(IDX(ii))), range(BetaGamma(IDX(ii)))); 117 | fprintf('Pop: mean = %6.1f, median = %6.1f, range = %6.1f \n', mean(Pop(IDX(ii))), median(Pop(IDX(ii))), range(Pop(IDX(ii)))); 118 | -------------------------------------------------------------------------------- /TEST_PSO_7.m: -------------------------------------------------------------------------------- 1 | % TEST -- PSO -- Particle Swarm Optimization 2 | % 3 | % Test 7: Styblinski-Tang function (5-D) with noise 4 | % 5 | % There are many local minimums to this problem, but only one global 6 | % minimum. All are of similar value. 7 | % >> help StyblinskiTang % For more details 8 | % 9 | % Noisy objective function 10 | % 11 | 12 | clc; clear; 13 | 14 | %%%% Set up problem 15 | 16 | alpha = 0.01; %noise variance -> 1 is on order of optimal objective 17 | objFun = @(x)( StyblinskiTangNoise(x, alpha) ); % Minimize this function 18 | 19 | xLow = -5*ones(5,1); % lower bound on the search space 20 | xUpp = 5*ones(5,1); % upper bound on the search space 21 | x0 = -2*ones(5,1); % initial guess 22 | 23 | options.alpha = 0.5; % weight on current search direction 24 | options.beta = 0.8; % weight on local best search direction 25 | options.gamma = 0.8; % weight on global best search direction 26 | 27 | options.flagWarmStart = true; % Include x0 in first generation 28 | 29 | options.nPopulation = 100; 30 | options.maxIter = 50; 31 | 32 | options.flagVectorize = true; 33 | 34 | %%%% Solve 35 | [xBest, fBest, info, dataLog] = PSO(objFun, x0, xLow, xUpp, options); 36 | 37 | %%%% Analysis 38 | figure(501); clf; 39 | plotPsoHistory(info); 40 | 41 | 42 | -------------------------------------------------------------------------------- /license.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | Copyright (c) 2016 Matthew P. Kelly 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 5 | 6 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 7 | 8 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 9 | -------------------------------------------------------------------------------- /makeStruct.m: -------------------------------------------------------------------------------- 1 | function StructOutput = makeStruct(varargin) 2 | % StructOutput = makeStruct(varargin) 3 | % 4 | % A struct is created with the property that each field corresponds to one 5 | % of the arguments passed to this function. 6 | % 7 | % Example: 8 | % 9 | % R = makeStruct(a,b,c,d,e); 10 | % 11 | % R.a == a; 12 | % R.b == b; 13 | % R.c == c; 14 | % R.d == d; 15 | % R.e == e; 16 | % 17 | % Notes: 18 | % 19 | % An arbitrary number of inputs are allowed, but their names should be 20 | % unique. 21 | % 22 | 23 | N_Inputs = length(varargin); 24 | 25 | for i=1:N_Inputs 26 | name = inputname(i); 27 | StructOutput.(name) = varargin{i}; 28 | end 29 | 30 | end -------------------------------------------------------------------------------- /mergeOptions.m: -------------------------------------------------------------------------------- 1 | function output = mergeOptions(default, user,name) 2 | % output = mergeOptions(default, user, name) 3 | % 4 | % Merge a default options struct with a user-defined options struct. Works 5 | % recursively, and will issue warning messages if the user attempts to 6 | % define a field that is not in the default options. 7 | % 8 | % BEHAVIOR: 9 | % 10 | % - All fields in DEFAULT will be present in OUTPUT 11 | % - If a field is in both DEFAULT and USER, then the value from USER is 12 | % present in OUTPUT 13 | % - If a field is present in USER, but not DEFAULT, then issue a warning. 14 | % - Applies recursively 15 | % 16 | % 17 | % NOTES: 18 | % 19 | % The argument NAME is optional, and contains a string specifying the 20 | % name of the options struct. This is primarily used for printing 21 | % warnings to the user. 22 | % 23 | % This function works recursively. For example, if there is a struct 24 | % inside of a struct, then it will recursively apply this merge. 25 | % 26 | 27 | %%%% Start by assuming that the OUTPUT is just the DEFAULT 28 | output = default; 29 | 30 | if nargin == 2 31 | structName = ''; 32 | else 33 | structName = [name '.']; 34 | end 35 | 36 | if ~isempty(user) 37 | 38 | %%%% Check for any overriding fields in the USER-defined struct 39 | default_fields = fieldnames(default); 40 | for i=1:length(default_fields) 41 | if isfield(user,default_fields{i}) 42 | C0 = isstruct(default.(default_fields{i})); 43 | C1 = isstruct(user.(default_fields{i})); 44 | if C0 && C1 % Both are structs 45 | output.(default_fields{i}) = mergeOptions(... 46 | default.(default_fields{i}),... 47 | user.(default_fields{i}),... 48 | [structName default_fields{i}]); 49 | elseif ~C0 && ~C1 % Both are fields 50 | output.(default_fields{i}) = user.(default_fields{i}); 51 | elseif C0 && ~C1 %default is struct, user is a field 52 | disp(['WARNING: ' structName default_fields{i} ' should be a struct!']); 53 | elseif ~C0 && C1 %default is struct, user is a field 54 | disp(['WARNING: ' structName default_fields{i} ' should not be a struct!']); 55 | end 56 | end 57 | end 58 | 59 | %%%% Check for any fields in USER that are not in DEFAULT 60 | user_fields = fieldnames(user); 61 | for i=1:length(user_fields) 62 | if ~isfield(default, user_fields{i}) 63 | disp(['WARNING: unrecognized option: ' structName user_fields{i}]); 64 | end 65 | end 66 | 67 | end 68 | 69 | end -------------------------------------------------------------------------------- /plotBowl.m: -------------------------------------------------------------------------------- 1 | function plotBowl(dataLog, iter) 2 | % plotBowl(dataLog, iter) 3 | % 4 | % Used to display progress as the optimization progresses 5 | % 6 | % dataLog(iter) = struct array with data from each iteration 7 | % .X = [n,m] = current position of each particle 8 | % .V = [n,m] = current "velocity" of each particle 9 | % .F = [1,m] = value of each particle 10 | % .X_Best = [n,m] = best point for each particle 11 | % .F_Best = [1,m] = value of the best point for each particle 12 | % .X_Global = [n,1] = best point ever (over all particles) 13 | % .F_Global = [1,1] = value of the best point ever 14 | % .I_Global = [1,1] = index of the best point ever 15 | % 16 | 17 | figure(100); clf; hold on; 18 | axis([-1,1,-1,1]); axis equal; 19 | 20 | %%%% Draw contour lines: 21 | rectangle('Position',[-1,-1,2,2],'Curvature',[1,1],'LineWidth',1); 22 | rectangle('Position',0.8^2*[-1,-1,2,2],'Curvature',[1,1],'LineWidth',1); 23 | rectangle('Position',0.6^2*[-1,-1,2,2],'Curvature',[1,1],'LineWidth',1); 24 | rectangle('Position',0.4^2*[-1,-1,2,2],'Curvature',[1,1],'LineWidth',1); 25 | rectangle('Position',0.2^2*[-1,-1,2,2],'Curvature',[1,1],'LineWidth',1); 26 | 27 | %%%% Plot current position 28 | x = dataLog.X(1,:); %First dimension 29 | y = dataLog.X(2,:); %Second dimension 30 | plot(x,y,'k.','MarkerSize',10); 31 | 32 | %%%% Plot the local best 33 | x = dataLog.X_Best(1,:); %First dimension 34 | y = dataLog.X_Best(2,:); %Second dimension 35 | plot(x,y,'ko','MarkerSize',5,'LineWidth',1); 36 | 37 | %%%% Plot the global best 38 | x = dataLog.X_Global(1,:); %First dimension 39 | y = dataLog.X_Global(2,:); %Second dimension 40 | plot(x,y,'bo','MarkerSize',8,'LineWidth',2); 41 | 42 | %%%% Annotations 43 | title(sprintf('Iteration: %d, ObjVal: %6.3g', iter, dataLog.F_Global)); 44 | xlabel('x1') 45 | ylabel('x2'); 46 | 47 | %%%% Force drawing: 48 | drawnow; 49 | pause(0.02); %Make it possible to see updates 50 | 51 | end -------------------------------------------------------------------------------- /plotGoldsteinPrice.m: -------------------------------------------------------------------------------- 1 | function plotGoldsteinPrice(dataLog, iter) 2 | % plotGoldsteinPrice(dataLog, iter) 3 | % 4 | % Used to display progress as the optimization progresses 5 | % 6 | % dataLog(iter) = struct array with data from each iteration 7 | % .X = [n,m] = current position of each particle 8 | % .V = [n,m] = current "velocity" of each particle 9 | % .F = [1,m] = value of each particle 10 | % .X_Best = [n,m] = best point for each particle 11 | % .F_Best = [1,m] = value of the best point for each particle 12 | % .X_Global = [n,1] = best point ever (over all particles) 13 | % .F_Global = [1,1] = value of the best point ever 14 | % .I_Global = [1,1] = index of the best point ever 15 | % 16 | 17 | global GoldsteinPriceContourHandle GoldsteinPricePopulationHandle 18 | global GoldsteinPricePopBestHandle GoldsteinPriceGlobalHandle 19 | 20 | figure(300); hold on; 21 | 22 | %%%% Plot the function to be optimized 23 | if isempty(GoldsteinPriceContourHandle) 24 | x = linspace(-2,2,50); 25 | y = linspace(-2,2,50); 26 | [XX,YY] = meshgrid(x,y); 27 | xx = reshape(XX,1,numel(XX)); 28 | yy = reshape(YY,1,numel(YY)); 29 | zz = [xx;yy]; 30 | ff = GoldsteinPrice(zz); 31 | FF = reshape(ff,50,50); 32 | GoldsteinPriceContourHandle = contour(XX,YY,sqrt(FF),15); 33 | 34 | % Plot the solution: 35 | plot(0,-1,'rx','LineWidth',3,'MarkerSize',20); 36 | end 37 | 38 | %%%% Plot current position 39 | if isempty(GoldsteinPricePopulationHandle) 40 | x = dataLog.X(1,:); %First dimension 41 | y = dataLog.X(2,:); %Second dimension 42 | GoldsteinPricePopulationHandle = plot(x,y,'k.','MarkerSize',10); 43 | else 44 | x = dataLog.X(1,:); %First dimension 45 | y = dataLog.X(2,:); %Second dimension 46 | set(GoldsteinPricePopulationHandle,... 47 | 'xData',x, 'yData',y); 48 | end 49 | 50 | 51 | %%%% Plot best position for each particle 52 | if isempty(GoldsteinPricePopBestHandle) 53 | x = dataLog.X_Best(1,:); %First dimension 54 | y = dataLog.X_Best(2,:); %Second dimension 55 | GoldsteinPricePopBestHandle = plot(x,y,'ko','MarkerSize',5,'LineWidth',1); 56 | else 57 | x = dataLog.X_Best(1,:); %First dimension 58 | y = dataLog.X_Best(2,:); %Second dimension 59 | set(GoldsteinPricePopBestHandle,... 60 | 'xData',x, 'yData',y); 61 | end 62 | 63 | %%%% Plot best ever position of the entire swarm 64 | if isempty(GoldsteinPriceGlobalHandle) 65 | x = dataLog.X_Global(1,:); %First dimension 66 | y = dataLog.X_Global(2,:); %Second dimension 67 | GoldsteinPriceGlobalHandle = plot(x,y,'bo','MarkerSize',8,'LineWidth',2); 68 | else 69 | x = dataLog.X_Global(1,:); %First dimension 70 | y = dataLog.X_Global(2,:); %Second dimension 71 | set(GoldsteinPriceGlobalHandle,... 72 | 'xData',x, 'yData',y); 73 | end 74 | 75 | %%%% Annotations 76 | title(sprintf('Iteration: %d, ObjVal: %6.3g', iter, dataLog.F_Global)); 77 | xlabel('x1'); 78 | ylabel('x2'); 79 | 80 | %%%% Format the axis so things look right: 81 | axis equal; axis(2*[-1,1,-1,1]); 82 | 83 | %%%% Push the draw commands through the plot buffer 84 | drawnow; 85 | pause(0.05); %Slow down animation 86 | 87 | end -------------------------------------------------------------------------------- /plotHimmelblau.m: -------------------------------------------------------------------------------- 1 | function plotHimmelblau(dataLog, iter) 2 | % plotHimmelblau(dataLog, iter) 3 | % 4 | % Used to display progress as the optimization progresses 5 | % 6 | % dataLog(iter) = struct array with data from each iteration 7 | % .X = [n,m] = current position of each particle 8 | % .V = [n,m] = current "velocity" of each particle 9 | % .F = [1,m] = value of each particle 10 | % .X_Best = [n,m] = best point for each particle 11 | % .F_Best = [1,m] = value of the best point for each particle 12 | % .X_Global = [n,1] = best point ever (over all particles) 13 | % .F_Global = [1,1] = value of the best point ever 14 | % .I_Global = [1,1] = index of the best point ever 15 | % 16 | 17 | global HimmelblauContourHandle HimmelblauPopulationHandle 18 | global HimmelblauPopBestHandle HimmelblauGlobalHandle 19 | global HimmelblauSaveAnimation HimmelblauFrameId 20 | 21 | figure(200); hold on; 22 | 23 | %%%% Plot the function to be optimized 24 | if isempty(HimmelblauContourHandle) 25 | x = linspace(-5,5,50); 26 | y = linspace(-5,5,50); 27 | [XX,YY] = meshgrid(x,y); 28 | xx = reshape(XX,1,numel(XX)); 29 | yy = reshape(YY,1,numel(YY)); 30 | zz = [xx;yy]; 31 | ff = Himmelblau(zz); 32 | FF = reshape(ff,50,50); 33 | HimmelblauContourHandle = contour(XX,YY,FF,15); 34 | 35 | % Plot the solution: 36 | plot(3.0, 2.0,'rx','LineWidth',3,'MarkerSize',20); 37 | plot(-2.805118, 3.131312,'rx','LineWidth',3,'MarkerSize',20); 38 | plot(-3.779310, -3.283186,'rx','LineWidth',3,'MarkerSize',20); 39 | plot(3.584428, -1.848126,'rx','LineWidth',3,'MarkerSize',20); 40 | end 41 | 42 | %%%% Plot current position 43 | if isempty(HimmelblauPopulationHandle) 44 | x = dataLog.X(1,:); %First dimension 45 | y = dataLog.X(2,:); %Second dimension 46 | HimmelblauPopulationHandle = plot(x,y,'k.','MarkerSize',10); 47 | else 48 | x = dataLog.X(1,:); %First dimension 49 | y = dataLog.X(2,:); %Second dimension 50 | set(HimmelblauPopulationHandle,... 51 | 'xData',x, 'yData',y); 52 | end 53 | 54 | 55 | %%%% Plot best position for each particle 56 | if isempty(HimmelblauPopBestHandle) 57 | x = dataLog.X_Best(1,:); %First dimension 58 | y = dataLog.X_Best(2,:); %Second dimension 59 | HimmelblauPopBestHandle = plot(x,y,'ko','MarkerSize',5,'LineWidth',1); 60 | else 61 | x = dataLog.X_Best(1,:); %First dimension 62 | y = dataLog.X_Best(2,:); %Second dimension 63 | set(HimmelblauPopBestHandle,... 64 | 'xData',x, 'yData',y); 65 | end 66 | 67 | %%%% Plot best ever position of the entire swarm 68 | if isempty(HimmelblauGlobalHandle) 69 | x = dataLog.X_Global(1,:); %First dimension 70 | y = dataLog.X_Global(2,:); %Second dimension 71 | HimmelblauGlobalHandle = plot(x,y,'bo','MarkerSize',8,'LineWidth',2); 72 | else 73 | x = dataLog.X_Global(1,:); %First dimension 74 | y = dataLog.X_Global(2,:); %Second dimension 75 | set(HimmelblauGlobalHandle,... 76 | 'xData',x, 'yData',y); 77 | end 78 | 79 | %%%% Annotations 80 | title(sprintf('Iteration: %d, ObjVal: %6.3g', iter, dataLog.F_Global)); 81 | xlabel('x1'); 82 | ylabel('x2'); 83 | 84 | %%%% Format the axis so things look right: 85 | axis equal; axis(5*[-1,1,-1,1]); 86 | 87 | %%%% Push the draw commands through the plot buffer 88 | drawnow; 89 | pause(0.05); %Slow down animation 90 | 91 | %%%% Save animation if desired: 92 | animationFileName = 'PSO_Himmelblau_Animation.gif'; 93 | if isempty(HimmelblauFrameId) 94 | HimmelblauFrameId = 1; 95 | else 96 | HimmelblauFrameId = HimmelblauFrameId + 1; 97 | end 98 | if isempty(HimmelblauSaveAnimation) 99 | HimmelblauSaveAnimation = false; 100 | else 101 | if HimmelblauSaveAnimation 102 | frame = getframe(gcf); 103 | im = frame2im(frame); 104 | [imind,cm] = rgb2ind(im,256); 105 | if HimmelblauFrameId == 1; 106 | imwrite(imind,cm,animationFileName,'gif', 'Loopcount',inf); 107 | else 108 | imwrite(imind,cm,animationFileName,'gif','WriteMode','append'); 109 | end 110 | end 111 | end 112 | 113 | end -------------------------------------------------------------------------------- /plotPsoHistory.m: -------------------------------------------------------------------------------- 1 | function plotPsoHistory(info) 2 | % plotPsoHistory(info) 3 | % 4 | % Plots the history of the optimization for particle swarm optimization, 5 | % using the info struct returned by PSO 6 | % 7 | 8 | % Check if a log scale is appropriate 9 | useLogScale = false; 10 | isPos = [info.F_Global; info.F_Mean; info.F_Best_Mean] > 0; 11 | if sum(~isPos)==0 %then log scale is possible 12 | val = sort(info.F_Global([1,end])); %ascending order 13 | if (val(2)/val(1)) > 50 % Then probably on log scale 14 | useLogScale = true; 15 | end 16 | end 17 | 18 | %Plot the function value over time: 19 | subplot(2,2,1); hold on; 20 | plot(info.iter, info.F_Best_Mean); 21 | plot(info.iter, info.F_Mean); 22 | plot(info.iter, info.F_Global); 23 | xlabel('iteration') 24 | ylabel('objective') 25 | title('Objective value') 26 | legend('mean(F\_best)','mean(F)','Global Best') 27 | if useLogScale, set(gca,'YScale','log'); end 28 | 29 | %Plot the variance in the function value over time: 30 | subplot(2,2,3); hold on; 31 | plot(info.iter, info.F_Best_Var); 32 | plot(info.iter, info.F_Var); 33 | xlabel('iteration') 34 | ylabel('objective variance') 35 | title('Objective value variance') 36 | legend('var(F\_best)','var(F)') 37 | if useLogScale, set(gca,'YScale','log'); end 38 | 39 | 40 | %Plot the search variance along each dimension: 41 | subplot(2,2,2); hold on; 42 | nDim = size(info.X_Mean,1); 43 | colorMap = lines(nDim); 44 | legendData = cell(1,nDim); 45 | for i=1:nDim 46 | plot(info.iter, info.X_Mean(i,:), 'Color',colorMap(i,:), 'LineWidth', 1); 47 | legendData{i} = ['x' num2str(i)]; 48 | end 49 | if nDim < 8, legend(legendData); end 50 | for i=1:nDim 51 | plot(info.iter, info.X_Best_Mean(i,:), 'Color',colorMap(i,:), 'LineWidth', 2); 52 | plot(info.iter, info.X_Global(i,:), 'Color',colorMap(i,:), 'LineWidth', 4); 53 | end 54 | xlabel('iteration') 55 | ylabel('state') 56 | title('search position') 57 | 58 | %Plot the search variance along each dimension: 59 | subplot(2,2,4); hold on; 60 | colorMap = lines(nDim); 61 | for i=1:nDim 62 | plot(info.iter, info.X_Var(i,:), 'Color',colorMap(i,:), 'LineWidth', 1); 63 | end 64 | if nDim < 8, legend(legendData); end 65 | for i=1:nDim 66 | plot(info.iter, info.X_Best_Var(i,:), 'Color',colorMap(i,:), 'LineWidth', 2); 67 | end 68 | xlabel('iteration') 69 | ylabel('state variance') 70 | title('search position variance') 71 | 72 | 73 | end -------------------------------------------------------------------------------- /plotStyblinskiTang.m: -------------------------------------------------------------------------------- 1 | function plotStyblinskiTang(dataLog, iter) 2 | % plotStyblinskiTang(dataLog, iter) 3 | % 4 | % Used to display progress as the optimization progresses 5 | % 6 | % dataLog(iter) = struct array with data from each iteration 7 | % .X = [n,m] = current position of each particle 8 | % .V = [n,m] = current "velocity" of each particle 9 | % .F = [1,m] = value of each particle 10 | % .X_Best = [n,m] = best point for each particle 11 | % .F_Best = [1,m] = value of the best point for each particle 12 | % .X_Global = [n,1] = best point ever (over all particles) 13 | % .F_Global = [1,1] = value of the best point ever 14 | % .I_Global = [1,1] = index of the best point ever 15 | % 16 | 17 | global StyblinskiTangContourHandle StyblinskiTangPopulationHandle 18 | global StyblinskiTangPopBestHandle StyblinskiTangGlobalHandle 19 | 20 | figure(200); hold on; 21 | 22 | %%%% Plot the function to be optimized 23 | if isempty(StyblinskiTangContourHandle) 24 | x = linspace(-5,5,50); 25 | y = linspace(-5,5,50); 26 | [XX,YY] = meshgrid(x,y); 27 | xx = reshape(XX,1,numel(XX)); 28 | yy = reshape(YY,1,numel(YY)); 29 | zz = [xx;yy]; 30 | ff = StyblinskiTang(zz); 31 | FF = reshape(ff,50,50); 32 | StyblinskiTangContourHandle = contour(XX,YY,FF,15); 33 | 34 | % Plot the solution: 35 | plot(-2.903534,-2.903534,'rx','LineWidth',4,'MarkerSize',25); 36 | end 37 | 38 | %%%% Plot current position 39 | if isempty(StyblinskiTangPopulationHandle) 40 | x = dataLog.X(1,:); %First dimension 41 | y = dataLog.X(2,:); %Second dimension 42 | StyblinskiTangPopulationHandle = plot(x,y,'k.','MarkerSize',20); 43 | else 44 | x = dataLog.X(1,:); %First dimension 45 | y = dataLog.X(2,:); %Second dimension 46 | set(StyblinskiTangPopulationHandle,... 47 | 'xData',x, 'yData',y); 48 | end 49 | 50 | 51 | %%%% Plot best position for each particle 52 | if isempty(StyblinskiTangPopBestHandle) 53 | x = dataLog.X_Best(1,:); %First dimension 54 | y = dataLog.X_Best(2,:); %Second dimension 55 | StyblinskiTangPopBestHandle = plot(x,y,'ko','MarkerSize',10,'LineWidth',2); 56 | else 57 | x = dataLog.X_Best(1,:); %First dimension 58 | y = dataLog.X_Best(2,:); %Second dimension 59 | set(StyblinskiTangPopBestHandle,... 60 | 'xData',x, 'yData',y); 61 | end 62 | 63 | %%%% Plot best ever position of the entire swarm 64 | if isempty(StyblinskiTangGlobalHandle) 65 | x = dataLog.X_Global(1,:); %First dimension 66 | y = dataLog.X_Global(2,:); %Second dimension 67 | StyblinskiTangGlobalHandle = plot(x,y,'bo','MarkerSize',15,'LineWidth',3); 68 | else 69 | x = dataLog.X_Global(1,:); %First dimension 70 | y = dataLog.X_Global(2,:); %Second dimension 71 | set(StyblinskiTangGlobalHandle,... 72 | 'xData',x, 'yData',y); 73 | end 74 | 75 | %%%% Annotations 76 | title(sprintf('Iteration: %d, ObjVal: %6.3g', iter, dataLog.F_Global)); 77 | xlabel('x1'); 78 | ylabel('x2'); 79 | 80 | %%%% Format the axis so things look right: 81 | axis equal; axis(5*[-1,1,-1,1]); 82 | 83 | %%%% Push the draw commands through the plot buffer 84 | drawnow; 85 | pause(0.05); %Slow down animation 86 | 87 | end --------------------------------------------------------------------------------