├── .gitignore
├── LICENSE
├── README.md
├── problem.m
├── qaoa
│ ├── kronm.m
│ ├── interpolation.m
│ ├── variational_state.m
│ ├── expval.m
│ ├── qaoa.m
│ └── optimization
│   ├── nm.m
│   ├── brute.m
│   ├── bruteforce.m
│   ├── gs.m
│   ├── ms.m
│   ├── pso.m
│   ├── baynm.m
│   └── bso.m
└── utilities
  └── exactproblem.m
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | .DS_Store
3 | *.m~
4 |
--------------------------------------------------------------------------------
/qaoa/kronm.m:
--------------------------------------------------------------------------------
1 | function x = kronm(Q,x)
2 | % Fast Kronecker matrix multiplication.
3 | % Never computes the actual Kronecker matrix and omits
4 | % multiplication by identity matrices.
5 | % https://math.stackexchange.com/questions/3175653/how-to-efficiently-compute-the-matrix-vector-product-y-i-p-otimes-a-otimes
6 |
7 | L = Q{1}; % LEFT
8 | M = Q{2}; % MIDDLE
9 | R = Q{3}; % RIGHT
10 |
11 | % rearrange x to 2-by-LR
12 | x = reshape(permute(reshape(x,[R,2,L]),[2,1,3]),[2,L*R]);
13 |
14 | % actual multiplication: just of size 2
15 | x = M*x;
16 | % rearrange the result back
17 | x = reshape(x,[2,R,L]);
18 | x = ipermute(x,[2,1,3]);
19 | x = x(:);
20 | end
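21 |
22 | % Example (illustrative check, not part of the original file): applying
23 | % sigma-x to qubit 2 of q = 3 qubits, i.e. y = (I ⊗ σx ⊗ I)·x, should
24 | % match the explicit Kronecker product:
25 | %
26 | %   sigma_x = [0 1;1 0];
27 | %   x = rand(8,1);
28 | %   y1 = kronm({2,sigma_x,2},x);              % never forms the 8x8 matrix
29 | %   y2 = kron(eye(2),kron(sigma_x,eye(2)))*x; % explicit construction
30 | %   norm(y1-y2)                               % ~0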
--------------------------------------------------------------------------------
/qaoa/optimization/nm.m:
--------------------------------------------------------------------------------
1 | function result = nm(problem)
2 | % Nelder-Mead (minimal wrapper around fminsearch)
3 | nm.objective = problem.objective;
4 | nm.x0 = problem.x0;
5 | nm.solver = 'fminsearch';
6 | options = optimset('Display','iter' ...
7 | ,'PlotFcns',@optimplotfval ...
8 | ... Termination tolerance on the function value, (default) 1e-4
9 | ,'TolFun',1e-2 ...
10 | ... Termination tolerance on x, (default) 1e-4
11 | ,'TolX',1e-2 ...
12 | );
13 | nm.options = options;
14 | [xmin,fval,exitflag,output] = fminsearch(nm);
15 | result.xmin = xmin;
16 | result.fval = fval;
17 | result.exitflag = exitflag;
18 | result.output = output;
19 | end
20 |
21 |
--------------------------------------------------------------------------------
/qaoa/optimization/brute.m:
--------------------------------------------------------------------------------
1 | function result = brute(problem)
2 | %{
3 | BRUTE FORCE METHOD
4 |
5 | Description
6 | ----------
7 | Bruteforce creates a grid from the given variable ranges, evaluates
8 | the function at every grid point, and then selects the grid point
9 | where the function has its minimum value.
10 |
11 | Note
12 | ----------
13 | This algorithm is very slow for p larger than one, since the grid has steps^(2p) points.
14 | %}
15 |
16 | p = length(problem.x0)/2;
17 | fun = problem.objective;
18 |
19 | steps = 200; % This determines the size of the grid
20 | gamma_vec = linspace(0,pi,steps);
21 | beta_vec = linspace(0,pi,steps);
22 | ranges = [repmat({gamma_vec},1,p) repmat({beta_vec},1,p)];
23 | [xmin,fval] = bruteforce(fun,ranges);
24 | result.xmin = xmin;
25 | result.fval = fval;
26 | end
27 |
28 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Pontus Wikståhl
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/qaoa/interpolation.m:
--------------------------------------------------------------------------------
1 | function [x0] = interpolation(x0)
2 | %{
3 | INTERPOLATION-BASED STRATEGY
4 |
5 | Description
6 | -----------
7 | Uses linear interpolation to produce a good starting point
8 | for optimizing QAOA as one iteratively increases the
9 | level p.
10 |
11 | Parameters
12 | ----------
13 | x0 : 1-2(p-1) array (row vector)
14 | Optimal angles for level p-1
15 |
16 | Returns
17 | -------
18 | x0 : 1-2p array (row vector)
19 | Starting-points for level p.
20 | %}
21 |
22 | % declare variables
23 | p = length(x0)/2;
24 | gamma_opt = x0(1:p);
25 | beta_opt = x0((p+1):2*p);
26 | gamma0 = zeros(1,p);
27 | beta0 = zeros(1,p);
28 |
29 | % gamma0 and beta0
30 | for j = 1:(p+1)
31 | if j == 1
32 | gamma0(j) = (p-j+1)/p * gamma_opt(j);
33 | beta0(j) = (p-j+1)/p * beta_opt(j);
34 | elseif j == (p+1)
35 | gamma0(j) = (j-1)/p * gamma_opt(j-1);
36 | beta0(j) = (j-1)/p * beta_opt(j-1);
37 | else
38 | gamma0(j) = (j-1)/p * gamma_opt(j-1) + (p-j+1)/p * gamma_opt(j);
39 | beta0(j) = (j-1)/p * beta_opt(j-1) + (p-j+1)/p * beta_opt(j);
40 | end
41 | end
42 |
43 | x0 = [gamma0, beta0];
44 | end
45 |
46 |
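47 | % Example (illustrative, not part of the original file): promoting a
48 | % level-1 optimum [g, b] to a level-2 starting point returns
49 | % [g, g, b, b], i.e. the level-1 schedule linearly interpolated onto
50 | % two points:
51 | %
52 | %   x0 = interpolation([0.3, 1.1]); % -> [0.3, 0.3, 1.1, 1.1]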
--------------------------------------------------------------------------------
/qaoa/variational_state.m:
--------------------------------------------------------------------------------
1 | function [s] = variational_state(cost,p,q,s,X,gamma,beta)
2 | % Constructs the variational quantum state |γ,β⟩
3 | %
4 | % Parameters
5 | % ----------
6 | % gamma: 1-p Array (row vector)
7 | % Array with angles [γ1 ... γp]
8 | %
9 | % beta: 1-p Array (row vector)
10 | % Array with angles [β1 ... βp]
11 | %
12 | % cost : 1-D array (column vector) containing all the values of the cost
13 | % function.
14 | %
15 | % p : integer
16 | % The number of iterations.
17 | %
18 | % q : integer
19 | % The number of qubits.
20 | %
21 | % s : 1-D array (column vector)
22 | % initial state vector |+⟩ = H^(⊗N)·|0⟩⊗|0⟩⊗...⊗|0⟩
23 | %
24 | % X : 1-q cell
25 | % Reduced Pauli sigma-x matrices
26 | %
27 | % Returns
28 | % -------
29 | % s : 1-2^q Array (column vector)
30 | % Returns the state vector |γ,β⟩.
31 |
32 |
33 | % Final state |γ,β⟩ = U(B,β_p)U(C,γ_p) ... U(B,β_1)U(C,γ_1)|s⟩
34 | for i = 1:p
35 | % |s⟩ = U(C,γ_i)·|s⟩
36 | % Hadamard product, in other words, we do an entrywise product, since
37 | % the Hamiltonian is diagonal.
38 | s = exp(-1j * gamma(i) * cost) .* s;
39 |
40 | % |s⟩ = U(B,β_i)*|s⟩
41 | for j = 1:q
42 | % Construct the rotation matrix and apply it to the state vector.
43 | % Use fast Kronecker matrix multiplication for matrices
44 | s = cos(beta(i)) * s - 1j * sin(beta(i)) * kronm(X{j},s);
45 | end
46 | end
47 | end
48 |
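49 | % Example (illustrative sketch, assuming a single qubit at level p = 1):
50 | %
51 | %   cost = [1;-1];               % eigenvalues of the cost Hamiltonian C = Z
52 | %   X = {{1,[0 1;1 0],1}};       % reduced Pauli sigma-x cell (see qaoa.m)
53 | %   s = [1;1]/sqrt(2);           % |+⟩
54 | %   s = variational_state(cost,1,1,s,X,pi/4,3*pi/4);
55 | %   % s is now, up to a global phase, the ground state |1⟩ of C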
--------------------------------------------------------------------------------
/problem.m:
--------------------------------------------------------------------------------
1 | %{
2 | Description: Simulates the quantum approximate optimization
3 | algorithm (QAOA).
4 |
5 | Developer: Pontus Vikstål
6 | %}
7 | addpath('qaoa')
8 |
9 | % Eigenvalues of the Cost Hamiltonian given as a column vector
10 | cost = [1;-1];
11 | cost_min = min(cost); % Smallest eigenvalue
12 |
13 | % Circuit depth
14 | p = 1;
15 | % Angles
16 | gamma = [];
17 | beta = [];
18 | % Classical optimizer
19 | minimizer = 'GlobalSearch';
20 |
21 | %{
22 | Run the QAOA. Given the eigenvalues of the cost Hamiltonian, the iteration
23 | level p, a classical optimizer (optional), and starting point(s) for the
24 | classical optimizer (optional), the code returns the variational state |γ,β⟩
25 | (final_state) using the best angles found by the classical optimizer.
26 | The result from the classical optimizer is also returned as a struct.
27 | If a non-empty array with angles is given as input, the classical
28 | optimizer is not used, and the final_state is computed using the
29 | input angles, and the result is set to 0.
30 | %}
31 | [final_state,result] = qaoa(cost,p,gamma,beta,minimizer);
32 |
33 | % Obtain the probability distribution by taking the absolute square |c_n|^2
34 | % of each element of the final state vector |γ,β⟩.
35 | probabilities = abs(final_state).^2;
36 |
37 | % Calculates the expectation value ⟨γ,β|C|γ,β⟩. We take the real part
38 | % to remove the small imaginary part due to machine inaccuracy.
39 | exp_val = real(final_state' * (cost .* final_state));
40 | fprintf('Expected value = %f \n',round(exp_val,2));
41 |
42 | % Calculate the approximation ratio
43 | % r = (⟨γ,β|C|γ,β⟩ - C_max)/(C_min - C_max), 0 ≤ r ≤ 1
44 | cost_max = max(cost);
45 | approx_ratio = (exp_val-cost_max)/(cost_min - cost_max);
46 | fprintf('Approximation ratio = %f \n',round(approx_ratio,2));
47 |
48 | % Print the probability of obtaining the optimal solution.
49 | z = find(cost == cost_min); % In case of the ground state being degenerate
50 | fidelity = sum(probabilities(z));
51 | fprintf('Success probability = %f %%\n',round(fidelity*100,2));
52 |
53 | rmpath('qaoa')
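54 |
55 | %{
56 | Worked check (illustrative, not part of the original script): for this
57 | single-spin example one can show analytically that
58 | F_1(γ,β) = ⟨γ,β|C|γ,β⟩ = sin(2β)·sin(2γ),
59 | whose minimum -1 is attained at e.g. (γ,β) = (π/4, 3π/4) inside the
60 | search box [0,π]^2. A successful run should therefore report an
61 | expected value ≈ -1, an approximation ratio ≈ 1, and a success
62 | probability ≈ 100%.
63 | %}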
--------------------------------------------------------------------------------
/utilities/exactproblem.m:
--------------------------------------------------------------------------------
1 |
2 | % Example
3 | % Remember |-1/2> -> |0> -> 0
4 | % |1/2>-> |1> -> 1
5 | %{
6 | clc;clear
7 | S = {[0,1,2,3,4,5,6],[1,2,3,5]};
8 | U = [0,1,2,3,4,5,6];
9 |
10 |
11 | A = [1 0;
12 | 1 1;
13 | 1 0];
14 | [problem,J,h,c,eigvals] = exactproblem(A);
15 | %}
16 | function [problem,J,h,c,eigvals] = exactproblem(A)
17 | %EXACTPROBLEM creates a problem structure of the exact cover problem
18 | % Input:
19 | % A: m-n matrix representing the problem; rows index the m elements, columns the n subsets
20 | %
21 | % Returns:
22 | % problem: Problem structure for the exact cover problem
23 | %
24 | % J: n-n matrix representing the couplings
25 | %
26 | % h: n-1 vector representing the magnetic fields
27 | %
28 | % c: scalar constant (energy offset) of the Ising Hamiltonian
29 | %
30 | % eigvals: eigenvalues of the Hamiltonian
29 |
30 | problem.Aeq = A;
31 | problem.lb = zeros(size(A,2),1); % lower bounds for the n binary variables
32 | problem.ub = ones(size(A,2),1); % upper bounds for the n binary variables
33 | problem.beq = ones(size(A,1),1); % each of the m elements covered exactly once
34 |
35 | K = A;
36 | v = size(K,2); % number of subsets
37 | J = zeros(v,v); % couplings
38 | h = zeros(v,1); % magnetic fields
39 |
40 | % Coupling matrix
41 | for i = 1:v
42 | for j = 1:v
43 | J(i,j) = 1/2 * sum(K(:,i) .* K(:,j));
44 | end
45 | end
46 |
47 | % Magnetic field vector
48 | for i = 1:v
49 | h(i) = sum(K(:,i)) - 1/2 * sum(K .* K(:,i), 'all');
50 | end
51 |
52 | % Constant
53 | c = 1/4 * sum((sum(K,2) - 2).^2) + 1/2 * trace(J);
54 |
55 | %{
56 | % Pauli-Z
57 | sigma_z = [ 1;
58 | -1];
59 | % Construct all possible Pauli-Z matrices
60 | Z = cell(1,v);
61 | for i = 1:v
62 | Z{i} = kron(ones(2^(i-1),1),kron(sigma_z,ones(2^(v-i),1)));
63 | end
64 |
65 | % Hamiltonian
66 | H = zeros(2^v,1);
67 |
68 | % J[i][j] terms
69 | for i = 1:(v-1)
70 | L = 0;
71 | for j = (i+1):v
72 | L = L + J(i,j) * (Z{i} .* Z{j});
73 | end
74 | H = H + L;
75 | end
76 |
77 | % h[i] terms
78 | for i = 1:v
79 | H = H + h(i) * Z{i};
80 | end
81 |
82 | % constant term
83 | H = H + ones(2^v,1) * c;
84 |
85 | eigvals = H;
86 | %}
87 | % Construct all possible solutions
88 | smat = dec2bin(0:2^v-1)-'0';
89 | smat = sum((smat*A'-1).^2, 2);
90 | eigvals = smat;
91 |
92 | end
93 |
94 |
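95 | % Worked check (illustrative): for the example matrix A above
96 | % (3 elements, 2 subsets), the four candidate solutions x ∈ {00,01,10,11}
97 | % give eigvals = [3;2;0;1] (in dec2bin order), so the exact cover is
98 | % x = [1;0], i.e. picking only the first subset, with zero cost.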
--------------------------------------------------------------------------------
/qaoa/optimization/gs.m:
--------------------------------------------------------------------------------
1 | function result = gs(problem)
2 | %{
3 | GLOBAL SEARCH METHOD (Default)
4 |
5 | Description
6 | ----------
7 | GlobalSearch uses a scatter-search mechanism for generating start
8 | points. GlobalSearch analyzes start points and rejects those points
9 | that are unlikely to improve the best local minimum found so far.
10 | %}
11 |
12 |
13 | % Create options, see following link for possible options
14 | % https://se.mathworks.com/help/optim/ug/fmincon.html
15 | opts = optimoptions(...
16 | ... Solver https://se.mathworks.com/help/optim/ug/fmincon.html
17 | @fmincon...
18 | ... Optimization algorithm, 'interior-point' (default)
19 | ... https://se.mathworks.com/help/optim/ug/choosing-the-algorithm.html
20 | ,'Algorithm','trust-region-reflective' ...
21 | ... Level of display, 'final' (default) | 'iter'
22 | ,'Display','final' ...
23 | ... Termination tolerance on x, 1e-6 (default) | 1e-10 (interior-point)
24 | ,'StepTolerance',1e-3 ...
25 | ... Termination tolerance on the function value, 1e-6 (default)
26 | ,'FunctionTolerance',1e-3 ...
27 | ... Max number of iterations, 400 (default) | 1000 (interior-point)
28 | ,'MaxIterations',400 ...
29 | ... Maximum number of function evaluations allowed,
30 | ... 100*numberOfVariables (default) | 3000 (interior-point)
31 | ...,'MaxFunctionEvaluations',100 ...
32 | ... Gradient for the objective function, false (default)
33 | ,'SpecifyObjectiveGradient',true ...
34 | ... Compare user-supplied derivatives to finite-differencing
35 | ... derivatives, false (default) | true
36 | ,'CheckGradients',false ...
37 | ... Finite differences, used to estimate gradients
38 | ... 'forward' (default) | 'central'
39 | ,'FiniteDifferenceType','central' ...
40 | ... Scalar or vector step size factor for finite differences
41 | ,'FiniteDifferenceStepSize',1e-6 ...
42 | ... When true, fmincon estimates gradients in parallel, false (default)
43 | ,'UseParallel',true ...
44 | );
45 |
46 | problem.solver = 'fmincon';
47 | problem.options = opts;
48 |
49 | gs = GlobalSearch();
50 | gs.NumTrialPoints = 1e3; % Number of random seeds (starting points)
51 |
52 | [xmin,fval,exitflag,output,solutions] = run(gs,problem);
53 |
54 | result.xmin = xmin;
55 | result.fval = fval;
56 | result.exitflag = exitflag;
57 | result.output = output;
58 | result.solutions = solutions;
59 | end
60 |
61 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MatlabQAOA
2 | Simulating the quantum approximate optimization algorithm with Matlab
3 |
4 | # Table of contents
5 | 1. [Installation](#installation)
6 | 2. [Usage](#usage)
7 |
8 | ## Installation
9 | Installation requires MATLAB R2018b or newer.
10 |
11 | ## Usage
12 |
13 | The code is executed using the `problem.m` script file. To get started, specify the cost Hamiltonian whose ground state you want to find as a 1-D array (column vector) containing all of its energy eigenvalues. Storing the cost Hamiltonian as a vector is more memory efficient than storing it as a matrix, which is possible because the cost Hamiltonian is diagonal in the computational basis. Next, choose the number of iterations that you want the QAOA to run, i.e. the variable `p`. You have the option to specify the angles `gamma` and `beta` or simply leave them as empty arrays. If you specify `gamma` and `beta` as arrays, the QAOA will use these angles to construct the variational state. Otherwise, if the angles are given as empty arrays, the code will try to find the optimal ones using MATLAB's [GlobalSearch](https://se.mathworks.com/help/gads/globalsearch.html) classical optimizer by default. You can change which classical optimizer to use; there are seven in total to choose from.
14 |
15 | 1. `GlobalSearch` (Default)
16 | 2. `MultiStart`
17 | 3. `Bayesian`
18 | 4. `BayesianHybridNelderMead`
19 | 5. `NelderMead`
20 | 6. `ParticleSwarm`
21 | 7. `BruteForce`
22 |
23 | When you run the `problem.m` script file, it calls the `qaoa` function (**qaoa.m**). This function takes up to six input arguments: two are required, the rest are optional. The required ones are: **1.** the eigenvalues of the cost Hamiltonian `cost`, and **2.** the number of iterations `p` of the QAOA algorithm. The optional ones are: **3.** the `gamma` angles; **4.** the `beta` angles; **5.** the classical optimizer; and **6.** potential starting points for the classical optimizer to use. The `qaoa` function returns the final variational state |γ,β⟩ (using either the given input angles or the best angles found by the classical optimizer) and the result from the classical optimizer as a struct.
24 |
25 | A typical example could be
26 | ```
27 | % Eigenvalues of the Cost Hamiltonian given as a column vector for a single spin 1/2 particle
28 | cost = [1;-1];
29 |
30 | % Circuit depth
31 | p = 1;
32 | % Angles
33 | gamma = [];
34 | beta = [];
35 | % Classical optimizer
36 | minimizer = 'GlobalSearch';
37 | [final_state,result] = qaoa(cost,p,gamma,beta,minimizer);
38 | ```
39 |
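40 | If you already know good angles, pass them in as non-empty arrays and the classical optimizer is skipped (`result` is then set to `0`, as described above). A minimal sketch, reusing the single-spin example where (γ,β) = (π/4, 3π/4) happens to be optimal:
41 |
42 | ```
43 | % Reuse known angles; no classical optimization is performed
44 | gamma = pi/4;
45 | beta = 3*pi/4;
46 | [final_state,result] = qaoa(cost,p,gamma,beta);
47 | ```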
--------------------------------------------------------------------------------
/qaoa/optimization/bruteforce.m:
--------------------------------------------------------------------------------
1 | function [xmin,fval] = bruteforce(func,ranges)
2 | % Minimize a function over a given range by brute force.
3 | %
4 | % Uses the "brute force" method, i.e. computes the function's value
5 | % at each point of a multidimensional grid of points, to find the global
6 | % minimum of the function.
7 | %
8 | % Parameters
9 | % ----------
10 | % func: @(x,y,...)
11 | % The objective function to be minimized
12 | %
13 | % ranges: cell
14 | % The program uses these to create the grid of points
15 | % on which the objective function will be computed.
16 | %
17 | % Returns
18 | % -------
19 | % xmin : array
20 | % A 1-D array containing the coordinates of a grid point at which
21 | % the objective function has its minimum value.
22 | %
23 | % fval : float
24 | % The minimum value of the objective function over the grid.
22 |
23 |
24 | if iscell(ranges) == 0
25 | error('Ranges must be in cell format.')
26 | end
27 |
28 | % Number of parameters to optimize.
29 | N = length(ranges);
30 |
31 | % Create a 1xN cell array
32 | grid = cell(1,N);
33 |
34 | % Fill each cell array with grid points
35 | [grid{:}] = ndgrid(ranges{:});
36 |
37 | % Concatenate and reshape the grid
38 | grid = reshape(cat(N,grid{:}),[],N);
39 |
40 | % Size of grid
41 | Nsize = size(grid);
42 |
43 | % Allocate memory
44 | fgrid = ones(1,Nsize(1));
45 |
46 | % Function values at each point of the evaluation grid
47 | parfor i = 1:Nsize(1)
48 | fgrid(i) = func(grid(i,:));
49 | end
50 |
51 | % Find global minimum
52 | [fval, idx] = min(fgrid);
53 |
54 | % Grid point where the function has its global minimum
55 | xmin = grid(idx,:);
56 |
57 | if N/2 == 1
58 | % Make a 3D plot of func only if p = 1
59 | X = ranges{1};
60 | Y = ranges{2};
61 | Z = reshape(fgrid,length(X),length(Y)).'; % transpose: rows index beta (Y), columns gamma (X)
62 | f = figure('Renderer', 'painters', 'Position', [0 0 1500 600]);
63 | movegui(f,'center')
64 | subplot(1,2,1)
65 | % Surface plot of the objective over the (gamma,beta) grid
66 | surf(X,Y,Z,'EdgeColor','none')
67 | colormap jet % color
68 |
69 | xlabel('$\gamma$','Interpreter','latex')
70 | ylabel('$\beta$','Interpreter','latex')
71 | zlabel('$F_1(\gamma,\beta)$','Interpreter','latex')
72 | set(gca, 'FontSize', 20);
73 |
74 | subplot(1,2,2)
75 | % Heatmap of the same data
76 | imagesc(X,Y,Z)
77 | %contourf(X,Y,Z)
78 | colorbar
79 |
80 | xlabel('$\gamma$','Interpreter','latex','FontSize',50)
81 | ylabel('$\beta$','Interpreter','latex','FontSize',50)
82 | zlabel('$F_1(\gamma,\beta)$','Interpreter','latex')
83 | set(gca, 'FontSize', 30);
84 | set(gca,'YDir','normal'); % coord (0,0) at origin
85 |
86 |
87 | savefig('bruteforce.fig')
88 | end
89 |
90 | end
91 |
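92 | % Example (illustrative sketch, not part of the original file):
93 | %
94 | %   f = @(x) (x(1)-1).^2 + x(2).^2;
95 | %   [xmin,fval] = bruteforce(f,{[0 1],[0 1]});
96 | %   % grid rows are (0,0),(1,0),(0,1),(1,1); returns xmin = [1 0], fval = 0
97 | %   % (with two parameters this also draws and saves the diagnostic plot)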
--------------------------------------------------------------------------------
/qaoa/optimization/ms.m:
--------------------------------------------------------------------------------
1 | function result = ms(problem)
2 | %{
3 | MULTI START
4 |
5 | Description
6 | ----------
7 | MultiStart attempts to find multiple local minima of the function
8 | by starting from various points. It distributes the start points
9 | across multiple processors for local solution and returns the local
10 | solution where the function has its minimum value.
11 | %}
12 |
13 | % Create options, see following link for possible options
14 | % https://se.mathworks.com/help/optim/ug/fmincon.html
15 | opts = optimoptions(...
16 | ... Solver https://se.mathworks.com/help/optim/ug/fmincon.html
17 | @fmincon...
18 | ... Optimization algorithm, 'interior-point' (default)
19 | ... https://se.mathworks.com/help/optim/ug/choosing-the-algorithm.html
20 | ,'Algorithm','trust-region-reflective' ...
21 | ... Level of display, 'final' (default) | 'iter'
22 | ,'Display','final' ...
23 | ... Termination tolerance on x, 1e-6 (default) | 1e-10 (interior-point)
24 | ,'StepTolerance',1e-3 ...
25 | ... Termination tolerance on the function value, 1e-6 (default)
26 | ,'FunctionTolerance',1e-3 ...
27 | ... Max number of iterations, 400 (default) | 1000 (interior-point)
28 | ,'MaxIterations',400 ...
29 | ... Maximum number of function evaluations allowed,
30 | ... 100*numberOfVariables (default) | 3000 (interior-point)
31 | ...,'MaxFunctionEvaluations',100 ...
32 | ... Gradient for the objective function, false (default)
33 | ,'SpecifyObjectiveGradient',true ...
34 | ... Compare user-supplied derivatives to finite-differencing
35 | ... derivatives, false (default) | true
36 | ,'CheckGradients',false ...
37 | ... Finite differences, used to estimate gradients
38 | ... 'forward' (default) | 'central'
39 | ,'FiniteDifferenceType','central' ...
40 | ... Scalar or vector step size factor for finite differences
41 | ,'FiniteDifferenceStepSize',1e-6 ...
42 | ... When true, fmincon estimates gradients in parallel, false (default)
43 | ,'UseParallel',true ...
44 | );
45 |
46 | problem.solver = 'fmincon';
47 | problem.options = opts;
48 |
49 | ms = MultiStart('UseParallel',true);
50 | stpoints = CustomStartPointSet(problem.x0);
53 | problem.x0 = problem.x0(1,:);
54 | [xmin,fval,exitflag,output,solutions] = run(ms,problem,stpoints);
55 |
56 | result.xmin = xmin;
57 | result.fval = fval;
58 | result.exitflag = exitflag;
59 | result.output = output;
60 | result.solutions = solutions;
61 | end
62 |
63 |
--------------------------------------------------------------------------------
/qaoa/expval.m:
--------------------------------------------------------------------------------
1 | function [f,gradf] = expval(x,cost,p,q,s,X)
2 | % Calculate the expectation value f = ⟨γ,β|C|γ,β⟩, and the gradient of
3 | % the expectation value.
4 | %
5 | % Parameters
6 | % ----------
7 | % x : 1-2p Array (row vector)
8 | % Array with angles [γ1 ... γp β1 ... βp]
9 | %
10 | % cost : 1-D array (column vector) containing all the values of the cost
11 | % function.
12 | %
13 | % p : integer
14 | % The number of iterations.
15 | %
16 | % q : integer
17 | % The number of qubits.
18 | %
19 | % s : 1-D array (column vector)
20 | % initial state vector |+⟩ = H^(⊗N)·|0⟩⊗|0⟩⊗...⊗|0⟩
21 | %
22 | % X : 1-q cell
23 | % Reduced Pauli sigma-x matrices
24 | %
25 | % Returns
26 | % -------
27 | % f : float
28 | % Returns the expectation value of ⟨γ,β|C|γ,β⟩
29 | %
30 | % gradf : 1-2p Array (column vector)
31 | % Array with the computed gradient
32 | % [∂f/∂γ1 ; ... ; ∂f/∂γp ; ∂f/∂β1 ; ... ; ∂f/∂βp]
33 |
34 |
35 | % angles
36 | gamma = x(1:p);
37 | beta = x((p+1):2*p);
38 |
39 | % state |γ,β⟩ = U(B,β_p)U(C,γ_p) ... U(B,β_1)U(C,γ_1)|+⟩
40 | s = variational_state(cost,p,q,s,X,gamma,beta);
41 |
42 | % calculate the expectation value f = ⟨γ,β|C|γ,β⟩
43 | f = real(dot(s, cost .* s));
44 |
45 | % gradient of the expectation value
46 | % NOTE: This part of the code could probably be optimized further
47 | if nargout > 1 % gradient requested by the caller
48 |
49 | % computes
50 | % gradf = [∂f/∂γ1 ; ... ; ∂f/∂γp ; ∂f/∂β1 ; ... ; ∂f/∂βp]
51 | % where
52 | % ∂f/∂γn = -2Im(⟨γ,β|W^p_n·C·(W^p_n)^†·C|γ,β⟩)
53 | % and
54 | % ∂f/∂βn = -2Im(⟨γ,β|W^p_n·U(C,γn)^†·B·U(C,γn)·(W^p_n)^†·C|γ,β⟩)
55 | % and
56 | % W^p_n = U(B,βp)U(C,γp) ... U(B,βn)U(C,γn) with 1≤n≤p.
57 |
58 | gradf = zeros(2*p,1); % allocate memory
59 | bra = conj(s); % "conjugate" = ⟨γ,β|
60 | ket = cost .* s; % C|γ,β⟩
61 |
62 | for n = p:-1:1 % count backwards
63 |
64 | for i = 1:q
65 | % ⟨γ,β|U(B,βn) = ⟨s| for n = 1
66 | bra = cos(beta(n)) * bra - 1j * sin(beta(n)) * kronm(X{i},bra);
67 | % U(B,βn)^†·C|γ,β⟩
68 | ket = cos(beta(n)) * ket + 1j * sin(beta(n)) * kronm(X{i}, ket);
69 | end
70 | % ⟨γ,β|U(B,βn)·U(C,γn)
71 | bra = bra .* exp(-1j * gamma(n) * cost);
72 | % U(C,γn)^†·U(B,βn)^†·C|γ,β⟩
73 | ket = exp(1j * gamma(n) * cost) .* ket;
74 |
75 | % ∂f/∂γn = -2Im(⟨γ,β|W^p_n·C·(W^p_n)^†·C|γ,β⟩)
76 | gradf(n) = -2 * imag(bra.' * (cost .* ket));
77 |
78 | % ⟨γ,β|W^p_n·U(C,γn)^†
79 | left = bra .* exp(1j * gamma(n) * cost);
80 | % U(C,γn)·(W^p_n)^†·C|γ,β⟩
81 | right = exp(-1j * gamma(n) * cost) .* ket;
82 |
83 | % Construct the operator B = Σ_j^n σ^j_x and apply it to the right
84 | B = zeros(2^q,1);
85 | for i = 1:q
86 | B = B + kronm(X{i}, right);
87 | end
88 |
89 | % ∂f/∂βn = -2Im(⟨γ,β|W^p_n·U(C,γn)^†·B·U(C,γn)·(W^p_n)^†·C|γ,β⟩)
90 | gradf(p+n) = -2 * imag(left.' * B);
91 | end
92 | end
93 | end
94 |
95 |
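96 | % Gradient check (illustrative sketch, not part of the original file):
97 | % compare the analytic gradient against central finite differences for
98 | % a random single-qubit instance.
99 | %
100 | %   cost = [1;-1]; p = 1; q = 1;
101 | %   s = [1;1]/sqrt(2);
102 | %   X = {{1,[0 1;1 0],1}};
103 | %   x = pi*rand(1,2); h = 1e-6;
104 | %   [~,g] = expval(x,cost,p,q,s,X);
105 | %   g_fd = zeros(2,1);
106 | %   for k = 1:2
107 | %       e = zeros(1,2); e(k) = h;
108 | %       g_fd(k) = (expval(x+e,cost,p,q,s,X) ...
109 | %                - expval(x-e,cost,p,q,s,X))/(2*h);
110 | %   end
111 | %   norm(g - g_fd) % ~0 if the analytic gradient is consistent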
--------------------------------------------------------------------------------
/qaoa/optimization/pso.m:
--------------------------------------------------------------------------------
1 | function result = pso(problem)
2 | %{
3 | PARTICLE SWARM METHOD
4 |
5 | Description
6 | ----------
7 | Runs MATLAB's particleswarm over the angle bounds, seeding the swarm
8 | with the given starting point(s), and refines the best solution with
9 | an fmincon hybrid step.
10 | %}
5 |
6 | obj = problem.objective;
7 | x0 = problem.x0;
8 | lb = problem.lb;
9 | ub = problem.ub;
10 | nvars = size(lb,1);
11 |
12 | hybridopts = optimoptions(@fmincon...
13 | ... Optimization algorithm, 'interior-point' (default)
14 | ... https://se.mathworks.com/help/optim/ug/choosing-the-algorithm.html
15 | ,'Algorithm','trust-region-reflective' ...
16 | ... Level of display, 'final' (default) | 'iter'
17 | ,'Display','final' ...
18 | ... Termination tolerance on x, 1e-6 (default) | 1e-10 (interior-point)
19 | ,'StepTolerance',1e-6 ...
20 | ... Termination tolerance on the function value, 1e-6 (default)
21 | ,'FunctionTolerance',1e-6 ...
22 | ... Max number of iterations, 400 (default) | 1000 (interior-point)
23 | ,'MaxIterations',300 ...
24 | ... Maximum number of function evaluations allowed,
25 | ... 100*nvars (default) | 3000 (interior-point)
26 | ,'MaxFunctionEvaluations',100*nvars ...
27 | ... Gradient for the objective function, false (default)
28 | ,'SpecifyObjectiveGradient',true ...
29 | ... When true, fmincon estimates gradients in parallel, false (default)
30 | ,'UseParallel',false ...
31 | );
32 |
33 | options = optimoptions(@particleswarm...
34 | ... Level of display, 'final' (default) | 'iter'
35 | ,'Display','final' ...
36 | ... Termination tolerance on the function value, 1e-6 (default)
37 | ,'FunctionTolerance',1e-5 ...
38 | ... Function that continues the optimization after particleswarm terminates
39 | ,'HybridFcn',{'fmincon',hybridopts} ...
40 | ... Initial population or partial population of particles.
41 | ,'InitialSwarmMatrix',x0 ... % the rest of the swarm is random
42 | ... Max number of iterations, 200*nvars (default)
43 | ,'MaxIterations',100 ...
44 | ... Positive integer with default 20
45 | ,'MaxStallIterations',20 ...
46 | ... Maximum number of seconds without an improvement in the best known
47 | ... objective function value. Positive scalar with default Inf.
48 | ,'MaxStallTime',inf ...
49 | ... Maximum time in seconds that particleswarm runs. Default is Inf.
50 | ,'MaxTime',inf ...
51 | ... Minimum adaptive neighborhood size, a scalar from 0 to 1. 0.25 (default)
52 | ,'MinNeighborsFraction',.20 ...
53 | ... Weighting of each particle’s best position when adjusting velocity
54 | ... 1.49 (default)
55 | ,'SelfAdjustmentWeight',1.25 ...
56 | ... Weighting of the neighborhood’s best position when adjusting velocity
57 | ... 1.49 (default)
58 | ,'SocialAdjustmentWeight',1.75 ...
59 | ... Number of particles in the swarm. Default is min(100,10*nvars)
60 | ,'SwarmSize',(20+2*nvars) ...
61 | ... When true, particleswarm evaluates the swarm in parallel, false (default)
62 | ,'UseParallel',true ...
63 | );
64 |
65 |
66 | [xmin,fval,exitflag,output] = particleswarm(obj,nvars,lb,ub,options);
67 | result.xmin = xmin;
68 | result.fval = fval;
69 | result.exitflag = exitflag;
70 | result.output = output;
71 | end
72 |
73 |
--------------------------------------------------------------------------------
/qaoa/optimization/baynm.m:
--------------------------------------------------------------------------------
1 | function result = baynm(problem)
2 |
3 | % Combination of Bayesian optimization and Nelder-Mead
4 | x0 = problem.x0;
5 | p = length(problem.lb)/2;
6 | fun = problem.objective;
7 |
8 | vars = [];
9 | varnames = cell(1,2*p); % preallocate names for the 2p variables
10 |
11 | for i = 1:p
12 | name = strcat('gamma',int2str(i));
13 | optvar = optimizableVariable(name,[0 pi]);
14 | vars = [vars,optvar]; % vector with optimizableVariable objects
15 | varnames{i} = name;
16 | end
17 | for i = (p+1):2*p
18 | name = strcat('beta',int2str(i-p));
19 | optvar = optimizableVariable(name,[0 pi]);
20 | vars = [vars,optvar]; % vector with optimizableVariable objects
21 | varnames{i} = name;
22 | end
23 |
24 | % Initial evaluation points, specified as an N-by-D table, where N is
25 | % the number of evaluation points, and D is the number of variables.
26 | initial_x = array2table(x0,'VariableNames',varnames);
27 |
28 | % 3. Decide on options, i.e. the bayesopt Name,Value pairs.
29 | results = bayesopt(@objfun,vars ...
30 | ... Function to choose next evaluation point
31 | ... 'expected-improvement-per-second-plus' (default)
32 | ,'AcquisitionFunctionName','expected-improvement' ...
33 | ... If fun is stochastic, false (default) | true
34 | ,'IsObjectiveDeterministic',true ...
35 | ... Propensity to explore, 0.5 (default) | positive real
36 | ,'ExplorationRatio',.5 ...
37 | ... Fit Gaussian Process model, 300 (default)
38 | ,'GPActiveSetSize',300 ...
39 | ... ObjFun evaluation limit, 30 (default) | positive integer
40 | ,'MaxObjectiveEvaluations',30 ...
41 | ... Compute in parallel, false (default) | true
42 | ,'UseParallel',true ...
43 | ... Imputation method for parallel worker objective function values
44 | ... 'clipped-model-prediction' (default)
45 | ,'ParallelMethod','min-observed' ...
46 | ... Number of initial evaluation points, 4 (default) | positive integer
47 | ,'NumSeedPoints',size(x0,1) ...
48 | ... Command line display, 1 (default)
49 | ,'Verbose',1 ...
50 | ... Initial evaluation points
51 | ,'InitialX',initial_x ...
52 | ... Plot functions called after each iteration
53 | ... {@plotObjectiveModel,@plotMinObjective} (default)
54 | ,'PlotFcn',{@plotObjectiveModel,@plotMinObjective} ...
55 | ... Function called after each iteration, {} (default)
56 | ... ,'OutputFcn',{@assignInBase,@saveToFile} ...
57 | ... File name for the @saveToFile output function
58 | ... ,'SaveFileName','optimizations/test.mat' ...
59 | ... Variable name for the @assignInBase output function
60 | ... ,'SaveVariableName','Results' ...
61 | );
62 | xmin = results.XAtMinObjective{1,:};
63 |
64 | nm.objective = problem.objective;
65 | nm.x0 = xmin;
66 | nm.solver = 'fminsearch';
67 | options = optimset('Display','final' ...
68 | ,'PlotFcns',@optimplotfval ...
69 | ... Termination tolerance on the function value, (default) 1e-4
70 | ,'TolFun',1e-6 ...
71 | ... Termination tolerance on x, (default) 1e-4
72 | ,'TolX',1e-6 ...
73 | ... Maximum number of iterations allowed, (default) 200*numberOfVariables
74 | ,'MaxIter',100 ...
75 | ... Maximum number of function evaluations, (default) 200*numberOfVariables
76 | ,'MaxFunEvals',100*p ...
77 | );
78 | nm.options = options;
79 | [xmin,fval,exitflag,output] = fminsearch(nm);
80 |
81 | result.xmin = xmin;
82 | result.fval = fval;
83 | result.exitflag = exitflag;
84 | result.output = output;
85 |
86 | % 2. Create your objective function
87 | % The objective function has the following signature:
88 | function fval = objfun(in)
89 | x = in.Variables;
90 | fval = fun(x);
91 | end
92 | end
93 |
94 |
--------------------------------------------------------------------------------
/qaoa/qaoa.m:
--------------------------------------------------------------------------------
1 | function [final_state,result] = qaoa(cost,p,gamma,beta,minimizer,x0)
2 | %
3 | % Quantum Approximate Optimization Algorithm.
4 | %
5 | % Parameters
6 | % ----------
7 | % cost : 1-D array (column vector) containing all the values of the cost
8 | % function.
9 | %
10 | % p : integer
11 | % The number of iterations.
12 | %
13 | % gamma : Either an empty or 1-p array (row vector) containing the
14 | % optimal angles. (optional)
15 | %
16 | % beta : Either an empty or 1-p array (row vector) containing the
17 | % optimal angles. (optional)
18 | %
19 | % minimizer : string (optional)
20 | % Optimization algorithm 'GlobalSearch' (default),
21 | % 'MultiStart', 'Bayesian', 'BayesianHybridNelderMead',
22 | % 'NelderMead','ParticleSwarm','BruteForce'.
23 | %
24 | % x0 : 1-2p array (row vector) (optional)
25 | % Starting-points for level p.
26 | %
27 | % Returns
28 | % -------
29 | % final_state : 1-D array (column vector)
30 | % Returns the state vector |γ,β⟩
31 | %
32 | % result : struct with fields
33 | % The result from the minimizer
34 | %
35 | %
36 |
37 | % if no variational parameters are given simply set them as empty arrays.
38 | if (~exist('gamma', 'var') && ~exist('beta', 'var'))
39 | gamma = []; beta = [];
40 | end
41 |
42 | if ~exist('minimizer', 'var')
43 | minimizer = 'GlobalSearch';
44 | end
45 |
46 | % declare variables
47 | q = log2(length(cost)); % Number of qubits required
48 | sigma_x = [0 1;1 0]; % Pauli sigma-x matrix
49 |
50 | X = cell(1,q);
51 | for i = 1:q
52 | % Creates the i:th Pauli sigma-x matrix and stores it in a cell.
53 | X{i} = {2^(i-1),sigma_x,2^(q-i)};
54 | end
55 |
56 | % Construct the initial state vector |+⟩ = H^(⊗N)·|0⟩⊗|0⟩⊗...⊗|0⟩
57 | s = 1 / sqrt(2^q) * ones(2^q,1);
58 |
59 | if ~isequal(size(gamma),size(beta))
60 | % Check if gamma and beta are arrays of equal size.
61 | error('The arrays gamma and beta must be of equal size.')
62 |
63 | elseif isempty(gamma) == 1
64 | % If gamma and beta are empty arrays find the optimal angles.
65 | if ~exist('x0', 'var')
66 | x0 = pi * rand(1,2*p); % random start-point;
67 | end
68 |
69 | % Create problem structure
70 | problem = struct();
71 | problem.lb = zeros(2*p,1); % lower bounds
72 | problem.ub = pi * ones(2*p,1); % upper bounds
73 | problem.x0 = x0; % initial points
74 | problem.objective = @(x)expval(x,cost,p,q,s,X); % objective function
75 |
76 | addpath('qaoa/optimization')
77 | if strcmp(minimizer,'GlobalSearch') % default, set above if absent
78 | result = gs(problem);
79 | xmin = result.xmin;
80 | elseif strcmp(minimizer,'MultiStart')
81 | result = ms(problem);
82 | xmin = result.xmin;
83 | elseif strcmp(minimizer,'Bayesian')
84 | result = bso(problem);
85 | xmin = result.XAtMinObjective{1,:};
86 | elseif strcmp(minimizer,'BruteForce')
87 | result = brute(problem);
88 | xmin = result.xmin;
89 | elseif strcmp(minimizer,'NelderMead')
90 | result = nm(problem);
91 | xmin = result.xmin;
92 | elseif strcmp(minimizer,'BayesianHybridNelderMead')
93 | result = baynm(problem);
94 | xmin = result.xmin;
95 | elseif strcmp(minimizer,'ParticleSwarm')
96 | result = pso(problem);
97 | xmin = result.xmin;
98 | else
99 | error('Unknown minimizer ''%s''.',minimizer)
100 | end
99 |
100 | % The found optimal angles.
101 | gamma = xmin(1:p);
102 | beta = xmin((p+1):2*p);
103 |
104 | elseif length(gamma) ~= p
105 | % If gamma and beta are given by nonempty arrays verify that they have
106 | % correct dimensions, i.e. that they are equal to p.
107 | error('The number of total angles must be equal to 2p.')
108 | else
109 | result = 0;
110 | end
111 |
112 | % Final state |γ,β⟩ = U(B,β_p)U(C,γ_p)···U(B,β_1)U(C,γ_1)|s⟩
113 | final_state = variational_state(cost,p,q,s,X,gamma,beta);
114 |
--------------------------------------------------------------------------------
/qaoa/optimization/bso.m:
--------------------------------------------------------------------------------
1 | function result = bso(problem)
2 | %{
3 | BAYESIAN OPTIMIZATION
4 |
5 | Description
6 | ----------
7 | To perform a Bayesian optimization using bayesopt, follow these steps.
8 |
9 | 1. Prepare your variables. See Variables for a Bayesian Optimization.
10 | https://se.mathworks.com/help/stats/variables-for-a-bayesian-optimization.html
11 |
12 | 2. Create your objective function. See Bayesian Optimization Objective
13 | Functions. If necessary, create constraints, too. See Constraints in
14 | Bayesian Optimization.
15 | https://se.mathworks.com/help/stats/bayesian-optimization-objective-functions.html
16 |
17 | 3. Decide on options, meaning the bayesopt Name,Value pairs. You are
18 | not required to pass any options to bayesopt but you typically do,
19 | especially when trying to improve a solution.
20 |
21 | 4. Call bayesopt.
22 | https://se.mathworks.com/help/stats/bayesopt.html
23 |
24 | Examine the solution. You can decide to resume the optimization by using
25 | resume, or restart the optimization, usually with modified options.
26 | %}
27 |
28 | % 1. For each variable in your objective function, create a variable
29 | % description object using optimizableVariable. Each variable has a
30 | % unique name and a range of values
31 | x0 = problem.x0;
32 | p = size(x0,2)/2;
33 | fun = problem.objective;
34 |
35 | vars = [];
36 | varnames = cell(1,2*p); % preallocate names for the 2p variables
37 |
38 | for i = 1:p
39 | name = strcat('gamma',int2str(i));
40 | optvar = optimizableVariable(name,[0 pi]);
41 | vars = [vars,optvar]; % vector with optimizableVariable objects
42 | varnames{i} = name;
43 | end
44 | for i = (p+1):2*p
45 | name = strcat('beta',int2str(i-p));
46 | optvar = optimizableVariable(name,[0 pi]);
47 | vars = [vars,optvar]; % vector with optimizableVariable objects
48 | varnames{i} = name;
49 | end
50 |
51 | % Initial evaluation points, specified as an N-by-D table, where N is
52 | % the number of evaluation points, and D is the number of variables.
53 | initial_x = table(); % create an empty table
54 | for i = 1:2*p
55 | initial_x = addvars(initial_x,x0(:,i),'NewVariableNames',varnames{i});
56 | end
57 |
58 | % 3. Decide on options, i.e. the bayesopt Name,Value pairs.
59 | result = bayesopt(@objfun,vars ...
60 | ... Function to choose next evaluation point
61 | ... 'expected-improvement-per-second-plus' (default)
62 | ,'AcquisitionFunctionName','expected-improvement' ...
63 | ... If fun is stochastic, false (default) | true
64 | ,'IsObjectiveDeterministic',true ...
65 | ... Propensity to explore, 0.5 (default) | positive real
66 | ,'ExplorationRatio',.5 ...
67 | ... Fit Gaussian Process model, 300 (default)
68 | ,'GPActiveSetSize',300 ...
69 | ... ObjFun evaluation limit, 30 (default) | positive integer
70 | ,'MaxObjectiveEvaluations',30 ...
71 | ... Compute in parallel, false (default) | true
72 | ,'UseParallel',true ...
73 | ... Imputation method for parallel worker objective function values
74 | ... 'clipped-model-prediction' (default)
75 | ,'ParallelMethod','min-observed' ...
76 | ... Number of initial evaluation points, 4 (default) | positive integer
77 | ,'NumSeedPoints',size(x0,1) ...
78 | ... Command line display, 1 (default)
79 | ,'Verbose',1 ...
80 | ... Initial evaluation points
81 | ,'InitialX',initial_x ...
82 | ... Plot functions called after each iteration
83 | ... {@plotObjectiveModel,@plotMinObjective} (default)
84 | ,'PlotFcn',{@plotObjectiveModel,@plotMinObjective} ...
85 | ... Function called after each iteration, {} (default)
86 | ... ,'OutputFcn',{@assignInBase,@saveToFile} ...
87 | ... File name for the @saveToFile output function
88 | ... ,'SaveFileName','optimizations/test.mat' ...
89 | ... Variable name for the @assignInBase output function
90 | ... ,'SaveVariableName','Results' ...
91 | );
92 |
93 | result % display the bayesopt results object in the command window
94 |
95 | % 2. Create your objective function
96 | % The objective function has the following signature:
97 | function fval = objfun(in)
98 | x = in.Variables;
99 | fval = fun(x);
100 | end
101 | end
102 |
--------------------------------------------------------------------------------