├── BEGE
│   ├── Data.mat
│   ├── Sim_Var.mat
│   ├── Data_Sims.mat
│   ├── loglikedgam.m
│   ├── cov_smc_real.mat
│   ├── start_smc_real.mat
│   ├── loglikedgam_parallel.m
│   ├── cov_smc_unbiased_real.mat
│   ├── start_smc_unbiased_real.mat
│   ├── logsumexp.m
│   ├── Gamma.m
│   ├── quantile_weighted.m
│   ├── Run_prediction.m
│   ├── Run_estimation.m
│   ├── linspaceNDim.m
│   ├── MCMC_biased.m
│   ├── bege_BayesPostPredict_volatility_unbiased_is.m
│   ├── MCMC_unbiased_is.m
│   ├── bege_BayesPostPredict_volatility_sim_unbiased_is.m
│   ├── SMC_RW_LikeAnneal_parallel_biased.m
│   ├── SMC_RW_DataAnneal_Par_unif_unbiased_mc.m
│   ├── SMC_RW_DataAnneal_Par_unif_unbiased_is.m
│   ├── SMC_RW_DataAnneal_parallel_biased.m
│   └── SMC_RW_LikeAnneal_ParVec_unbiased_is.m
├── GARCH
│   ├── Data.mat
│   ├── Data_Sims_garch.mat
│   ├── Sim_Var_garch.mat
│   ├── logsumexp.m
│   ├── Run_estimation.m
│   ├── Gamma.m
│   ├── quantile_weighted.m
│   ├── Run_prediction.m
│   ├── linspaceNDim.m
│   ├── garch_BayesPostPredict_volatility.m
│   ├── garch_BayesPostPredict_volatility_sim.m
│   ├── garch_SMC_RW_LikeAnneal_parallel.m
│   └── garch_SMC_RW_DataAnneal_parallel.m
├── GJR-GARCH
│   ├── Data.mat
│   ├── Sim_Var_gjr.mat
│   ├── Data_Sims_gjr.mat
│   ├── logsumexp.m
│   ├── Run_estimation.m
│   ├── Gamma.m
│   ├── quantile_weighted.m
│   ├── Run_prediction.m
│   ├── linspaceNDim.m
│   ├── gjr_BayesPostPredict_volatility.m
│   ├── gjr_BayesPostPredict_volatility_sim.m
│   ├── GJR_SMC_RW_LikeAnneal_parallel.m
│   └── GJR_SMC_RW_DataAnneal_parallel.m
└── README.md
/BEGE/Data.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/BEGE/Data.mat
--------------------------------------------------------------------------------
/GARCH/Data.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/GARCH/Data.mat
--------------------------------------------------------------------------------
/BEGE/Sim_Var.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/BEGE/Sim_Var.mat
--------------------------------------------------------------------------------
/BEGE/Data_Sims.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/BEGE/Data_Sims.mat
--------------------------------------------------------------------------------
/BEGE/loglikedgam.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/BEGE/loglikedgam.m
--------------------------------------------------------------------------------
/GJR-GARCH/Data.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/GJR-GARCH/Data.mat
--------------------------------------------------------------------------------
/BEGE/cov_smc_real.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/BEGE/cov_smc_real.mat
--------------------------------------------------------------------------------
/BEGE/start_smc_real.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/BEGE/start_smc_real.mat
--------------------------------------------------------------------------------
/GARCH/Data_Sims_garch.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/GARCH/Data_Sims_garch.mat
--------------------------------------------------------------------------------
/GARCH/Sim_Var_garch.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/GARCH/Sim_Var_garch.mat
--------------------------------------------------------------------------------
/GJR-GARCH/Sim_Var_gjr.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/GJR-GARCH/Sim_Var_gjr.mat
--------------------------------------------------------------------------------
/BEGE/loglikedgam_parallel.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/BEGE/loglikedgam_parallel.m
--------------------------------------------------------------------------------
/GJR-GARCH/Data_Sims_gjr.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/GJR-GARCH/Data_Sims_gjr.mat
--------------------------------------------------------------------------------
/BEGE/cov_smc_unbiased_real.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/BEGE/cov_smc_unbiased_real.mat
--------------------------------------------------------------------------------
/BEGE/start_smc_unbiased_real.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DanLAct/GARCH-SMC/HEAD/BEGE/start_smc_unbiased_real.mat
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GARCH-SMC
2 | Efficient Bayesian estimation for GARCH-type models via Sequential Monte Carlo
3 |
4 |
5 | The code contained here implements SMC for estimating and predicting the GARCH, GJR-GARCH, and BEGE models investigated in the [paper](http://arxiv.org/abs/1906.03828). An implementation of MCMC for estimating the BEGE model is also provided.
6 |
7 | Each subfolder provides the script files ‘Run_estimation’ and ‘Run_prediction’, which contain instructions for running each of the methods on real and simulated data.
8 |
--------------------------------------------------------------------------------
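As a quick orientation before the individual files, here is a minimal end-to-end sketch for the GARCH folder, assuming MATLAB's current folder is the repository root and using only the function names shown in Run_estimation.m and garch_BayesPostPredict_volatility.m below; the Run_estimation.m and Run_prediction.m scripts in each subfolder remain the reference way to run the methods.

% Illustrative workflow only (not a file from the repository).
cd GARCH                           % assumes the repository root is the current folder
N = 10000;                         % number of SMC particles (can be changed as desired)

% 1) Posterior estimation via data annealing SMC
[theta, theta_particle, loglike, logprior, W, log_evidence] = ...
    garch_SMC_RW_DataAnneal_parallel(N);

% 2) One-step-ahead forecasts of the conditional variance on the real data
[h_predict, CI_low_h_t, CI_up_h_t, Predict_median_h_t] = ...
    garch_BayesPostPredict_volatility(N, theta, theta_particle, W);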
/BEGE/logsumexp.m:
--------------------------------------------------------------------------------
1 | function s = logsumexp(x, dim)
2 | % Returns log(sum(exp(x),dim)) while avoiding numerical underflow.
3 | % Default is dim = 1 (columns).
4 | % Written by Mo Chen (mochen@ie.cuhk.edu.hk). March 2009.
5 | if nargin == 1,
6 | % Determine which dimension sum will use
7 | dim = find(size(x)~=1,1);
8 | if isempty(dim), dim = 1; end
9 | end
10 |
11 | % subtract the largest in each column
12 | y = max(x,[],dim);
13 | x = bsxfun(@minus,x,y);
14 | s = y + log(sum(exp(x),dim));
15 | i = find(~isfinite(y));
16 | if ~isempty(i)
17 | s(i) = y(i);
18 | end
19 |
--------------------------------------------------------------------------------
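The comment above claims the max-subtraction trick avoids numerical underflow; a minimal check (an illustration, not repository code) makes the point:

% Averaging extremely small weights in log space: the naive computation
% underflows to -Inf, while logsumexp recovers the correct value.
logw   = [-1000; -1001; -1002];    % log-weights far below log(realmin)
naive  = log(sum(exp(logw)))       % -Inf, because exp() underflows to 0
stable = logsumexp(logw)           % approx. -999.59, computed safely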
/GARCH/logsumexp.m:
--------------------------------------------------------------------------------
1 | function s = logsumexp(x, dim)
2 | % Returns log(sum(exp(x),dim)) while avoiding numerical underflow.
3 | % Default is dim = 1 (columns).
4 | % Written by Mo Chen (mochen@ie.cuhk.edu.hk). March 2009.
5 | if nargin == 1,
6 | % Determine which dimension sum will use
7 | dim = find(size(x)~=1,1);
8 | if isempty(dim), dim = 1; end
9 | end
10 |
11 | % subtract the largest in each column
12 | y = max(x,[],dim);
13 | x = bsxfun(@minus,x,y);
14 | s = y + log(sum(exp(x),dim));
15 | i = find(~isfinite(y));
16 | if ~isempty(i)
17 | s(i) = y(i);
18 | end
19 |
--------------------------------------------------------------------------------
/GJR-GARCH/logsumexp.m:
--------------------------------------------------------------------------------
1 | function s = logsumexp(x, dim)
2 | % Returns log(sum(exp(x),dim)) while avoiding numerical underflow.
3 | % Default is dim = 1 (columns).
4 | % Written by Mo Chen (mochen@ie.cuhk.edu.hk). March 2009.
5 | if nargin == 1,
6 | % Determine which dimension sum will use
7 | dim = find(size(x)~=1,1);
8 | if isempty(dim), dim = 1; end
9 | end
10 |
11 | % subtract the largest in each column
12 | y = max(x,[],dim);
13 | x = bsxfun(@minus,x,y);
14 | s = y + log(sum(exp(x),dim));
15 | i = find(~isfinite(y));
16 | if ~isempty(i)
17 | s(i) = y(i);
18 | end
19 |
--------------------------------------------------------------------------------
/GARCH/Run_estimation.m:
--------------------------------------------------------------------------------
1 | %%% Performing posterior estimation for the GARCH model on real data using SMC
2 | % NOTE: run each function separately
3 |
4 | %% Data annealing SMC
5 | N = 10000; % can change the value of N as desired
6 | [theta, theta_particle, loglike, logprior, W, log_evidence] = garch_SMC_RW_DataAnneal_parallel(N);
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %%
9 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
10 |
11 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
12 | %% Likelihood annealing SMC
13 | N = 10000; % can change the value of N as desired
14 | [theta, theta_particle, loglike, logprior, gamma, log_evidence] = garch_SMC_RW_LikeAnneal_parallel(N);
15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
16 |
--------------------------------------------------------------------------------
/GJR-GARCH/Run_estimation.m:
--------------------------------------------------------------------------------
1 | %%% Performing posterior estimation for the GJR-GARCH model on real data using SMC
2 | % NOTE: run each function separately
3 |
4 | %% Data annealing SMC
5 | N = 10000; % can change the value of N as desired
6 | [theta, theta_particle, loglike, logprior, W, log_evidence] = GJR_SMC_RW_DataAnneal_parallel(N);
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %%
9 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
10 |
11 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
12 | %% Likelihood annealing SMC
13 | N = 10000; % can change the value of N as desired
14 | [theta, theta_particle, loglike, logprior, gamma, log_evidence] = GJR_SMC_RW_LikeAnneal_parallel(N);
15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
--------------------------------------------------------------------------------
/GARCH/Gamma.m:
--------------------------------------------------------------------------------
1 | function [gamma] = Gamma(gamma_Current,gamma_Previous,N,loglike,logW_Prior)
2 | % Used to select the temperatures in the likelihood annealing strategy: returns the effective sample size minus the target (N/2)
3 |
4 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
5 | % gamma_Current - New temperature
6 | %
7 | % gamma_Previous - Old temperature
8 | %
9 | % N - Size of population of particles
10 | %
11 | % loglike - Log-likelihood
12 | %
13 | % logW_Prior - Old samples' log-weights
14 |
15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
16 | % gamma - Difference between Effective Sample Size and the target
17 |
18 | logW_Curr = logW_Prior + (gamma_Current - gamma_Previous)*loglike;
19 | logW_Curr=logW_Curr-max(logW_Curr);
20 | W=exp(logW_Curr)/sum(exp(logW_Curr));
21 | gamma=1/sum(W.^2)-N/2;
22 |
23 | end
24 |
25 |
--------------------------------------------------------------------------------
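Gamma.m returns the gap between the effective sample size at a candidate temperature and the target of N/2, so the next temperature can be chosen adaptively by root-finding. A hypothetical sketch of that use (an illustration of the usual approach, not an excerpt of the repository's SMC samplers):

% Choose the next likelihood-annealing temperature so the ESS drops to N/2.
N = 1000;
rng(1);
loglike    = 5*randn(N,1);          % stand-in log-likelihood values
logW_Prior = zeros(N,1);            % equal log-weights at the current stage
gamma_Prev = 0.1;                   % current temperature

ess_gap = @(g) Gamma(g, gamma_Prev, N, loglike, logW_Prior);
if ess_gap(1) >= 0
    gamma_Next = 1;                 % ESS still above target at gamma = 1: jump to 1
else
    gamma_Next = fzero(ess_gap, [gamma_Prev + 1e-8, 1]);
end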
/GJR-GARCH/Gamma.m:
--------------------------------------------------------------------------------
1 | function [gamma] = Gamma(gamma_Current,gamma_Previous,N,loglike,logW_Prior)
2 | % Used to select the temperatures in the likelihood annealing strategy: returns the effective sample size minus the target (N/2)
3 |
4 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
5 | % gamma_Current - New temperature
6 | %
7 | % gamma_Previous - Old temperature
8 | %
9 | % N - Size of population of particles
10 | %
11 | % loglike - Log-likelihood
12 | %
13 | % logW_Prior - Old samples' log-weights
14 |
15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
16 | % gamma - Difference between Effective Sample Size and the target
17 |
18 | logW_Curr = logW_Prior + (gamma_Current - gamma_Previous)*loglike;
19 | logW_Curr=logW_Curr-max(logW_Curr);
20 | W=exp(logW_Curr)/sum(exp(logW_Curr));
21 | gamma=1/sum(W.^2)-N/2;
22 |
23 | end
24 |
25 |
--------------------------------------------------------------------------------
/BEGE/Gamma.m:
--------------------------------------------------------------------------------
1 | function [gamma] = Gamma(gamma_Current,gamma_Previous,N,loglike,logW_Prior)
2 | % Used to select the temperatures in the likelihood annealing strategy: returns the effective sample size minus the target (N/2)
3 |
4 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
5 | % gamma_Current - New temperature
6 | %
7 | % gamma_Previous - Old temperature
8 | %
9 | % N - Size of population of particles
10 | %
11 | % loglike - Log-likelihood
12 | %
13 | % logW_Prior - Old samples' log-weights
14 |
15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
16 | % gamma - Difference between Effective Sample Size and the target
17 |
18 | logW_Curr = logW_Prior + (gamma_Current - gamma_Previous)*loglike;
19 | inds = isnan(logW_Curr);
20 | logW_Curr(inds) = -inf;
21 | logW_Curr=logW_Curr-max(logW_Curr);
22 | W=exp(logW_Curr)/sum(exp(logW_Curr));
23 | gamma=1/sum(W.^2)-N/2;
24 | end
25 |
26 |
--------------------------------------------------------------------------------
/BEGE/quantile_weighted.m:
--------------------------------------------------------------------------------
1 | function [quantile] = quantile_weighted(samples,p,weights)
2 | % Gets quantiles for a weighted sample
3 |
4 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
5 | % samples - The sample particles (that have associated weights)
6 | %
7 | % p - Desired quantile
8 | %
9 | % weights - Weights associated with samples
10 |
11 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
12 | % quantile - The sample associated with desired quantile p.
13 | % load('Test_rate_returnPredict.mat');
14 | % load('Test_W.mat');
15 | % samples=rate_returnPredict(:,1);
16 | % p=0.975;
17 | % weights=W(:,end-110+1);
18 |
19 | [N, d] = size(samples);
20 | quantile = zeros(1,d);
21 |
22 | if length(weights)~=N
23 | error('The samples and weights should be of the same size!');
24 | end
25 |
26 | %Different from normal since bottom isn't 0 and top isn't 1...
27 |
28 | for j=1:d
29 | [sorted, order] = sort(samples(:,j));%'sorted'-the whole sorted samples;'order'-corresponding Indices of the sample
30 | cumsum_w = weights(order);
31 | cumsum_w = cumsum(cumsum_w);
32 |     lower = find(cumsum_w
[... remainder of quantile_weighted.m omitted in this listing ...]
--------------------------------------------------------------------------------
/BEGE/linspaceNDim.m:
--------------------------------------------------------------------------------
[... opening lines omitted in this listing ...]
20 | %   Steeve AMBROISE --> sambroise@gmail.com
21 | %
22 | % $ Date: 2009/01/29 21:00:00 GMT $
23 | % $ revised Date: 2009/02/02 18:00:00 GMT $
24 | % Bug fixed for singleton dimensions that occur when d1 or d2
25 | % are empty matrix, scalar or vector.
26 | %
27 | %
28 |
29 |
30 | if nargin == 2
31 | n = 100;
32 | end
33 | n = double(n);
34 | d1 = squeeze(d1); d2 = squeeze(d2);
35 |
36 | if ndims(d1)~= ndims(d2) || any(size(d1)~= size(d2))
37 | error('d1 and d2 must have the same number of dimension and the same size'),
38 | end
39 |
40 | NDim = ndims(d1);
41 | %%%%%%%% To know if the two first dimensions are singleton dimensions
42 | if NDim==2 && any(size(d1)==1)
43 | NDim = NDim-1;
44 | if all(size(d1)==1)
45 | NDim = 0;
46 | end
47 | end
48 |
49 | pp = (0:n-2)./(floor(n)-1);
50 |
51 | Sum1 = TensorProduct(d1, ones(1,n-1));
52 | Sum2 = TensorProduct((d2-d1), pp);
53 | y = cat(NDim+1, Sum1 + Sum2, shiftdim(d2, size(d1, 1)==1 ));
54 |
55 | %%%%% An old function that I wrote to replace the built in Matlab function:
56 | %%%%% KRON
57 |
58 | function Z = TensorProduct(X,Y)
59 | % Z = TensorProduct(X,Y) returns the REAL Kronecker tensor product of X and Y.
60 | % The result is a multidimensional array formed by taking all possible products
61 | % between the elements of X and those of Y.
62 | %
63 | % If X is m-by-n and Y is p-by-q-by-r, then kron(X,Y)
64 | % is m-by-p-by-n-by-q-by-r.
65 | %
66 | % X and Y are multidimensional matrices
67 | % of any size and number of dimensions
68 | %
69 | % E.g. if X is of dimensions (4, 5, 3) and Y of dimension (3, 1, 7, 4)
70 | % TensorProduct(X, Y) returns a multidimensional matrix Z of dimensions:
71 | % (4, 5, 3, 3, 7, 4)
72 | %
73 | % $ Date: 2001/11/09 10:20:00 GMT $
74 | %
75 | % Steeve AMBROISE --> sambroise@gmail.com
76 | %
77 |
78 | sX=size(X);sY=size(Y);
79 |
80 | ndim1=ndims(X);ndim2=ndims(Y);
81 |
82 | indperm=[ndim2+1:ndim1+ndim2,1:ndim2];
83 |
84 | % to remove all singleton dimensions
85 | Z=squeeze(repmat(X,[ones(1,ndims(X)),sY]).*...
86 | permute(repmat(Y,[ones(1,ndims(Y)),sX]),indperm));
--------------------------------------------------------------------------------
/GARCH/linspaceNDim.m:
--------------------------------------------------------------------------------
1 | function y = linspaceNDim(d1, d2, n)
2 | %LINSPACENDIM Linearly spaced multidimensional matrix.
3 | % LINSPACENDIM(d1, d2) generates a multi-dimensional
4 | % matrix of 100 linearly equally spaced points between
5 | % each element of matrices d1 and d2.
6 | %
7 | % LINSPACENDIM(d1, d2, N) generates N points between
8 | % each element of matrices X1 and X2.
9 | %
10 | % Example:
11 | % d1 = rand(3, 2, 4); d2 = rand(size(d1)); n = 10;
12 | %
13 | % y = linspaceNDim(d1, d2, n) returns a multidimensional matrix y of
14 | % size (3, 2, 4, 10)
15 | %
16 | %
17 | % Class support for inputs X1,X2:
18 | % float: Multidimensional matrix, vector, double, single
19 | %
20 | % Steeve AMBROISE --> sambroise@gmail.com
21 | %
22 | % $ Date: 2009/01/29 21:00:00 GMT $
23 | % $ revised Date: 2009/02/02 18:00:00 GMT $
24 | % Bug fixed for singleton dimensions that occur when d1 or d2
25 | % are empty matrix, scalar or vector.
26 | %
27 | %
28 |
29 |
30 | if nargin == 2
31 | n = 100;
32 | end
33 | n = double(n);
34 | d1 = squeeze(d1); d2 = squeeze(d2);
35 |
36 | if ndims(d1)~= ndims(d2) || any(size(d1)~= size(d2))
37 | error('d1 and d2 must have the same number of dimension and the same size'),
38 | end
39 |
40 | NDim = ndims(d1);
41 | %%%%%%%% To know if the two first dimensions are singleton dimensions
42 | if NDim==2 && any(size(d1)==1)
43 | NDim = NDim-1;
44 | if all(size(d1)==1)
45 | NDim = 0;
46 | end
47 | end
48 |
49 | pp = (0:n-2)./(floor(n)-1);
50 |
51 | Sum1 = TensorProduct(d1, ones(1,n-1));
52 | Sum2 = TensorProduct((d2-d1), pp);
53 | y = cat(NDim+1, Sum1 + Sum2, shiftdim(d2, size(d1, 1)==1 ));
54 |
55 | %%%%% An old function that I wrote to replace the built in Matlab function:
56 | %%%%% KRON
57 |
58 | function Z = TensorProduct(X,Y)
59 | % Z = TensorProduct(X,Y) returns the REAL Kronecker tensor product of X and Y.
60 | % The result is a multidimensional array formed by taking all possible products
61 | % between the elements of X and those of Y.
62 | %
63 | % If X is m-by-n and Y is p-by-q-by-r, then kron(X,Y)
64 | % is m-by-p-by-n-by-q-by-r.
65 | %
66 | % X and Y are multidimensional matrices
67 | % of any size and number of dimensions
68 | %
69 | % E.g. if X is of dimensions (4, 5, 3) and Y of dimension (3, 1, 7, 4)
70 | % TensorProduct(X, Y) returns a multidimensional matrix Z of dimensions:
71 | % (4, 5, 3, 3, 7, 4)
72 | %
73 | % $ Date: 2001/11/09 10:20:00 GMT $
74 | %
75 | % Steeve AMBROISE --> sambroise@gmail.com
76 | %
77 |
78 | sX=size(X);sY=size(Y);
79 |
80 | ndim1=ndims(X);ndim2=ndims(Y);
81 |
82 | indperm=[ndim2+1:ndim1+ndim2,1:ndim2];
83 |
84 | % to remove all singleton dimensions
85 | Z=squeeze(repmat(X,[ones(1,ndims(X)),sY]).*...
86 | permute(repmat(Y,[ones(1,ndims(Y)),sX]),indperm));
--------------------------------------------------------------------------------
/GJR-GARCH/linspaceNDim.m:
--------------------------------------------------------------------------------
1 | function y = linspaceNDim(d1, d2, n)
2 | %LINSPACENDIM Linearly spaced multidimensional matrix.
3 | % LINSPACENDIM(d1, d2) generates a multi-dimensional
4 | % matrix of 100 linearly equally spaced points between
5 | % each element of matrices d1 and d2.
6 | %
7 | % LINSPACENDIM(d1, d2, N) generates N points between
8 | % each element of matrices X1 and X2.
9 | %
10 | % Example:
11 | % d1 = rand(3, 2, 4); d2 = rand(size(d1)); n = 10;
12 | %
13 | % y = linspaceNDim(d1, d2, n) returns a multidimensional matrix y of
14 | % size (3, 2, 4, 10)
15 | %
16 | %
17 | % Class support for inputs X1,X2:
18 | % float: Multidimensional matrix, vector, double, single
19 | %
20 | % Steeve AMBROISE --> sambroise@gmail.com
21 | %
22 | % $ Date: 2009/01/29 21:00:00 GMT $
23 | % $ revised Date: 2009/02/02 18:00:00 GMT $
24 | % Bug fixed for singleton dimensions that occur when d1 or d2
25 | % are empty matrix, scalar or vector.
26 | %
27 | %
28 |
29 |
30 | if nargin == 2
31 | n = 100;
32 | end
33 | n = double(n);
34 | d1 = squeeze(d1); d2 = squeeze(d2);
35 |
36 | if ndims(d1)~= ndims(d2) || any(size(d1)~= size(d2))
37 | error('d1 and d2 must have the same number of dimension and the same size'),
38 | end
39 |
40 | NDim = ndims(d1);
41 | %%%%%%%% To know if the two first dimensions are singleton dimensions
42 | if NDim==2 && any(size(d1)==1)
43 | NDim = NDim-1;
44 | if all(size(d1)==1)
45 | NDim = 0;
46 | end
47 | end
48 |
49 | pp = (0:n-2)./(floor(n)-1);
50 |
51 | Sum1 = TensorProduct(d1, ones(1,n-1));
52 | Sum2 = TensorProduct((d2-d1), pp);
53 | y = cat(NDim+1, Sum1 + Sum2, shiftdim(d2, size(d1, 1)==1 ));
54 |
55 | %%%%% An old function that I wrote to replace the built in Matlab function:
56 | %%%%% KRON
57 |
58 | function Z = TensorProduct(X,Y)
59 | % Z = TensorProduct(X,Y) returns the REAL Kronecker tensor product of X and Y.
60 | % The result is a multidimensional array formed by taking all possible products
61 | % between the elements of X and those of Y.
62 | %
63 | % If X is m-by-n and Y is p-by-q-by-r, then kron(X,Y)
64 | % is m-by-p-by-n-by-q-by-r.
65 | %
66 | % X and Y are multidimensional matrices
67 | % of any size and number of dimensions
68 | %
69 | % E.g. if X is of dimensions (4, 5, 3) and Y of dimension (3, 1, 7, 4)
70 | % TensorProduct(X, Y) returns a multidimensional matrix Z of dimensions:
71 | % (4, 5, 3, 3, 7, 4)
72 | %
73 | % $ Date: 2001/11/09 10:20:00 GMT $
74 | %
75 | % Steeve AMBROISE --> sambroise@gmail.com
76 | %
77 |
78 | sX=size(X);sY=size(Y);
79 |
80 | ndim1=ndims(X);ndim2=ndims(Y);
81 |
82 | indperm=[ndim2+1:ndim1+ndim2,1:ndim2];
83 |
84 | % to remove all singleton dimensions
85 | Z=squeeze(repmat(X,[ones(1,ndims(X)),sY]).*...
86 | permute(repmat(Y,[ones(1,ndims(Y)),sX]),indperm));
87 |
--------------------------------------------------------------------------------
/GARCH/garch_BayesPostPredict_volatility.m:
--------------------------------------------------------------------------------
1 | function [h_predict, CI_low_h_t, CI_up_h_t, Predict_median_h_t] = garch_BayesPostPredict_volatility(N, theta, theta_particle, W)
2 | %One-step ahead forecast of conditional variances under the GARCH(1,1) model for real data, using the parameters sampled from data annealing SMC
3 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
4 | % N - Size of population of particles
5 | % theta - Parameters sampled from Data Annealing SMC: N samples from different amount of available observations: y_{1:t}, t=1,..,T
6 | % theta_particle - Transformed theta
7 | % W - The weights of weighted samples, corresponding to the above thetas/theta_particles
8 |
9 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
10 | % h_predict - N samples of the one-step ahead forecast of conditional variances
11 | % CI_low_h_t - 0.025 quantile of the above posterior predictive sample of the conditional variances
12 | % CI_up_h_t - 0.975 quantile of the above posterior predictive sample of the conditional variances
13 | % Predict_median_h_t - Median of the above posterior predictive sample of the conditional variances
14 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
15 |
16 | %load('Data for Prediction_garch_unif.mat');
17 |
18 | %loading monthly S&P stock log return
19 | load('Data.mat');
20 | rate_return = MonthlyReturns2018;
21 | rate_return(isnan(rate_return)) = 0;
22 |
23 | %Ranges for parameters transformation
24 | mylims = zeros(4,2);
25 | mylims(3,1) = 0.2;
26 | mylims(4,1) = -0.9;
27 | mylims(:,2) = [0.3; 0.5; 0.99; 0.9];
28 |
29 | T = length(rate_return); % total number of data
30 | start=200;% Starting time point for making prediction; the time point for the 1st predicted conditional variance is: t = start+1
31 | PT=T-start;
32 |
33 | CI_low_h_t=zeros(1,PT);
34 | CI_up_h_t=zeros(1,PT);
35 | Predict_median_h_t=zeros(1,PT);
36 | params=theta;
37 | h_predict = zeros(N,PT);
38 |
39 | for t=1:PT
40 | fprintf('Just starting with the %ith prediction.\n',t);
41 |
42 | [~,h] = garch_loglike_parallel(theta_particle(:,:,t+start-1),rate_return(1:t+start-1),mylims);
43 |
44 | h_t = params(:,1,t+start-1)+params(:,2,t+start-1).*(rate_return(t+start-1)-params(:,4,t+start-1)).^2 + params(:,3,t+start-1).*h(:,end);
45 | h_predict(:,t)=h_t;
46 |
47 | CI_low_h_t(:,t)=quantile_weighted( h_t,0.025,W(:,t+start));
48 | CI_up_h_t(:,t)=quantile_weighted( h_t,0.975,W(:,t+start));
49 | Predict_median_h_t(:,t)=quantile_weighted(h_t,0.5,W(:,t+start));
50 |
51 | end
52 | save('garch_BayesPostPredict_RealData_var.mat');
53 | end
54 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
55 |
56 |
57 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
58 | function [loglikelihood,h] = garch_loglike_parallel(params,data,mylims)
59 | % Computing the likelihood and conditional variances of the time series under GARCH dynamics, given observed data and model parameters
60 | %====================================================================================================================
61 |
62 | [N,d]=size(params);
63 |
64 | % Transforming back to original scale;
65 | for j = 1:d
66 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
67 | end
68 |
69 | %%%%%%%%%%%%%%%%%%%
70 | %SETTING PARAMETERS
71 | %%%%%%%%%%%%%%%%%%%
72 | a0 = params(:,1); %alpha_0
73 | a1 = params(:,2); %alpha_1
74 | b1 = params(:,3); %beta_1
75 | r_bar = params(:,4); %mu
76 |
77 | n = length(data);
78 | h = zeros(N,n);
79 |
80 | t1=10e-6;
81 |
82 | h(:,1) = max(a0./(1-a1-b1),t1);
83 |
84 | logl = zeros(N,1);
85 | logl = -0.5*log(h(:,1)) - 0.5*(data(1)-r_bar).^2./h(:,1) - 0.5*log(2*pi*ones(N,1));
86 |
87 | for t = 2:n
88 | h(:,t) = a0+a1.*(data(t-1)-r_bar).^2 + b1.*h(:,t-1);
89 | logl_tmp = -0.5*log(h(:,t)) - 0.5*(data(t)-r_bar).^2./h(:,t) - 0.5*log(2*pi*ones(N,1));
90 | logl = logl + logl_tmp;
91 | end
92 |
93 | loglikelihood = logl;
94 | end
95 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
96 |
--------------------------------------------------------------------------------
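In scalar form, the vectorized forecast inside the loop above is the usual GARCH(1,1) variance recursion; a small numeric illustration with made-up parameter values (not repository output):

% One-step-ahead GARCH(1,1) variance forecast for a single posterior draw.
a0 = 0.0001; a1 = 0.10; b1 = 0.85; mu = 0.005;   % alpha_0, alpha_1, beta_1, mu
y_t = -0.02;                                     % last observed return
h_t = 0.0025;                                    % current conditional variance
h_next = a0 + a1*(y_t - mu)^2 + b1*h_t;          % = 0.0001 + 0.1*0.000625 + 0.85*0.0025
                                                 % = 0.0022875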
/GARCH/garch_BayesPostPredict_volatility_sim.m:
--------------------------------------------------------------------------------
1 | function [h_predict, CI_low_h_t, CI_up_h_t, Predict_median_h_t, ELPD_LFO_true_var] = garch_BayesPostPredict_volatility_sim(N, Sim_Var, theta, theta_particle, W)
2 | %One-step ahead forecast of conditional variances, together with LFO-CV, under the GARCH(1,1) model for simulated data, using the parameters sampled from data annealing SMC
3 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
4 | % N - Size of population of particles
5 | % Sim_Var - Conditional variance of the simulated data
6 | % theta - Parameters sampled from Data Annealing SMC: N samples from different amount of available observations: y_{1:t}, t=1,..,T
7 | % theta_particle - Transformed theta
8 | % W - The weights of weighted samples, corresponding to the above thetas/theta_particles
9 |
10 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
11 | % h_predict - N samples of the one-step ahead forecast of conditional variances
12 | % CI_low_h_t - 0.025 quantile of the above posterior predictive sample of the conditional variances
13 | % CI_up_h_t - 0.975 quantile of the above posterior predictive sample of the conditional variances
14 | % Predict_median_h_t - Median of the above posterior predictive sample of the conditional variances
15 | % ELPD_LFO_true_var - Approximated elpd_LFO
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 |
18 | %load('Prediction_TrueGarch_unif_sim.mat');
19 | %load('Sim_Var_garch.mat');
20 |
21 | %loading simulated data
22 | load('Data_Sims_garch.mat');
23 | rate_return = Simulation;
24 | rate_return(isnan(rate_return)) = 0;
25 |
26 | %Ranges for parameters transformation
27 | mylims = zeros(4,2);
28 | mylims(3,1) = 0.2;
29 | mylims(4,1) = -0.9;
30 | mylims(:,2) = [0.3; 0.5; 0.99; 0.9];
31 |
32 | T = length(rate_return); % total number of data
33 | start=200; % Starting time point for making prediction; the time point for the 1st predicted conditional variance is: t = start+1
34 | PT=T-start;
35 |
36 | CI_low_h_t=zeros(1,PT);
37 | CI_up_h_t=zeros(1,PT);
38 | Predict_median_h_t=zeros(1,PT);
39 | params=theta;
40 | h_predict = zeros(N,PT);
41 | h_garch = Sim_Var;
42 |
43 | parfor t=1:PT
44 | fprintf('Just starting with the %ith prediction.\n',t);
45 |
46 | [~,h] = garch_loglike_parallel(theta_particle(:,:,t+start-1),rate_return(1:t+start-1),mylims);
47 |
48 | h_t = params(:,1,t+start-1)+params(:,2,t+start-1).*(rate_return(t+start-1)-params(:,4,t+start-1)).^2 + params(:,3,t+start-1).*h(:,end);
49 | h_predict(:,t)=h_t;
50 |
51 | CI_low_h_t(:,t)=quantile_weighted( h_t,0.025,W(:,t+start));
52 | CI_up_h_t(:,t)=quantile_weighted( h_t,0.975,W(:,t+start));
53 | Predict_median_h_t(:,t)=quantile_weighted(h_t,0.5,W(:,t+start));
54 |
55 | [likelihood_predict_TureData_var(t),x] = ksdensity(h_t,h_garch(start+t),'Weights',W(:,start+t));
56 |
57 | end
58 |
59 | ELPD_LFO_true_var = sum(log(likelihood_predict_TureData_var));
60 |
61 | save('garch_BayesPostPredict_SimulatedData_EPLD_var.mat');
62 | end
63 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
64 |
65 |
66 |
67 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
68 | function [loglikelihood,h] = garch_loglike_parallel(params,data,mylims)
69 | % Computing the likelihood and conditional variances of the time series under GARCH dynamics, given observed data and model parameters
70 | %====================================================================================================================
71 |
72 | [N,d]=size(params);
73 |
74 | % Transforming back to original scale;
75 | for j = 1:d
76 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
77 | end
78 |
79 | %%%%%%%%%%%%%%%%%%%
80 | %SETTING PARAMETERS
81 | %%%%%%%%%%%%%%%%%%%
82 | a0 = params(:,1); %alpha_0
83 | a1 = params(:,2); %alpha_1
84 | b1 = params(:,3); %beta_1
85 | r_bar = params(:,4); %mu
86 |
87 | n = length(data);
88 | h = zeros(N,n);
89 |
90 | t1=10e-6;
91 |
92 | h(:,1) = max(a0./(1-a1-b1),t1);
93 |
94 | logl = zeros(N,1);
95 | logl = -0.5*log(h(:,1)) - 0.5*(data(1)-r_bar).^2./h(:,1) - 0.5*log(2*pi*ones(N,1));
96 |
97 | for t = 2:n
98 | h(:,t) = a0+a1.*(data(t-1)-r_bar).^2 + b1.*h(:,t-1);
99 | logl_tmp = -0.5*log(h(:,t)) - 0.5*(data(t)-r_bar).^2./h(:,t) - 0.5*log(2*pi*ones(N,1));
100 | logl = logl + logl_tmp;
101 | end
102 |
103 | loglikelihood = logl;
104 | end
105 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
106 |
107 |
108 |
--------------------------------------------------------------------------------
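The ELPD_LFO_true_var quantity above is built by evaluating a weighted kernel density estimate of the predictive draws at the "true" simulated variance for each forecast origin and summing the log densities. A self-contained sketch of that pattern on made-up numbers (requires the Statistics and Machine Learning Toolbox, which the repository code also uses):

% Toy ELPD-style computation: weighted ksdensity evaluated at a known value.
rng(2);
PT      = 5;                                % number of forecast origins (toy value)
Np      = 2000;                             % posterior draws per origin
logdens = zeros(1,PT);
for t = 1:PT
    draws  = 0.002 + 0.0005*randn(Np,1);    % stand-in predictive sample of h_t
    w      = ones(Np,1)/Np;                 % stand-in (normalised) SMC weights
    h_true = 0.0021;                        % stand-in "true" simulated variance
    logdens(t) = log(ksdensity(draws, h_true, 'Weights', w));
end
ELPD_LFO = sum(logdens);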
/GJR-GARCH/gjr_BayesPostPredict_volatility.m:
--------------------------------------------------------------------------------
1 | function [h_predict, CI_low_h_t, CI_up_h_t, Predict_median_h_t] = gjr_BayesPostPredict_volatility(N, theta, theta_particle, W)
2 | %One-step ahead forecast of conditional variances under the GJR-GARCH(1,1) model for real data, using the parameters sampled from data annealing SMC
3 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
4 | % N - Size of population of particles
5 | % theta - Parameters sampled from Data Annealing SMC: N samples from different amount of available observations: y_{1:t}, t=1,..,T
6 | % theta_particle - Transformed theta
7 | % W - The weights of weighted samples, corresponding to the above thetas/theta_particles
8 |
9 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
10 | % h_predict - N samples of the one-step ahead forecast of conditional variances
11 | % CI_low_h_t - 0.025 quantile of the above posterior predictive sample of the conditional variances
12 | % CI_up_h_t - 0.975 quantile of the above posterior predictive sample of the conditional variances
13 | % Predict_median_h_t - Median of the above posterior predictive sample of the conditional variances
14 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
15 |
16 | %load('Data for Prediction_gjr_unif.mat');
17 |
18 | %loading monthly S&P stock log return
19 | load('Data.mat');
20 | rate_return = MonthlyReturns2018;
21 | rate_return(isnan(rate_return)) = 0;
22 |
23 | %Ranges for parameters transformation
24 | mylims = zeros(5,2);
25 | mylims(:,1) = zeros(5,1);
26 | mylims(4,1) = 0.5;
27 | mylims(5,1) = -0.9;
28 | mylims(:,2) = [0.3; 0.3; 0.3; 0.99; 0.9];
29 |
30 | T = length(rate_return); % total number of data
31 | start=200; % Starting time point for making prediction; the time point for the 1st predicted conditional variance is: t = start+1
32 | PT=T-start;
33 |
34 | CI_low_h_t=zeros(1,PT);
35 | CI_up_h_t=zeros(1,PT);
36 | Predict_median_h_t=zeros(1,PT);
37 | params=theta;
38 | h_predict = zeros(N,PT);
39 |
40 | parfor t=1:PT
41 | fprintf('Just starting with the %ith prediction.\n',t);
42 |
43 | [~,h] = gjrloglike_parallel_unif(theta_particle(:,:,t+start-1),rate_return(1:t+start-1),mylims);
44 |
45 | h_t = params(:,1,t+start-1)+params(:,2,t+start-1).*(rate_return(t+start-1)-params(:,5,t+start-1)).^2 ...
46 | + params(:,3,t+start-1).*((rate_return(t+start-1)-params(:,5,t+start-1))<0).*(rate_return(t+start-1)-params(:,5,t+start-1)).^2 ...
47 | + params(:,4,t+start-1).*h(:,end);
48 | h_predict(:,t)=h_t;
49 |
50 | CI_low_h_t(:,t)=quantile_weighted( h_t,0.025,W(:,t+start));
51 | CI_up_h_t(:,t)=quantile_weighted( h_t,0.975,W(:,t+start));
52 | Predict_median_h_t(:,t)=quantile_weighted(h_t,0.5,W(:,t+start));
53 |
54 | end
55 | save('gjr_BayesPostPredict__RealData_var.mat');
56 | end
57 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
58 |
59 |
60 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
61 | function [loglikelihood,h] = gjrloglike_parallel_unif(params,data,mylims)
62 | % Computing the likelihood of the time series under GJR-GARCH dynamics, given observed data and model parameters
63 | %====================================================================================================================
64 |
65 | [N,d]=size(params);
66 |
67 | % Transforming back to original scale;
68 | for j = 1:d
69 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
70 | end
71 |
72 | %%%%%%%%%%%%%%%%%%%
73 | %SETTING PARAMETERS
74 | %%%%%%%%%%%%%%%%%%%
75 | h0 = params(:,1); %alpha_0
76 | phi_p = params(:,2); %phi
77 | phi_n = params(:,3); %phi_{-}
78 | rho_h = params(:,4); %beta_sigma
79 | r_bar = params(:,5); %mu
80 |
81 | t1=10e-6;
82 |
83 | n = length(data);
84 | h = zeros(N,n);
85 |
86 | h(:,1) = max(h0./(1-phi_p-phi_n./2-rho_h),t1);
87 |
88 | logl=zeros(N,1);
89 | logl = - .5*log(h(:,1)) - .5*(data(1)-r_bar).^2./h(:,1) -.5*log(2*pi*ones(N,1));
90 |
91 | for t = 2:n
92 | inds_neg = find(data(t-1)-r_bar<0);
93 | inds_pos = find(data(t-1)-r_bar>0);
94 | h(inds_pos,t) = h0(inds_pos) + rho_h(inds_pos).*h(inds_pos,t-1)+ phi_p(inds_pos).*(data(t-1)-r_bar(inds_pos)).^2;
95 | h(inds_neg,t) = h0(inds_neg) + rho_h(inds_neg).*h(inds_neg,t-1)+ phi_p(inds_neg).*(data(t-1)-r_bar(inds_neg)).^2+ phi_n(inds_neg).*(data(t-1)-r_bar(inds_neg)).^2;
96 | logl_tmp = - .5*log(h(:,t)) - .5*(data(t)-r_bar).^2./h(:,t) -.5*log(2*pi*ones(N,1));
97 | logl=logl+logl_tmp;
98 |
99 | end
100 |
101 | loglikelihood=logl;
102 | end
103 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
104 |
105 |
--------------------------------------------------------------------------------
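In scalar form, the forecast above is the GJR-GARCH(1,1) recursion, whose leverage term is switched on only after a negative shock; a small numeric illustration with made-up values (not repository output):

% One-step-ahead GJR-GARCH(1,1) variance forecast for a single posterior draw.
a0 = 0.0001; phi = 0.05; phi_minus = 0.10; beta = 0.85; mu = 0.005;
y_t = -0.02;  h_t = 0.0025;
shock  = y_t - mu;                                        % -0.025 (negative)
h_next = a0 + phi*shock^2 + phi_minus*(shock < 0)*shock^2 + beta*h_t;
                                                          % = 0.00231875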
/GJR-GARCH/gjr_BayesPostPredict_volatility_sim.m:
--------------------------------------------------------------------------------
1 | function [h_predict, CI_low_h_t, CI_up_h_t, Predict_median_h_t, ELPD_LFO_true_var] = gjr_BayesPostPredict_volatility_sim(N, Sim_Var, theta, theta_particle, W)
2 | %One-step ahead forecast of conditional variances, together with LFO-CV, under the GJR-GARCH(1,1) model for simulated data, using the parameters sampled from data annealing SMC
3 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
4 | % N - Size of population of particles
5 | % Sim_Var - Conditional variance of the simulated data
6 | % theta - Parameters sampled from Data Annealing SMC: N samples from different amount of available observations: y_{1:t}, t=1,..,T
7 | % theta_particle - Transformed theta
8 | % W - The weights of weighted samples, corresponding to the above thetas/theta_particles
9 |
10 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
11 | % h_predict - N samples of the one-step ahead forecast of conditional variances
12 | % CI_low_h_t - 0.025 quantile of the above posterior predictive sample of the conditional variances
13 | % CI_up_h_t - 0.975 quantile of the above posterior predictive sample of the conditional variances
14 | % Predict_median_h_t - Median of the above posterior predictive sample of the conditional variances
15 | % ELPD_LFO_true_var - Approximated elpd_LFO
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 |
18 | %load('Prediction_TrueGjr_unif_sim.mat');
19 | %load('Data_Sims_unif_gjr_h_gjr.mat');
20 |
21 | %loading simulated data
22 | load('Data_Sims_gjr.mat');
23 | rate_return = Simulation;
24 | rate_return(isnan(rate_return)) = 0;
25 |
26 | %Ranges for parameters transformation
27 | mylims = zeros(5,2);
28 | mylims(:,1) = zeros(5,1);
29 | mylims(4,1) = 0.5;
30 | mylims(5,1) = -0.9;
31 | mylims(:,2) = [0.3; 0.3; 0.3; 0.99; 0.9];
32 |
33 | T = length(rate_return); % total number of data
34 | start=200; % Starting time point for making prediction; the time point for the 1st predicted conditional variance is: t = start+1
35 | PT=T-start;
36 |
37 | CI_low_h_t=zeros(1,PT);
38 | CI_up_h_t=zeros(1,PT);
39 | Predict_median_h_t=zeros(1,PT);
40 | params=theta;
41 | h_predict = zeros(N,PT);
42 | h_gjr = Sim_Var;
43 |
44 | parfor t=1:PT
45 | fprintf('Just starting with the %ith prediction.\n',t);
46 |
47 | [~,h] = gjrloglike_parallel_unif(theta_particle(:,:,t+start-1),rate_return(1:t+start-1),mylims);
48 |
49 | h_t = params(:,1,t+start-1)+params(:,2,t+start-1).*(rate_return(t+start-1)-params(:,5,t+start-1)).^2 ...
50 | + params(:,3,t+start-1).*((rate_return(t+start-1)-params(:,5,t+start-1))<0).*(rate_return(t+start-1)-params(:,5,t+start-1)).^2 ...
51 | + params(:,4,t+start-1).*h(:,end);
52 | h_predict(:,t)=h_t;
53 |
54 |
55 | CI_low_h_t(:,t)=quantile_weighted( h_t,0.025,W(:,t+start));
56 | CI_up_h_t(:,t)=quantile_weighted( h_t,0.975,W(:,t+start));
57 | Predict_median_h_t(:,t)=quantile_weighted(h_t,0.5,W(:,t+start));
58 |
59 | [likelihood_predict_TureData_var(t),x] = ksdensity(h_t,h_gjr(start+t),'Weights',W(:,start+t));
60 |
61 |
62 | end
63 |
64 | ELPD_LFO_true_var = sum(log(likelihood_predict_TureData_var));
65 |
66 | save('gjr_BayesPostPredict_SimulatedData_ELPD_var.mat');
67 | end
68 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
69 |
70 |
71 |
72 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
73 | function [loglikelihood,h] = gjrloglike_parallel_unif(params,data,mylims)
74 | % Computing the likelihood of the time series under GJR-GARCH dynamics, given observed data and model parameters
75 | %====================================================================================================================
76 |
77 | [N,d]=size(params);
78 |
79 | % Transforming back to original scale;
80 | for j = 1:d
81 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
82 | end
83 |
84 | %%%%%%%%%%%%%%%%%%%
85 | %SETTING PARAMETERS
86 | %%%%%%%%%%%%%%%%%%%
87 | h0 = params(:,1); %alpha_0
88 | phi_p = params(:,2); %phi
89 | phi_n = params(:,3); %phi_{-}
90 | rho_h = params(:,4); %beta_sigma
91 | r_bar = params(:,5); %mu
92 |
93 | t1=10e-6;
94 |
95 | n = length(data);
96 | h = zeros(N,n);
97 |
98 | h(:,1) = max(h0./(1-phi_p-phi_n./2-rho_h),t1);
99 |
100 | logl=zeros(N,1);
101 | logl = - .5*log(h(:,1)) - .5*(data(1)-r_bar).^2./h(:,1) -.5*log(2*pi*ones(N,1));
102 |
103 | for t = 2:n
104 | inds_neg = find(data(t-1)-r_bar<0);
105 | inds_pos = find(data(t-1)-r_bar>0);
106 | h(inds_pos,t) = h0(inds_pos) + rho_h(inds_pos).*h(inds_pos,t-1)+ phi_p(inds_pos).*(data(t-1)-r_bar(inds_pos)).^2;
107 | h(inds_neg,t) = h0(inds_neg) + rho_h(inds_neg).*h(inds_neg,t-1)+ phi_p(inds_neg).*(data(t-1)-r_bar(inds_neg)).^2+ phi_n(inds_neg).*(data(t-1)-r_bar(inds_neg)).^2;
108 | logl_tmp = - .5*log(h(:,t)) - .5*(data(t)-r_bar).^2./h(:,t) -.5*log(2*pi*ones(N,1));
109 | logl=logl+logl_tmp;
110 |
111 | end
112 |
113 | loglikelihood=logl;
114 | end
115 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
116 |
117 |
118 |
119 |
--------------------------------------------------------------------------------
/BEGE/MCMC_biased.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_transf, loglike, logprior] = MCMC_biased(N,start_smc,cov_smc,h)
2 | %MCMC for estimating the BEGE model's parameters;
3 |
4 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
5 | % N - Total number of MCMC iterations
6 | % start_smc - Initial values of parameters; can be chosen from the SMC
7 | %              population to accelerate the convergence
8 | % cov_smc - Covariance of the SMC's final population of sampled
9 | % parameters in transformed scale
10 | % h - optimal scale from SMC used to tune the covariance
11 |
12 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
13 | % theta - N MCMC samples of the parameters
14 | % theta_transf - The corresponding samples on the transformed scale
15 | % loglike - Log likelihood of the BEGE model, corresponding to the above thetas
16 | % logprior - Log prior of the BEGE model, corresponding to the above thetas
17 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
18 |
19 | %loading monthly S&P stock log return
20 | load('Data.mat');
21 | rate_return=MonthlyReturns2018;
22 | rate_return(isnan(rate_return))=0;%Set any empty value as zero
23 |
24 | %Setting ranges for parameters to do transformation
25 | mylims = zeros(11,2);
26 | mylims(:,1) = zeros(11,1);
27 | mylims(9,1) = -0.2;
28 | mylims(11,1) = -0.9;
29 | mylims(:,2) = [0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9];
30 |
31 | n = 11; % dimension of theta
32 |
33 | theta = zeros(N,n);
34 | theta_transf = zeros(N,n);
35 | logprior = zeros(N,1);
36 | loglike = zeros(N,1);
37 |
38 | theta(1,:)=start_smc; %set initial values as the ones from the final population of SMC
39 |
40 | for j = 1:n
41 | theta_transf(1,j) = log((theta(1,j) - mylims(j,1))./(mylims(j,2) - theta(1,j)));
42 | end
43 |
44 | logprior_current=log_prior_unif(theta_transf(1,:),mylims);
45 | logprior(1) = logprior_current;
46 | [~, loglike_current,~,~,~] = bege_gjrgarch_likelihood(theta_transf(1,:),rate_return,mylims);
47 | loglike(1) = loglike_current;
48 |
49 | for i=1:N
50 | fprintf('Just starting with the %ith move.\n',i);
51 | theta_transf_prop=mvnrnd( theta_transf(i,:),h^2*cov_smc);
52 | logprior_prop=log_prior_unif(theta_transf_prop,mylims);
53 |
54 | if isinf(logprior_prop) ==1
55 | theta_transf(i+1,:) = theta_transf(i,:);
56 | theta(i+1,:) = theta(i,:);
57 | loglike(i+1) = loglike(i);
58 | logprior(i+1) = logprior(i);
59 | continue;
60 | end
61 |
62 | [~, loglike_prop,~,~,~] = bege_gjrgarch_likelihood(theta_transf_prop,rate_return,mylims);
63 |
64 | %Acceptance ratio
65 | Alpha=exp(loglike_prop - loglike_current + logprior_prop - logprior_current);
66 |
67 |     if rand
[... remainder of MCMC_biased.m omitted in this listing ...]
--------------------------------------------------------------------------------
/BEGE/bege_BayesPostPredict_volatility_unbiased_is.m:
--------------------------------------------------------------------------------
[... opening lines omitted in this listing ...]
72 |     inds_pos=find(rate_return(t+start-1)-r_bar>=0);
73 |
74 | if isempty(inds_neg)==0
75 | p_t(inds_neg)=max(p_bar(inds_neg)+rho_p(inds_neg).*ptseries(inds_neg,end)+...
76 | phi_pn(inds_neg).*(((rate_return(t+start-1)-r_bar(inds_neg)).^2)./(2*(tp(inds_neg).^2))),t1);
77 | n_t(inds_neg)=max(n_bar(inds_neg)+rho_n(inds_neg).*ntseries(inds_neg,end)+...
78 | phi_nn(inds_neg).*(((rate_return(t+start-1)-r_bar(inds_neg)).^2)./(2*(tn(inds_neg).^2))),t1);
79 | end
80 |
81 | if isempty(inds_pos)==0
82 | p_t(inds_pos)=max(p_bar(inds_pos)+rho_p(inds_pos).*ptseries(inds_pos,end)+...
83 | phi_pp(inds_pos).*(((rate_return(t+start-1)-r_bar(inds_pos)).^2)./(2*(tp(inds_pos).^2))),t1);
84 | n_t(inds_pos)=max(n_bar(inds_pos)+rho_n(inds_pos).*ntseries(inds_pos,end)+...
85 | phi_np(inds_pos).*(((rate_return(t+start-1)-r_bar(inds_pos)).^2)./(2*(tn(inds_pos).^2))),t1);
86 | end
87 |
88 | h_t = tp.^2.*p_t+tn.^2.*n_t;
89 | h_predict(:,t) = h_t;
90 |
91 | CI_low_h_t(:,t)=quantile_weighted( h_predict(:,t),0.025,W(:,t+start));
92 | CI_up_h_t(:,t)=quantile_weighted( h_predict(:,t),0.975,W(:,t+start));
93 | Predict_median_h_t(:,t)=quantile_weighted(h_predict(:,t),0.5,W(:,t+start));
94 |
95 | end
96 | %save('results_BayesPostPredict_RealData_unbiased_is_var.mat');
97 | end
98 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
99 |
100 |
101 |
102 | function [loglikelihood,ptseries,ntseries] = bege_gjrgarch_likelihood_unbiased_is(params,data,mylims)
103 | % Computing the likelihood of the time series under BEGE-GJR-GARCH dynamics, given observed data and model parameters
104 | %====================================================================================================================
105 |
106 | % Transforming back to original scale;
107 | for j = 1:length(params)
108 | params(j) = (mylims(j,2)'.*exp(params(j))+mylims(j,1)')/(exp(params(j))+1);
109 | end
110 |
111 | %%%%%%%%%%%%%%%%%%%
112 | %SETTING PARAMETERS
113 | %%%%%%%%%%%%%%%%%%%
114 | r_bar = params(11); % (mu)
115 | p_bar=params(1); % (p_0)
116 | tp=params(2); % (sigma_p)
117 | rho_p=params(3); %
118 | phi_pp=params(4); %
119 | phi_pn=params(5); %
120 | n_bar=params(6); % (n_0)
121 | tn=params(7); % (sigma_n)
122 | rho_n=params(8); %
123 | phi_np=params(9); %
124 | phi_nn=params(10); %
125 |
126 |
127 | %Computing underlying pt and nt processes
128 | ptseries=zeros(length(data),1);
129 | ntseries=zeros(length(data),1);
130 |
131 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
132 | %COMPUTING THE LOG-LIKELIHOOD
133 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
134 |
135 | loglikelihood=0;
136 | t1=10e-1;
137 |
138 | previous_p=max(p_bar/(1-rho_p-(phi_pp+phi_pn)/2),t1);
139 | previous_n=max(n_bar/(1-rho_n-(phi_np+phi_nn)/2),t1);
140 | ptseries(1)=max(previous_p,t1);
141 | ntseries(1)=max(previous_n,t1);
142 |
143 | loglikelihood=loglikelihood+loglikedgam_unbiased_is(data(1)-r_bar,ptseries(1),ntseries(1),tp,tn);
144 |
145 | for t=2:length(data)
146 | if ((data(t-1)-r_bar)<0)
147 | p_t=max(p_bar+rho_p*previous_p+...
148 | phi_pn*(((data(t-1)-r_bar)^2)/(2*(tp^2))),t1);
149 | n_t=max(n_bar+rho_n*previous_n+...
150 | phi_nn*(((data(t-1)-r_bar)^2)/(2*(tn^2))),t1);
151 | else
152 | p_t=max(p_bar+rho_p*previous_p+...
153 | phi_pp*(((data(t-1)-r_bar)^2)/(2*(tp^2))),t1);
154 | n_t=max(n_bar+rho_n*previous_n+...
155 | phi_np*(((data(t-1)-r_bar)^2)/(2*(tn^2))),t1);
156 | end
157 | obs=data(t)-r_bar;
158 | tmp = loglikedgam_unbiased_is(obs,p_t,n_t,tp,tn);
159 | loglikelihood=loglikelihood+tmp;
160 |
161 | ptseries(t)=p_t;
162 | ntseries(t)=n_t;
163 | previous_p=p_t;
164 | previous_n=n_t;
165 | end
166 |
167 | end
168 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
169 |
170 | function loglikedgam = loglikedgam_unbiased_is(z,p,n,tp,tn)
171 | % This function unbiasedly estimates the likelihood of an observation under
172 | % the BEGE density by using importance sampling.
173 | % The likelihood can be exactly computed in the cases of p=n=1; p=1&n>1;
174 | % n=1&p>1.
175 | %
176 | % Input:
177 | % z - the point at which the pdf is evaluated
178 | % p - good environment shape parameter
179 | % n - bad environment shape parameter
180 | % tp - good environment scale parameter
181 | % tn - bad environment scale parameter
182 | %
183 | % Output:
184 | % loglikedgam - the loglikelihood of the observations
185 |
186 |
187 | wp_bar = -p*tp;
188 | wn_bar = -n*tn;
189 | sigma = 1/tp + 1/tn;
190 | delta = max(wp_bar, wn_bar + z);
191 | N = 1000;
192 |
193 | if p==1 && n==1
194 | loglikedgam = -log(tp) - log(tn) + wp_bar/tp + (z+wn_bar)/tn - log(sigma)...
195 | - sigma*delta;
196 | return;
197 | end
198 |
199 | if p==1 && n>1
200 |
201 | if (delta == wn_bar + z)
202 | loglikedgam = -log(tp) - n*log(tn) + wp_bar/tp - (z + wn_bar)/tp - n*log(sigma);
203 | return;
204 | else
205 | loglikedgam = -log(tp) - n*log(tn) + wp_bar/tp - (z + wn_bar)/tp - n*log(sigma) + log(1 - gamcdf(delta - (wn_bar + z), n, 1/sigma));
206 | return;
207 | end
208 |
209 | end
210 |
211 | if n==1 && p>1
212 | if (delta == wp_bar)
213 | loglikedgam = -log(tn) - p*log(tp) + (z + wn_bar)/tn - wp_bar/tn - p*log(sigma);
214 | return;
215 | else
216 | loglikedgam = -log(tn) - p*log(tp) + (z + wn_bar)/tn - wp_bar/tn - p*log(sigma) + log(1 - gamcdf(delta - wp_bar, p, 1/sigma));
217 | return;
218 | end
219 | end
220 |
221 | % n>1 && p>1
222 | bneg_mode = sigma*(z+wn_bar+wp_bar)+(p+n-2);
223 | constant = sigma*(z*wp_bar + wp_bar*wn_bar) + (p-1)*(z+wn_bar) + (n-1)*wp_bar;
224 | mode = (bneg_mode + sqrt(bneg_mode^2 - 4*sigma*constant))/(2*sigma);
225 |
226 | variance = -1/((1-p)/(mode-wp_bar)^2 + (1-n)/(mode-z-wn_bar)^2);
227 | bneg = 2*variance + (mode - delta)^2;
228 | a = (bneg + sqrt(bneg^2 - 4*variance^2))/(2*variance);
229 | b = (mode - delta)/(a-1);
230 | wp = gamrnd(a,b,N,1) + delta;
231 | log_target = -gammaln(p) - p*log(tp) + (p-1)*log(wp-wp_bar) - (wp-wp_bar)/tp...
232 | - gammaln(n) - n*log(tn) + (n-1)*log(wp-z-wn_bar) - (wp-z-wn_bar)/tn;
233 | log_importance = -a*log(b) - gammaln(a) +(a-1)*log(wp - delta) -(wp - delta)/b;
234 | logw = log_target - log_importance;
235 | loglikedgam = -log(N) + logsumexp(logw);
236 |
237 | end
--------------------------------------------------------------------------------
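The loglikedgam_unbiased_is function above estimates the BEGE density by importance sampling and averages the weights in log space via -log(N) + logsumexp(logw). As a sanity check of that pattern, here is a toy importance-sampling estimate (an illustration using the folder's logsumexp.m, not repository code) of an integral whose true value is known:

% Estimate the integral of the standard normal pdf (true value 1) with draws
% from a Normal(1, 2^2) proposal, working entirely in log space.
rng(3);
Ns   = 1e5;
mu_g = 1; sd_g = 2;                         % proposal parameters
w    = mu_g + sd_g*randn(Ns,1);             % draws from the proposal
logf = -0.5*log(2*pi) - 0.5*w.^2;           % log target integrand (standard normal pdf)
logg = -0.5*log(2*pi*sd_g^2) - 0.5*((w - mu_g)/sd_g).^2;   % log proposal density
logw = logf - logg;                         % log importance weights
log_p_hat = -log(Ns) + logsumexp(logw);     % should be close to log(1) = 0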
/BEGE/MCMC_unbiased_is.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_transf, loglike, logprior] = MCMC_unbiased_is(N,start_smc,cov_smc,h)
2 | %MCMC for estimating the BEGE model's parameters;
3 | %The likelihood is unbiasedly estimated via importance sampling
4 |
5 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
6 | % N - Total number of MCMC iterations
7 | % start_smc - Initial values of parameters; can be chosen from the SMC
9 | %              population to accelerate the convergence
9 | % cov_smc - Covariance of the SMC's final population of sampled
10 | % parameters in transformed scale
11 | % h - optimal scale from SMC used to tune the covariance
12 |
13 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
14 | % theta - N MCMC samples of the parameters
15 | % theta_transf - The corresponding samples on the transformed scale
16 | % loglike - Log likelihood of the BEGE model, corresponding to the above thetas
17 | % logprior - Log prior of the BEGE model, corresponding to the above thetas
18 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
19 |
20 | %loading monthly S&P stock log return
21 | load('Data.mat');
22 | rate_return=MonthlyReturns2018;
23 | rate_return(isnan(rate_return))=0;%Set any empty value as zero
24 |
25 | %Setting ranges for parameters to do transformation
26 | mylims = zeros(11,2);
27 | mylims(:,1) = zeros(11,1);
28 | mylims(9,1) = -0.2;
29 | mylims(11,1) = -0.9;
30 | mylims(:,2) = [0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9];
31 |
32 | n = 11; % dimension of theta
33 |
34 | theta = zeros(N,n);
35 | theta_transf = zeros(N,n);
36 | logprior = zeros(N,1);
37 | loglike = zeros(N,1);
38 |
39 | theta(1,:)=start_smc; %set initial values as the ones from the final population of SMC
40 |
41 | for j = 1:n
42 | theta_transf(1,j) = log((theta(1,j) - mylims(j,1))./(mylims(j,2) - theta(1,j)));
43 | end
44 |
45 | logprior_current=log_prior_unif(theta_transf(1,:),mylims);
46 | logprior(1) = logprior_current;
47 | [~, loglike_current,~,~,~] = bege_gjrgarch_likelihood_unbiased_is(theta_transf(1,:),rate_return,mylims);
48 | loglike(1) = loglike_current;
49 |
50 | for i=1:N
51 | fprintf('Just starting with the %ith move.\n',i);
52 | theta_transf_prop=mvnrnd( theta_transf(i,:),h^2*cov_smc);
53 | logprior_prop=log_prior_unif(theta_transf_prop,mylims);
54 |
55 | if isinf(logprior_prop) ==1
56 | theta_transf(i+1,:) = theta_transf(i,:);
57 | theta(i+1,:) = theta(i,:);
58 | loglike(i+1) = loglike(i);
59 | logprior(i+1) = logprior(i);
60 | continue;
61 | end
62 |
63 | [~, loglike_prop,~,~,~] = bege_gjrgarch_likelihood_unbiased_is(theta_transf_prop,rate_return,mylims);
64 |
65 | %Acceptance ratio
66 | Alpha=exp(loglike_prop - loglike_current + logprior_prop - logprior_current);
67 |
68 |     if rand
[... intervening lines omitted in this listing ...]
198 | % The likelihood can be exactly computed in the cases of p=n=1; p=1&n>1;
199 | % n=1&p>1.
200 | %
201 | % Input:
202 | % z - the point at which the pdf is evaluated
203 | % p - good environment shape parameter
204 | % n - bad environment shape parameter
205 | % tp - good environment scale parameter
206 | % tn - bad environment scale parameter
207 | %
208 | % Output:
209 | % loglikedgam - the loglikelihood of the observations
210 |
211 |
212 | wp_bar = -p*tp;
213 | wn_bar = -n*tn;
214 | sigma = 1/tp + 1/tn;
215 | delta = max(wp_bar, wn_bar + z);
216 | N = 1000;
217 |
218 | if p==1 && n==1
219 | loglikedgam = -log(tp) - log(tn) + wp_bar/tp + (z+wn_bar)/tn - log(sigma)...
220 | - sigma*delta;
221 | return;
222 | end
223 |
224 | if p==1 && n>1
225 |
226 | if (delta == wn_bar + z)
227 | loglikedgam = -log(tp) - n*log(tn) + wp_bar/tp - (z + wn_bar)/tp - n*log(sigma);
228 | return;
229 | else
230 | loglikedgam = -log(tp) - n*log(tn) + wp_bar/tp - (z + wn_bar)/tp - n*log(sigma) + log(1 - gamcdf(delta - (wn_bar + z), n, 1/sigma));
231 | return;
232 | end
233 |
234 | end
235 |
236 | if n==1 && p>1
237 | if (delta == wp_bar)
238 | loglikedgam = -log(tn) - p*log(tp) + (z + wn_bar)/tn - wp_bar/tn - p*log(sigma);
239 | return;
240 | else
241 | loglikedgam = -log(tn) - p*log(tp) + (z + wn_bar)/tn - wp_bar/tn - p*log(sigma) + log(1 - gamcdf(delta - wp_bar, p, 1/sigma));
242 | return;
243 | end
244 | end
245 |
246 | % n>1 && p>1
247 | bneg_mode = sigma*(z+wn_bar+wp_bar)+(p+n-2);
248 | constant = sigma*(z*wp_bar + wp_bar*wn_bar) + (p-1)*(z+wn_bar) + (n-1)*wp_bar;
249 | mode = (bneg_mode + sqrt(bneg_mode^2 - 4*sigma*constant))/(2*sigma);
250 |
251 | variance = -1/((1-p)/(mode-wp_bar)^2 + (1-n)/(mode-z-wn_bar)^2);
252 | bneg = 2*variance + (mode - delta)^2;
253 | a = (bneg + sqrt(bneg^2 - 4*variance^2))/(2*variance);
254 | b = (mode - delta)/(a-1);
255 | wp = gamrnd(a,b,N,1) + delta;
256 | log_target = -gammaln(p) - p*log(tp) + (p-1)*log(wp-wp_bar) - (wp-wp_bar)/tp...
257 | - gammaln(n) - n*log(tn) + (n-1)*log(wp-z-wn_bar) - (wp-z-wn_bar)/tn;
258 | log_importance = -a*log(b) - gammaln(a) +(a-1)*log(wp - delta) -(wp - delta)/b;
259 | logw = log_target - log_importance;
260 | loglikedgam = -log(N) + logsumexp(logw);
261 |
262 | end
263 |
--------------------------------------------------------------------------------
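MCMC_unbiased_is.m plugs the unbiased importance-sampling estimate of the BEGE log-likelihood into the Metropolis-Hastings ratio Alpha computed above. The self-contained toy below (an assumed illustration of the standard pseudo-marginal random-walk Metropolis pattern, not repository code) shows the essential mechanics: the noisy log-likelihood estimate of the current point is stored and re-used until a proposal is accepted.

% Toy pseudo-marginal random-walk Metropolis targeting a standard normal
% with a noisy (unbiased on the exponential scale) log-likelihood estimate.
rng(4);
M   = 5000;                              % MCMC iterations
sd  = 1.0;                               % random-walk step size
s   = 0.1;                               % std. dev. of the log-likelihood noise
est = @(x) -0.5*x^2 + (s*randn - s^2/2); % exp of the added noise has mean 1
x          = zeros(M,1);
ll_current = est(x(1));
for i = 1:M-1
    x_prop  = x(i) + sd*randn;           % symmetric random-walk proposal
    ll_prop = est(x_prop);
    if rand < min(1, exp(ll_prop - ll_current))
        x(i+1) = x_prop;  ll_current = ll_prop;   % accept and keep the new estimate
    else
        x(i+1) = x(i);                            % reject: re-use the stored estimate
    end
end
% mean(x) and var(x) should be close to 0 and 1, respectively.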
/BEGE/bege_BayesPostPredict_volatility_sim_unbiased_is.m:
--------------------------------------------------------------------------------
1 | function [h_predict, CI_low_h_t, CI_up_h_t, Predict_median_h_t, ELPD_LFO_true_var] = bege_BayesPostPredict_volatility_sim_unbiased_is(N, Sim_Var, theta, theta_particle, W)
2 | %One-step ahead forecast of conditional variances, together with LFO-CV, under the BEGE model for simulated data, using the parameters sampled from data annealing SMC
3 | %The likelihood is unbiasedly estimated via importance sampling
4 | %
5 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
6 | % N - Size of population of particles
7 | % Sim_Var - Conditional variance of the simulated data
8 | % theta - Parameters sampled from Data Annealing SMC: N samples from different amount of available observations: y_{1:t}, t=1,..,T
9 | % theta_particle - Transformed theta
10 | % W - The weights of weighted samples, corresponding to the above thetas/theta_particles
11 |
12 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
13 | % h_predict - N samples of the one-step ahead forecast of conditional variances
14 | % CI_low_h_t - 0.025 quantile of the above posterior predictive sample of the conditional variances
15 | % CI_up_h_t - 0.975 quantile of the above posterior predictive sample of the conditional variances
16 | % Predict_median_h_t - Median of the above posterior predictive sample of the conditional variances
17 | % ELPD_LFO_true_var - Approximated elpd_LFO
18 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
19 |
20 | %loading simulated data
21 | load('Data_Sims.mat'); %Simulated data from BEGE model; can be changed to simulated data from other models as desired
22 | rate_return = Simulation;
23 | rate_return(isnan(rate_return)) = 0;
24 |
25 | %Ranges for parameters transformation
26 | mylims = zeros(11,2);
27 | mylims(:,1) = 1e-4.*ones(11,1);
28 | mylims(9,1) = -0.2;
29 | mylims(11,1) = -0.9;
30 | mylims(:,2) = [0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9];
31 |
32 | T = length(rate_return); % total number of data
33 | start=200; % Starting time point for making prediction; the time point for the 1st predicted conditional variance is: t = start+1
34 | PT=T-start;
35 |
36 | CI_low_h_t=zeros(1,PT);
37 | CI_up_h_t=zeros(1,PT);
38 | Predict_median_h_t=zeros(1,PT);
39 | t1=10e-1;
40 | params=theta;
41 | h_predict = zeros(N,PT);
42 | h_bege = Sim_Var;
43 |
44 | parfor t=1:PT
45 | fprintf('Just starting with the %ith prediction.\n',t);
46 |
47 | ptseries_prop = zeros(N,length(rate_return(1:t+start-1)));
48 | ntseries_prop = zeros(N,length(rate_return(1:t+start-1)));
49 | for i=1:N
50 | [~,ptseries_prop(i,:),ntseries_prop(i,:)] = bege_gjrgarch_likelihood_unbiased_is(theta_particle(i,:,t+start-1),rate_return(1:t+start-1),mylims);
51 |
52 | end
53 |
54 | ptseries = ptseries_prop;
55 | ntseries = ntseries_prop;
56 |
57 | p_bar = params(:,1,t+start-1);
58 | tp = params(:,2,t+start-1);
59 | rho_p = params(:,3,t+start-1);
60 | phi_pp = params(:,4,t+start-1);
61 | phi_pn = params(:,5,t+start-1);
62 | n_bar = params(:,6,t+start-1);
63 | tn = params(:,7,t+start-1);
64 | rho_n = params(:,8,t+start-1);
65 | phi_np = params(:,9,t+start-1);
66 | phi_nn = params(:,10,t+start-1);
67 | r_bar = params(:,11,t+start-1);
68 |
69 | p_t=ptseries(:,end);
70 | n_t=ntseries(:,end);
71 |
72 | %Forecast BEGE - using information at time 't-1' to predict time 't'
73 | inds_neg=find(rate_return(t+start-1)-r_bar<0);
74 | inds_pos=find(rate_return(t+start-1)-r_bar>=0);
75 |
76 | if isempty(inds_neg)==0
77 | p_t(inds_neg)=max(p_bar(inds_neg)+rho_p(inds_neg).*ptseries(inds_neg,end)+...
78 | phi_pn(inds_neg).*(((rate_return(t+start-1)-r_bar(inds_neg)).^2)./(2*(tp(inds_neg).^2))),t1);
79 | n_t(inds_neg)=max(n_bar(inds_neg)+rho_n(inds_neg).*ntseries(inds_neg,end)+...
80 | phi_nn(inds_neg).*(((rate_return(t+start-1)-r_bar(inds_neg)).^2)./(2*(tn(inds_neg).^2))),t1);
81 | end
82 |
83 | if isempty(inds_pos)==0
84 | p_t(inds_pos)=max(p_bar(inds_pos)+rho_p(inds_pos).*ptseries(inds_pos,end)+...
85 | phi_pp(inds_pos).*(((rate_return(t+start-1)-r_bar(inds_pos)).^2)./(2*(tp(inds_pos).^2))),t1);
86 | n_t(inds_pos)=max(n_bar(inds_pos)+rho_n(inds_pos).*ntseries(inds_pos,end)+...
87 | phi_np(inds_pos).*(((rate_return(t+start-1)-r_bar(inds_pos)).^2)./(2*(tn(inds_pos).^2))),t1);
88 | end
89 |
90 | h_t = tp.^2.*p_t+tn.^2.*n_t;
91 | h_predict(:,t) = h_t;
92 |
93 | [likelihood_predict_TureData_var(t),x] = ksdensity(h_t,h_bege(start+t),'Weights',W(:,start+t));
94 |
95 | CI_low_h_t(:,t)=quantile_weighted( h_predict(:,t),0.025,W(:,t+start));
96 | CI_up_h_t(:,t)=quantile_weighted( h_predict(:,t),0.975,W(:,t+start));
97 | Predict_median_h_t(:,t)=quantile_weighted(h_predict(:,t),0.5,W(:,t+start));
98 |
99 | end
100 | ELPD_LFO_true_var = sum(log(likelihood_predict_TureData_var));
101 | %save('results_BayesPostPredict_SimulatedData_ELPD_var_unbiased.mat');
102 | end
103 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
104 |
105 |
106 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
107 | function [loglikelihood,ptseries,ntseries] = bege_gjrgarch_likelihood_unbiased_is(params,data,mylims)
108 | % Computing the likelihood of the time series under BEGE-GJR-GARCH dynamics, given observed data and model parameters
109 | %====================================================================================================================
110 |
111 | % Transforming back to original scale;
112 | for j = 1:length(params)
113 | params(j) = (mylims(j,2)'.*exp(params(j))+mylims(j,1)')/(exp(params(j))+1);
114 | end
115 |
116 | %%%%%%%%%%%%%%%%%%%
117 | %SETTING PARAMETERS
118 | %%%%%%%%%%%%%%%%%%%
119 | r_bar = params(11); % (mu)
120 | p_bar=params(1); % (p_0)
121 | tp=params(2); % (sigma_p)
122 | rho_p=params(3); %
123 | phi_pp=params(4); %
124 | phi_pn=params(5); %
125 | n_bar=params(6); % (n_0)
126 | tn=params(7); % (sigma_n)
127 | rho_n=params(8); %
128 | phi_np=params(9); %
129 | phi_nn=params(10); %
130 |
131 |
132 | %Computing underlying pt and nt processes
133 | ptseries=zeros(length(data),1);
134 | ntseries=zeros(length(data),1);
135 |
136 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
137 | %COMPUTING THE LOG-LIKELIHOOD
138 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
139 |
140 | loglikelihood=0;
141 | t1=10e-1;
142 |
143 | previous_p=max(p_bar/(1-rho_p-(phi_pp+phi_pn)/2),t1);
144 | previous_n=max(n_bar/(1-rho_n-(phi_np+phi_nn)/2),t1);
145 | ptseries(1)=max(previous_p,t1);
146 | ntseries(1)=max(previous_n,t1);
147 |
148 | loglikelihood=loglikelihood+loglikedgam_unbiased_is(data(1)-r_bar,ptseries(1),ntseries(1),tp,tn);
149 |
150 | for t=2:length(data)
151 | if ((data(t-1)-r_bar)<0)
152 | p_t=max(p_bar+rho_p*previous_p+...
153 | phi_pn*(((data(t-1)-r_bar)^2)/(2*(tp^2))),t1);
154 | n_t=max(n_bar+rho_n*previous_n+...
155 | phi_nn*(((data(t-1)-r_bar)^2)/(2*(tn^2))),t1);
156 | else
157 | p_t=max(p_bar+rho_p*previous_p+...
158 | phi_pp*(((data(t-1)-r_bar)^2)/(2*(tp^2))),t1);
159 | n_t=max(n_bar+rho_n*previous_n+...
160 | phi_np*(((data(t-1)-r_bar)^2)/(2*(tn^2))),t1);
161 | end
162 | obs=data(t)-r_bar;
163 | tmp = loglikedgam_unbiased_is(obs,p_t,n_t,tp,tn);
164 | loglikelihood=loglikelihood+tmp;
165 |
166 | ptseries(t)=p_t;
167 | ntseries(t)=n_t;
168 | previous_p=p_t;
169 | previous_n=n_t;
170 | end
171 |
172 | end
173 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
174 |
175 | function loglikedgam = loglikedgam_unbiased_is(z,p,n,tp,tn)
176 | % This function unbiasedly estimates the likelihood of an observation under
177 | % the BEGE density by using importance sampling.
178 | % The likelihood can be exactly computed in the cases of p=n=1; p=1&n>1;
179 | % n=1&p>1.
180 | %
181 | % Input:
182 | % z - the point at which the pdf is evaluated
183 | % p - good environment shape parameter
184 | % n - bad environment shape parameter
185 | % tp - good environment scale parameter
186 | % tn - bad environment scale parameter
187 | %
188 | % Output:
189 | % loglikedgam - the loglikelihood of the observation
190 |
191 |
192 | wp_bar = -p*tp;
193 | wn_bar = -n*tn;
194 | sigma = 1/tp + 1/tn;
195 | delta = max(wp_bar, wn_bar + z);
196 | N = 1000;
197 |
198 | if p==1 && n==1
199 | loglikedgam = -log(tp) - log(tn) + wp_bar/tp + (z+wn_bar)/tn - log(sigma)...
200 | - sigma*delta;
201 | return;
202 | end
203 |
204 | if p==1 && n>1
205 |
206 | if (delta == wn_bar + z)
207 | loglikedgam = -log(tp) - n*log(tn) + wp_bar/tp - (z + wn_bar)/tp - n*log(sigma);
208 | return;
209 | else
210 | loglikedgam = -log(tp) - n*log(tn) + wp_bar/tp - (z + wn_bar)/tp - n*log(sigma) + log(1 - gamcdf(delta - (wn_bar + z), n, 1/sigma));
211 | return;
212 | end
213 |
214 | end
215 |
216 | if n==1 && p>1
217 | if (delta == wp_bar)
218 | loglikedgam = -log(tn) - p*log(tp) + (z + wn_bar)/tn - wp_bar/tn - p*log(sigma);
219 | return;
220 | else
221 | loglikedgam = -log(tn) - p*log(tp) + (z + wn_bar)/tn - wp_bar/tn - p*log(sigma) + log(1 - gamcdf(delta - wp_bar, p, 1/sigma));
222 | return;
223 | end
224 | end
225 |
226 | % n>1 && p>1
227 | bneg_mode = sigma*(z+wn_bar+wp_bar)+(p+n-2);
228 | constant = sigma*(z*wp_bar + wp_bar*wn_bar) + (p-1)*(z+wn_bar) + (n-1)*wp_bar;
229 | mode = (bneg_mode + sqrt(bneg_mode^2 - 4*sigma*constant))/(2*sigma);
230 |
231 | variance = -1/((1-p)/(mode-wp_bar)^2 + (1-n)/(mode-z-wn_bar)^2);
232 | bneg = 2*variance + (mode - delta)^2;
233 | a = (bneg + sqrt(bneg^2 - 4*variance^2))/(2*variance);
234 | b = (mode - delta)/(a-1);
235 | wp = gamrnd(a,b,N,1) + delta;
236 | log_target = -gammaln(p) - p*log(tp) + (p-1)*log(wp-wp_bar) - (wp-wp_bar)/tp...
237 | - gammaln(n) - n*log(tn) + (n-1)*log(wp-z-wn_bar) - (wp-z-wn_bar)/tn;
238 | log_importance = -a*log(b) - gammaln(a) +(a-1)*log(wp - delta) -(wp - delta)/b;
239 | logw = log_target - log_importance;
240 | loglikedgam = -log(N) + logsumexp(logw);
241 |
242 | end
243 |
--------------------------------------------------------------------------------
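
The weighted posterior-predictive summaries computed inside the parfor loop above reduce to a weighted kernel density evaluation (one term of the approximated elpd_LFO) and weighted quantiles. Below is a minimal sketch with stand-in values that mirrors the ksdensity and quantile_weighted calls appearing in the function; the numbers are placeholders, not outputs of the sampler.

% Stand-in values for a single forecast horizon (illustrative only)
N      = 1000;
h_t    = 0.002 + 0.001*randn(N,1).^2;   % placeholder predictive draws of the variance
W_t    = ones(N,1)/N;                   % placeholder particle weights
h_true = 0.0025;                        % placeholder "true" (simulated) variance

% one term of the approximated elpd_LFO: log of the weighted kernel density at h_true
lp_t = log(ksdensity(h_t, h_true, 'Weights', W_t));

% weighted 95% predictive interval and median, mirroring quantile_weighted(x,q,w)
ci_low = quantile_weighted(h_t, 0.025, W_t);
ci_up  = quantile_weighted(h_t, 0.975, W_t);
med    = quantile_weighted(h_t, 0.5,   W_t);
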
/GARCH/garch_SMC_RW_LikeAnneal_parallel.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_particle, loglike, logprior, gamma, log_evidence] = garch_SMC_RW_LikeAnneal_parallel(N)
2 | %SMC using vectorisation and parallelisation to estimate the GARCH(1,1) model's
3 | %parameters;
4 | %The sequence of distributions is constructed using the likelihood annealing method
5 |
6 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
7 | % N - Size of population of particles
8 |
9 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
10 | % theta - N samples from each temperature
11 | % theta_particle - N transformed samples from each temperature
12 | % loglike - Log likelihood of the GARCH(1,1) model, corresponding to the above thetas
13 | % logprior - Log prior of the GARCH(1,1) model, corresponding to the above thetas
14 | % gamma - The temperatures from likelihood annealing strategy
15 | % log_evidence - The estimate of log evidence used for model selection
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 |
18 | %loading monthly S&P stock log return
19 | load('Data.mat');
20 | rate_return = MonthlyReturns2018;
21 | rate_return(isnan(rate_return)) = 0;
22 |
23 | %Setting ranges for parameters to do transformation
24 | mylims = zeros(4,2);
25 | mylims(3,1) = 0.2;
26 | mylims(4,1) = -0.9;
27 | mylims(:,2) = [0.3; 0.5; 0.99; 0.9];
28 |
29 | %Starting parallel pool
30 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
31 | quthpc = parcluster('local');
32 | parpool(quthpc);
33 | poolsize = quthpc.NumWorkers;
34 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
35 |
36 | %Initialising
37 | t = 1;
38 | Num_Parameter = 4; % dimension of theta
39 | log_evidence = 0;
40 | gamma = 0; % temperatures
41 | logw_previous = log(ones(N,1)*(1/N));
42 | h = 0.1:0.1:1; % For choosing the optimal scale and thus reducing the number of MCMC repeats.
43 |
44 | %Transform theta
45 | theta = zeros(N, Num_Parameter);
46 | theta_particle = zeros(N, Num_Parameter);
47 | for j = 1:Num_Parameter
48 | theta(:,j) = unifrnd(mylims(j,1),mylims(j,2),[N,1]);
49 | theta_particle(:,j) = log((theta(:,j) - mylims(j,1))./(mylims(j,2) - theta(:,j)));
50 | end
51 |
52 | %calculating initial log prior
53 | logprior = garch_logPrior_parallel_fv(theta_particle,mylims);
54 |
55 |
56 | inds=1:N;
57 | while isempty(inds)==0 %this while loop is used to eliminate any infinite values from the simulation
58 | for j = 1:Num_Parameter
59 | theta(inds,j) = unifrnd(mylims(j,1),mylims(j,2),[length(inds),1]);
60 | theta_particle(inds,j) = log((theta(inds,j) - mylims(j,1))./(mylims(j,2) - theta(inds,j)));
61 | end
62 | logprior(inds) = garch_logPrior_parallel_fv(theta_particle(inds,:),mylims);
63 | inds = find(isinf(logprior));
64 | end
65 |
66 | %calculating initial log likelihood
67 | [loglike,~] = garch_loglike_parallel(theta_particle, rate_return, mylims);
68 |
69 | while gamma(t) < 1
70 | %Testing gamma=1
71 | logw = logw_previous + (1-gamma(t))*loglike(:,t);
72 | w = logw - max(logw); %stability
73 | w = exp(w);
74 | w = w/sum(w);
75 | ESS1 = 1/sum(w.^2);
76 |
77 | % Choosing next temperature
78 | if (ESS1 >= N/2)
79 | gamma(t+1) = 1;
80 | else
81 | %use bisection method to find the sequence of gamma
82 | fun = @(gamma_Current)Gamma(gamma_Current,gamma(t),N,loglike(:,t),logw_previous);
83 | interval = [gamma(t) 1];
84 | gamma_Current = fzero(fun,interval);
85 | gamma(t+1) = gamma_Current;
86 | end
87 | fprintf('The current temperature is %.3f.\n',gamma(t+1));
88 |
89 | %Substitute the value of just calculated gamma(t+1)
90 | logw = logw_previous +(gamma(t+1)-gamma(t))*loglike(:,t);
91 | log_evidence = log_evidence + logsumexp(logw);
92 | w = logw -max(logw);
93 | w = exp(w);
94 | w = w/sum(w);
95 |
96 | %systematic resampling - duplicating particles with larger weights
97 | u=rand;
98 | indices = zeros(N,1);
99 | cumsum_w = cumsum(w);
100 | for i=1:N
101 | A = find(u<cumsum_w);
102 | indices(i) = A(1);
103 | u=u+1/N;
104 | if u>1
105 | u=u-1;
106 | end
107 | end
108 | theta_particle(:,:,t) = theta_particle(indices,:,t);
109 | loglike(:,t) = loglike(indices,t);
110 | logprior(:,t) = logprior(indices,t);
111 | %%% after resampling, all the particles will have equal weights %%%
112 |
113 | %(Move with MCMC Kernel)
114 | cov_rw = cov(theta_particle(:,:,t)); %covariance of resampled particles
115 | Cov_inv = cov_rw^(-1); % inverse of the above covariance
116 |
117 | % compute the Mahalanobis distance from the sample before moving
118 | [~,dists] = rangesearch(theta_particle(:,:,t), theta_particle(:,:,t),inf,'distance','mahalanobis','cov',cov_rw);
119 | dists = cell2mat(dists);
120 | median_dist = median(dists(:));
121 |
122 | h_ind = mod(randperm(N),length(h))'+1;
123 | h_all = h(h_ind);
124 | ESJD = zeros(N,1);
125 |
126 | %MVN RW
127 | parfor i=1:N
128 | theta_particle_prop(i,:) = mvnrnd(theta_particle(i,:,t),h_all(i)^2*cov_rw);
129 | end
130 | logprior_prop = garch_logPrior_parallel_fv(theta_particle_prop,mylims);
131 |
132 | inds = find(isinf(logprior_prop)==0);
133 | while isempty(inds) == 0
134 | %this while loop is used to eliminate any infinite values from the simulation
135 | for j = 1:length(inds)
136 | theta_particle_prop(inds(j),:) = mvnrnd(theta_particle(inds(j),:,t),h_all(inds(j))^2*cov_rw);
137 | end
138 | logprior_prop(inds) = garch_logPrior_parallel_fv(theta_particle_prop(inds,:),mylims);
139 | inds = find(isinf(logprior_prop));
140 | end
141 |
142 | percore = ceil(N/poolsize);
143 | parfor core = 1:poolsize %Divide Sample into different 'poolsize' groups
144 | current_core = (percore*(core-1) + 1):min(percore*core,N);
145 | [loglike_prop_cell{core},~] = garch_loglike_parallel(theta_particle_prop(current_core,:),rate_return,mylims);
146 | end
147 | loglike_prop = loglike_prop_cell{1};
148 | for core = 2:poolsize
149 | loglike_prop = [loglike_prop; loglike_prop_cell{core}];
150 | end
151 |
152 | log_mh = gamma(t+1)*loglike_prop - gamma(t+1)*loglike(:,t) + logprior_prop - logprior(:,t);
153 |
154 | acc_probs = exp(log_mh);
155 |
156 | parfor i=1:N
157 | ESJD(i) = ((theta_particle(i,:,t)-theta_particle_prop(i,:))*Cov_inv*(theta_particle(i,:,t)-theta_particle_prop(i,:))')*acc_probs(i);
158 | end
159 |
160 | toacc_sub = find(rand(N,1) < acc_probs);
161 | toacc = toacc_sub;
162 | theta_particle(toacc,:,t)=theta_particle_prop(toacc,:);
163 | loglike(toacc,t)=loglike_prop(toacc);
164 | logprior(toacc,t)=logprior_prop(toacc);
165 |
166 | median_ESJD = accumarray(h_ind,ESJD,[],@median);%Median value of ESJD for different h indices from 1 to 10
167 | ind=find(median_ESJD==max(median_ESJD));
168 | h_opt = h(ind);
169 | fprintf('the scale is %f\n',h_opt);
170 |
171 | dist_move = zeros(N,1);
172 | belowThreshold = true;
173 | R_move = 0;
174 |
175 |
176 | % Getting number of remaining MCMC repeats
177 | %Performing remaining repeats
178 | while belowThreshold
179 | R_move = R_move + 1;
180 |
181 | theta_particle_prop = mvnrnd(theta_particle(:,:,t),h_opt^2*cov_rw);
182 |
183 | logprior_prop = garch_logPrior_parallel_fv(theta_particle_prop,mylims);
184 |
185 | inds = find(isinf(logprior_prop)==0);
186 | while isempty(inds)==0 %this while loop is used to eliminate any infinite values from the simulation
187 | for j = 1:length(inds)
188 | theta_particle_prop(inds(j),:) = mvnrnd(theta_particle(inds(j),:,t),h_opt^2*cov_rw);
189 | end
190 | logprior_prop(inds) = garch_logPrior_parallel_fv(theta_particle_prop(inds,:),mylims);
191 | inds = find(isinf(logprior_prop));
192 | end
193 |
194 | percore = ceil(N/poolsize);
195 | parfor core = 1:poolsize %Divide Sample into different 'poolsize' groups
196 | current_core = (percore*(core-1) + 1):min(percore*core,N);
197 | [loglike_prop_cell{core},~] = garch_loglike_parallel(theta_particle_prop(current_core,:),rate_return,mylims);
198 | end
199 | loglike_prop = loglike_prop_cell{1};
200 | for core = 2:poolsize
201 | loglike_prop = [loglike_prop; loglike_prop_cell{core}];
202 | end
203 |
204 |
205 | log_mh = gamma(t+1)*loglike_prop - gamma(t+1)*loglike(:,t) + logprior_prop - logprior(:,t);
206 |
207 | acc_probs = exp(log_mh);
208 |
209 | toacc_sub = find(rand(N,1) median_dist)>=ceil(0.5*N)
221 | belowThreshold = false;
222 | end
223 | end
224 | fprintf('the value of R_move was %d\n',R_move);
225 |
226 | theta_particle(:,:,t+1) = theta_particle(:,:,t);
227 | loglike(:,t+1) = loglike(:,t);
228 | logprior(:,t+1) = logprior(:,t);
229 |
230 | t = t+1;
231 | end
232 |
233 | %Transforming back to original scale
234 | theta = zeros(N,Num_Parameter,size(theta_particle,3));
235 | parfor j=1:Num_Parameter
236 | theta(:,j,:) = (mylims(j,2)'.*exp(theta_particle(:,j,:))+mylims(j,1)')./(exp(theta_particle(:,j,:))+1);
237 | end
238 |
239 | delete(gcp); %shut down parallel pool
240 |
241 | save('results_garch_LikeAnneal.mat');
242 | end
243 |
244 |
245 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
246 | function [logprior] = garch_logPrior_parallel_fv(theta_particle,mylims)
247 | % Computes the log prior (takes transformed parameters as input)
248 |
249 | sumA = [-eye(4); eye(4); 0 1 1 0];
250 | sumB = [-1e-9; -1e-8; -.2; 0.9; 0.3; 0.5; 0.99; 0.9; 0.9999];
251 |
252 | [N,d] = size(theta_particle);
253 |
254 | % Transforming back to original scale
255 | theta = theta_particle;
256 | for j = 1:d
257 | theta(:,j) = (mylims(j,2)'.*exp(theta(:,j))+mylims(j,1)')./(exp(theta(:,j))+1);
258 | end
259 |
260 | logprior = -inf*ones(N,1);
261 | inds = find(all(sumA*theta'<=sumB*ones(1,N))==1);
262 | logprior(inds) = sum(-theta_particle(inds,:)-2*log(1+exp(-theta_particle(inds,:))),2);
263 |
264 | end
265 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
266 |
267 |
268 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
269 | function [loglikelihood,h] = garch_loglike_parallel(params,data,mylims)
270 | % Computing the likelihood of the time series under GARCH dynamics, given observed data and model parameters
271 | %====================================================================================================================
272 |
273 | [N,d]=size(params);
274 |
275 | % Transforming back to original scale;
276 | for j = 1:d
277 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
278 | end
279 |
280 | %%%%%%%%%%%%%%%%%%%
281 | %SETTING PARAMETERS
282 | %%%%%%%%%%%%%%%%%%%
283 | a0 = params(:,1); %alpha_0
284 | a1 = params(:,2); %alpha_1
285 | b1 = params(:,3); %beta_1
286 | r_bar = params(:,4); %mu
287 |
288 | n = length(data);
289 | h = zeros(N,n);
290 |
291 | t1=10e-6;
292 |
293 | h(:,1) = max(a0./(1-a1-b1),t1);
294 |
295 | logl = zeros(N,1);
296 | logl = -0.5*log(h(:,1)) - 0.5*(data(1)-r_bar).^2./h(:,1) - 0.5*log(2*pi*ones(N,1));
297 |
298 | for t = 2:n
299 | h(:,t) = a0+a1.*(data(t-1)-r_bar).^2 + b1.*h(:,t-1);
300 | logl_tmp = -0.5*log(h(:,t)) - 0.5*(data(t)-r_bar).^2./h(:,t) - 0.5*log(2*pi*ones(N,1));
301 | logl = logl + logl_tmp;
302 | end
303 |
304 | loglikelihood = logl;
305 | end
306 |
307 |
--------------------------------------------------------------------------------
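
The next annealing temperature in the function above is chosen so that the reweighted particles keep an effective sample size of about N/2; the repository's Gamma.m is assumed here to encode essentially that ESS gap for fzero. A minimal self-contained sketch of the criterion, written with stand-in inputs and the logsumexp helper (assumed to act on a column vector):

% Stand-in inputs (illustrative only)
N         = 1000;
loglike_t = -200 + 5*randn(N,1);     % log-likelihoods at the current stage
logw_prev = -log(N)*ones(N,1);       % equal log weights after resampling
g_prev    = 0.1;                     % current temperature

% ESS of the particles reweighted from temperature g_prev to a candidate g
ess_at = @(g) exp(2*logsumexp(logw_prev + (g - g_prev)*loglike_t) ...
                - logsumexp(2*(logw_prev + (g - g_prev)*loglike_t)));

if ess_at(1) >= N/2
    g_next = 1;                                       % jump straight to the posterior
else
    g_next = fzero(@(g) ess_at(g) - N/2, [g_prev 1]); % root of the ESS gap on [g_prev, 1]
end
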
/GJR-GARCH/GJR_SMC_RW_LikeAnneal_parallel.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_particle, loglike, logprior, gamma, log_evidence] = GJR_SMC_RW_LikeAnneal_parallel(N)
2 | %SMC using vectorisation and parallelisation to estimate the GJR-GARCH(1,1) model's parameters;
3 | %The sequence of distributions is constructed using the likelihood annealing method
4 |
5 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
6 | % N - Size of population of particles
7 |
8 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
9 | % theta - N samples from each temperature
10 | % theta_particle - N transformed samples from each temperature
11 | % loglike - Log likelihood of the GJR-GARCH(1,1) model, corresponding to the above thetas
12 | % logprior - Log prior of the GJR-GARCH(1,1) model, corresponding to the above thetas
13 | % gamma - The temperatures from likelihood annealing strategy
14 | % log_evidence - The estimate of log evidence used for model selection
15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
16 |
17 | %loading monthly S&P stock log return
18 | load('Data.mat');
19 | rate_return = MonthlyReturns2018;
20 | rate_return(isnan(rate_return)) = 0;
21 |
22 | %Setting ranges for parameters to do transformation
23 | mylims = zeros(5,2);
24 | mylims(:,1) = zeros(5,1);
25 | mylims(4,1) = 0.5;
26 | mylims(5,1) = -0.9;
27 | mylims(:,2) = [0.3; 0.3; 0.3; 0.99; 0.9];
28 |
29 | %Starting parallel pool
30 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
31 | quthpc = parcluster('local');
32 | parpool(quthpc);
33 | poolsize = quthpc.NumWorkers;
34 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
35 |
36 | %Initialising
37 | t = 1;
38 | Num_Parameter = 5; % dimension of theta
39 | log_evidence = 0;
40 | gamma = 0; % temperatures
41 | logw_previous = log(ones(N,1)*(1/N));
42 | h = 0.1:0.1:1; % For choosing the optimal scale and thus reducing the number of MCMC repeats.
43 |
44 | %Transform theta
45 | theta = zeros(N, Num_Parameter);
46 | theta_particle = zeros(N, Num_Parameter);
47 | for j = 1:Num_Parameter
48 | theta(:,j) = unifrnd(mylims(j,1),mylims(j,2),[N,1]);
49 | theta_particle(:,j) = log((theta(:,j) - mylims(j,1))./(mylims(j,2) - theta(:,j)));
50 | end
51 |
52 | %calculating initial log prior
53 | logprior = GJR_logPrior_parallel_unif(theta_particle,mylims);
54 |
55 |
56 | inds = 1:N;
57 | while isempty(inds)==0 %this while loop is used to eliminate any infinite values from the simulation
58 | for j = 1:Num_Parameter
59 | theta(inds,j) = unifrnd(mylims(j,1),mylims(j,2),[length(inds),1]);
60 | theta_particle(inds,j) = log((theta(inds,j) - mylims(j,1))./(mylims(j,2) - theta(inds,j)));
61 | end
62 | logprior(inds) = GJR_logPrior_parallel_unif(theta_particle(inds,:),mylims);
63 | inds = find(isinf(logprior));
64 | end
65 |
66 | %calculating initial log likelihood
67 | [loglike,~] = gjrloglike_parallel_unif(theta_particle,rate_return,mylims);
68 |
69 | while gamma(t) < 1
70 | %Testing gamma=1
71 | logw = logw_previous + (1-gamma(t))*loglike(:,t);
72 | w = logw - max(logw); %stability
73 | w = exp(w);
74 | w = w/sum(w);
75 | ESS1 = 1/sum(w.^2);
76 |
77 | % Choosing next temperature
78 | if (ESS1 >= N/2)
79 | gamma(t+1) = 1;
80 | else
81 | %use bisection method to find the sequence of gamma
82 | fun=@(gamma_Current)Gamma(gamma_Current,gamma(t),N,loglike(:,t),logw_previous);
83 | interval=[gamma(t) 1];
84 | gamma_Current = fzero(fun,interval);
85 | gamma(t+1) = gamma_Current;
86 | end
87 | fprintf('The current temperature is %.3f.\n',gamma(t+1));
88 |
89 | %Substitute the value of the just-calculated gamma(t+1)
90 | logw = logw_previous + (gamma(t+1)-gamma(t))*loglike(:,t);
91 | log_evidence = log_evidence + logsumexp(logw);
92 | w = logw - max(logw);
93 | w = exp(w);
94 | w = w/sum(w);
95 |
96 | % (systematic resampling) - duplicating particles with larger weights
97 | u = rand;
98 | indices = zeros(N,1);
99 | cumsum_w = cumsum(w);
100 | for i=1:N
101 | A = find(u<cumsum_w);
102 | indices(i) = A(1);
103 | u=u+1/N;
104 | if u>1
105 | u=u-1;
106 | end
107 | end
108 | theta_particle(:,:,t) = theta_particle(indices,:,t);
109 | loglike(:,t) = loglike(indices,t);
110 | logprior(:,t) = logprior(indices,t);
111 | %%% after resampling, all the particles will have equal weights %%%
112 |
113 | %(Move with MCMC Kernel)
114 | cov_rw = cov(theta_particle(:,:,t)); %covariance of resampled particles
115 | Cov_inv=cov_rw^(-1); % inverse of the above covariance
116 |
117 | %compute mahalanobis distance from the Sample before moving
118 | [~,dists] = rangesearch(theta_particle(:,:,t),theta_particle(:,:,t),inf,'distance','mahalanobis','cov',cov_rw);
119 | dists = cell2mat(dists);
120 | median_dist = median(dists(:));
121 |
122 | h_ind = mod(randperm(N),length(h))'+1;
123 | h_all = h(h_ind);
124 | ESJD = zeros(N,1);
125 |
126 | %MVN RW
127 | parfor i=1:N
128 | theta_particle_prop(i,:) = mvnrnd(theta_particle(i,:,t),h_all(i)^2*cov_rw);
129 | end
130 |
131 | logprior_prop = GJR_logPrior_parallel_unif(theta_particle_prop,mylims);
132 |
133 | inds = find(isinf(logprior_prop)==0);
134 | while isempty(inds)==0 %this while loop is used to eliminate any infinite values from the simulation
135 | for j = 1:length(inds)
136 | theta_particle_prop(inds(j),:) = mvnrnd(theta_particle(inds(j),:,t),h_all(inds(j))^2*cov_rw);
137 | end
138 | logprior_prop(inds) = GJR_logPrior_parallel_unif(theta_particle_prop(inds,:),mylims);
139 | inds = find(isinf(logprior_prop));
140 | end
141 |
142 | percore = ceil(N/poolsize);
143 | parfor core = 1:poolsize %Divide Sample into different 'poolsize' groups
144 | current_core = (percore*(core-1) + 1):min(percore*core,N);
145 | [loglike_prop_cell{core},~] = gjrloglike_parallel_unif(theta_particle_prop(current_core,:),rate_return,mylims);
146 | end
147 | loglike_prop = loglike_prop_cell{1};
148 | for core = 2:poolsize
149 | loglike_prop = [loglike_prop; loglike_prop_cell{core}];
150 | end
151 |
152 | log_mh = gamma(t+1)*loglike_prop - gamma(t+1)*loglike(:,t) + logprior_prop - logprior(:,t);
153 | acc_probs = exp(log_mh);
154 |
155 | parfor i=1:N
156 | ESJD(i) = ((theta_particle(i,:,t)-theta_particle_prop(i,:))*Cov_inv*(theta_particle(i,:,t)-theta_particle_prop(i,:))')*acc_probs(i);
157 | end
158 |
159 | toacc_sub = find(rand(N,1) < acc_probs);
160 | toacc = toacc_sub;
161 | theta_particle(toacc,:,t)=theta_particle_prop(toacc,:);
162 | loglike(toacc,t)=loglike_prop(toacc);
163 | logprior(toacc,t)=logprior_prop(toacc);
164 |
165 | median_ESJD = accumarray(h_ind,ESJD,[],@median);%Median value of ESJD for different h indices from 1 to 10
166 | ind=find(median_ESJD==max(median_ESJD));
167 | h_opt = h(ind);
168 | fprintf('the scale is %f\n',h_opt);
169 |
170 | dist_move = zeros(N,1);
171 | belowThreshold = true;
172 | R_move = 0;
173 |
174 |
175 | % Getting number of remaining MCMC repeats
176 | %Performing remaining repeats
177 | while belowThreshold
178 | R_move = R_move + 1;
179 |
180 | theta_particle_prop = mvnrnd(theta_particle(:,:,t),h_opt^2*cov_rw);
181 |
182 | logprior_prop = GJR_logPrior_parallel_unif(theta_particle_prop,mylims);
183 |
184 | inds = find(isinf(logprior_prop)==0);
185 | while isempty(inds)==0 %this while loop is used to eliminate any infinite values from the simulation
186 | for j = 1:length(inds)
187 | theta_particle_prop(inds(j),:) = mvnrnd(theta_particle(inds(j),:,t),h_opt^2*cov_rw);
188 | end
189 | logprior_prop(inds) = GJR_logPrior_parallel_unif(theta_particle_prop(inds,:),mylims);
190 | inds = find(isinf(logprior_prop));
191 | end
192 |
193 | percore = ceil(N/poolsize);
194 | parfor core = 1:poolsize %Divide Sample into different 'poolsize' groups
195 | current_core = (percore*(core-1) + 1):min(percore*core,N);
196 | [loglike_prop_cell{core},~] = gjrloglike_parallel_unif(theta_particle_prop(current_core,:),rate_return,mylims);
197 | end
198 | loglike_prop = loglike_prop_cell{1};
199 | for core = 2:poolsize
200 | loglike_prop = [loglike_prop; loglike_prop_cell{core}];
201 | end
202 |
203 |
204 | log_mh = gamma(t+1)*loglike_prop - gamma(t+1)*loglike(:,t) + logprior_prop - logprior(:,t);
205 | acc_probs = exp(log_mh);
206 |
207 | toacc_sub = find(rand(N,1)median_dist)>=ceil(0.5*N)
219 | belowThreshold = false;
220 | end
221 | end
222 | fprintf('the value of R_move was %d\n',R_move);
223 |
224 | theta_particle(:,:,t+1) = theta_particle(:,:,t);
225 | loglike(:,t+1) = loglike(:,t);
226 | logprior(:,t+1) = logprior(:,t);
227 |
228 | t = t+1;
229 | end
230 |
231 | %Transforming back to original scale
232 | theta = zeros(N,Num_Parameter,size(theta_particle,3));
233 | parfor j=1:Num_Parameter
234 | theta(:,j,:) = (mylims(j,2)'.*exp(theta_particle(:,j,:))+mylims(j,1)')./(exp(theta_particle(:,j,:))+1);
235 | end
236 |
237 | delete(gcp);%shut down parallel pool
238 |
239 | save('results_GJR_LikeAnneal.mat')
240 | end
241 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
242 |
243 |
244 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
245 | function [logprior] = GJR_logPrior_parallel_unif(theta_particle,mylims)
246 | % Computes the log prior (takes transformed parameters as input)
247 |
248 | sumA = [-eye(5); eye(5); 0 1 0.5 1 0];
249 | sumB = [-1e-9; -1e-8; -1e-2; -.5; .9; 0.3; 0.3; 0.3; 0.99; 0.9; 0.9999];
250 |
251 | [N,d]=size(theta_particle);
252 |
253 | % Transforming back to original scale
254 | theta = theta_particle;
255 | for j = 1:d
256 | theta(:,j) = (mylims(j,2)'.*exp(theta(:,j))+mylims(j,1)')./(exp(theta(:,j))+1);
257 | end
258 |
259 | logprior = -inf*ones(N,1);
260 | inds=find(all(sumA*theta'<=sumB*ones(1,N))==1);
261 | logprior(inds)=sum(-theta_particle(inds,:)-2*log(1+exp(-theta_particle(inds,:))),2);
262 |
263 | end
264 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
265 |
266 |
267 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
268 | function [loglikelihood,h] = gjrloglike_parallel_unif(params,data,mylims)
269 | % Computing the likelihood of the time series under GJR-GARCH dynamics, given observed data and model parameters
270 | %====================================================================================================================
271 |
272 | [N,d]=size(params);
273 |
274 | % Transforming back to original scale;
275 | for j = 1:d
276 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
277 | end
278 |
279 | %%%%%%%%%%%%%%%%%%%
280 | %SETTING PARAMETERS
281 | %%%%%%%%%%%%%%%%%%%
282 | h0 = params(:,1); %alpha_0
283 | phi_p = params(:,2); %phi
284 | phi_n = params(:,3); %phi_{-}
285 | rho_h = params(:,4); %beta_sigma
286 | r_bar = params(:,5); %mu
287 |
288 | t1=10e-6;
289 |
290 | n = length(data);
291 | h = zeros(N,n);
292 |
293 | h(:,1) = max(h0./(1-phi_p-phi_n./2-rho_h),t1);
294 |
295 | logl=zeros(N,1);
296 | logl = - .5*log(h(:,1)) - .5*(data(1)-r_bar).^2./h(:,1) -.5*log(2*pi*ones(N,1));
297 |
298 | for t = 2:n
299 | inds_neg = find(data(t-1)-r_bar<0);
300 | inds_pos = find(data(t-1)-r_bar>0);
301 | h(inds_pos,t) = h0(inds_pos) + rho_h(inds_pos).*h(inds_pos,t-1)+ phi_p(inds_pos).*(data(t-1)-r_bar(inds_pos)).^2;
302 | h(inds_neg,t) = h0(inds_neg) + rho_h(inds_neg).*h(inds_neg,t-1)+ phi_p(inds_neg).*(data(t-1)-r_bar(inds_neg)).^2+ phi_n(inds_neg).*(data(t-1)-r_bar(inds_neg)).^2;
303 | logl_tmp = - .5*log(h(:,t)) - .5*(data(t)-r_bar).^2./h(:,t) -.5*log(2*pi*ones(N,1));
304 | logl=logl+logl_tmp;
305 |
306 | end
307 |
308 | loglikelihood=logl;
309 | end
310 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
311 |
312 |
313 |
314 |
315 |
316 |
--------------------------------------------------------------------------------
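
All of these samplers work on a logit-type transformation of each parameter to an unconstrained scale. The sketch below, with illustrative limits, spells out the forward and inverse maps and the log-Jacobian term that the log-prior functions add on top of a uniform prior.

% Illustrative limits and value for a single parameter (assumed numbers)
lo = 0; hi = 0.99;
theta = 0.3;                                      % parameter on the original scale

phi        = log((theta - lo)/(hi - theta));      % transformed (unconstrained) value
theta_back = (hi*exp(phi) + lo)/(exp(phi) + 1);   % inverse map used throughout the code

% log-Jacobian of the inverse map, up to the constant log(hi - lo); this is the
% -phi - 2*log(1+exp(-phi)) term summed over parameters in the log-prior functions
logJ = -phi - 2*log(1 + exp(-phi));
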
/BEGE/SMC_RW_LikeAnneal_parallel_biased.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_particle, loglike, logprior, gamma, log_evidence] = SMC_RW_LikeAnneal_parallel_biased(N)
2 | %SMC using vectorisation and parallelisation to estimate the BEGE model's
3 | %parameters;
4 | %The sequence of distributions is constructed using the likelihood annealing method
5 |
6 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
7 | % N - Size of population of particles
8 |
9 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
10 | % theta - N samples from each temperature
11 | % theta_particle - N transformed samples from each temperature
12 | % loglike - Log likelihood of the BEGE model, corresponding to the above thetas
13 | % logprior - Log prior of the BEGE model, corresponding to the above thetas
14 | % gamma - The temperatures from likelihood annealing strategy
15 | % log_evidence - The estimate of log evidence used for model selection
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 |
18 | %loading monthly S&P stock log return
19 | load('Data.mat');
20 | rate_return = MonthlyReturns2018;
21 | rate_return(isnan(rate_return)) = 0;
22 |
23 | %Setting ranges for parameters to do transformation
24 | mylims = zeros(11,2);
25 | mylims(:,1) = 1e-4.*ones(11,1);
26 | mylims(9,1) = -0.2;
27 | mylims(11,1) = -0.9;
28 | mylims(:,2) = [0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9];
29 |
30 | %Starting parallel pool
31 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
32 | quthpc = parcluster('local');
33 | parpool(quthpc);
34 | poolsize = quthpc.NumWorkers;
35 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
36 |
37 | %Initialising
38 | t = 1;
39 | Num_Parameter = 11; % dimension of theta
40 | log_evidence = 0;
41 | gamma = 0; % temperatures
42 | logw_previous=log(ones(N,1)*(1/N));
43 | h = 0.1:0.1:1; % For choosing the optimal scale and thus reducing the number of MCMC repeats.
44 |
45 | %Transform theta
46 | theta = zeros(N,Num_Parameter);
47 | theta_particle = zeros(N,Num_Parameter);
48 | for j = 1:Num_Parameter
49 | theta(:,j) = unifrnd(mylims(j,1),mylims(j,2),[N,1]);
50 | theta_particle(:,j) = log((theta(:,j) - mylims(j,1))./(mylims(j,2) - theta(:,j)));
51 | end
52 |
53 | %calculating initial log prior
54 | logprior = logPrior_parallel_fv(theta_particle,mylims);
55 |
56 |
57 | inds = 1:N;
58 | while isempty(inds)==0 %this while loop is used to eliminate any infinite values from the simulation
59 | for j=1:Num_Parameter
60 | theta(inds,j) = unifrnd(mylims(j,1),mylims(j,2),[length(inds),1]);
61 | theta_particle(inds,j) = log((theta(inds,j) - mylims(j,1))./(mylims(j,2) - theta(inds,j)));
62 | end
63 | logprior(inds) = logPrior_parallel_fv(theta_particle(inds,:),mylims);
64 | inds = find(isinf(logprior));
65 | end
66 |
67 | %calculating initial log likelihood
68 | [loglike, ~, ~] = bege_gjrgarch_likelihood_parallel(theta_particle,rate_return,mylims);
69 |
70 | while gamma(t)< 1
71 | %Testing gamma=1
72 | logw = logw_previous+(1-gamma(t))*loglike(:,t);
73 | w = logw-max(logw); %stability
74 | w = exp(w);
75 | w = w/sum(w);
76 | ESS1 = 1/sum(w.^2);
77 |
78 | %Choosing next temperature
79 | if (ESS1 >= N/2)
80 | gamma(t+1) = 1;
81 | else
82 | %use bisection method to find the sequence of gamma
83 | fun=@(gamma_Current)Gamma(gamma_Current,gamma(t),N,loglike(:,t),logw_previous);
84 | interval=[gamma(t) 1];
85 | gamma_Current=fzero(fun,interval);
86 | gamma(t+1)=gamma_Current;
87 | end
88 | fprintf('The current temperature is %.3f.\n',gamma(t+1));
89 |
90 | %Substitute the value of the just-calculated gamma(t+1)
91 | logw = logw_previous + (gamma(t+1)- gamma(t))*loglike(:,t);
92 | log_evidence=log_evidence + logsumexp(logw);
93 | w = logw-max(logw);
94 | w = exp(w);
95 | w = w/sum(w);
96 |
97 | % (systematic resampling)
98 | u=rand;
99 | indices = zeros(N,1);
100 | cumsum_w = cumsum(w);
101 | for i = 1:N
102 | A = find(u < cumsum_w);
103 | indices(i) = A(1);
104 | u=u+1/N;
105 | if u>1
106 | u=u-1;
107 | end
108 | end
109 | theta_particle(:,:,t) = theta_particle(indices,:,t);
110 | loglike(:,t) = loglike(indices,t);
111 | logprior(:,t) = logprior(indices,t);
112 |
113 | %(Move with MCMC Kernel)
114 | cov_rw = cov(theta_particle(:,:,t)); %covariance of resampled
115 | Cov_inv=cov_rw^(-1); % inverse of the above covariance
116 |
117 | %compute mahalanobis distance from the Sample before moving
118 | [~,dists]=rangesearch(theta_particle(:,:,t),theta_particle(:,:,t),inf,'distance','mahalanobis','cov',cov_rw);
119 | dists=cell2mat(dists);
120 | median_dist=median(dists(:));
121 |
122 | h_ind=mod(randperm(N),length(h))'+1;
123 | h_all=h(h_ind);
124 | ESJD=zeros(N,1);
125 |
126 | %MVN RW
127 | parfor i=1:N
128 | theta_particle_prop(i,:) = mvnrnd(theta_particle(i,:,t),h_all(i)^2*cov_rw);
129 | end
130 | logprior_prop = logPrior_parallel_fv(theta_particle_prop,mylims);
131 |
132 | inds = find(isinf(logprior_prop)==0);
133 | len_inds = length(inds);
134 |
135 | percore = ceil(len_inds/poolsize);
136 | parfor core = 1:poolsize
137 | current_core = percore*(core-1)+1: min(percore*core,len_inds);
138 | [loglike_prop_cell{core},~,~] = bege_gjrgarch_likelihood_parallel(theta_particle_prop(inds(current_core),:),rate_return,mylims);
139 | end
140 | loglike_prop = loglike_prop_cell{1};
141 | for core = 2:poolsize
142 | loglike_prop = [loglike_prop; loglike_prop_cell{core}];
143 | end
144 |
145 | log_mh = gamma(t+1)*loglike_prop - gamma(t+1)*loglike(inds,t) + logprior_prop(inds) - logprior(inds,t);
146 |
147 | acc_probs = -inf*ones(N,1);
148 | acc_probs(inds) = exp(log_mh);
149 |
150 | for i=1:length(inds)
151 | ESJD(inds(i))=((theta_particle(inds(i),:,t)-theta_particle_prop(inds(i),:))*Cov_inv*(theta_particle(inds(i),:,t)-theta_particle_prop(inds(i),:))').^(1/2)*acc_probs(inds(i));
152 | end
153 |
154 | toacc_sub = find(rand(len_inds,1)median_dist)>=ceil(0.5*N)
208 | belowThreshold = false;
209 | end
210 | end
211 | fprintf('the value of R_move was %d\n',R_move);
212 |
213 | theta_particle(:,:,t+1) = theta_particle(:,:,t);
214 | loglike(:,t+1) = loglike(:,t);
215 | logprior(:,t+1) = logprior(:,t);
216 |
217 | t = t+1;
218 | end
219 |
220 | %Transforming back to original scale
221 | theta = zeros(N,Num_Parameter,size(theta_particle,3));
222 | for j=1:Num_Parameter
223 | theta(:,j,:) = (mylims(j,2)'.*exp(theta_particle(:,j,:))+mylims(j,1)')./(exp(theta_particle(:,j,:))+1);
224 | end
225 |
226 | delete(gcp);%shut down parallel pool
227 | save('results_bege_LikeAnneal.mat');
228 | end
229 |
230 |
231 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
232 | function [logprior] = logPrior_parallel_fv(theta_particle,mylims)
233 | % Computes the log prior (takes transformed parameters as input)
234 |
235 | sumA = [-eye(11); eye(11); 0 0 1 0.5 0.5 0 0 0 0 0 0; 0 0 0 0 0 0 0 1 0.5 0.5 0];
236 |
237 | sumB = [zeros(10,1); 0.9;...
238 | 0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9; 0.995;0.995];
239 | sumB(9) = 0.2;
240 |
241 | [N,d]=size(theta_particle);
242 |
243 | % Transforming back to original scale
244 | theta = theta_particle;
245 | for j = 1:d
246 | theta(:,j) = (mylims(j,2)'.*exp(theta(:,j))+mylims(j,1)')./(exp(theta(:,j))+1);
247 | end
248 |
249 | logprior = -inf*ones(N,1);
250 | inds=find(all(sumA*theta'<=sumB*ones(1,N))==1);
251 | logprior(inds)=sum(-theta_particle(inds,:)-2*log(1+exp(-theta_particle(inds,:))),2);
252 |
253 | end
254 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
255 |
256 |
257 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
258 | function [loglikelihood,ptseries,ntseries] = bege_gjrgarch_likelihood_parallel(params,data,mylims)
259 | % Computing the likelihood of the time series under BEGE-GJR-GARCH dynamics, given observed data and model parameters
260 | %====================================================================================================================
261 |
262 | [N,d]=size(params);
263 |
264 | % Transforming back to original scale;
265 | for j = 1:d
266 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
267 | end
268 |
269 | %%%%%%%%%%%%%%%%%%%
270 | %SETTING PARAMETERS
271 | %%%%%%%%%%%%%%%%%%%
272 | r_bar = params(:,11); % (mu)
273 | p_bar=params(:,1); % (p_0)
274 | tp=params(:,2); % (sigma_p)
275 | rho_p=params(:,3); %
276 | phi_pp=params(:,4); %
277 | phi_pn=params(:,5); %
278 | n_bar=params(:,6); % (n_0)
279 | tn=params(:,7); % (sigma_n)
280 | rho_n=params(:,8); %
281 | phi_np=params(:,9); %
282 | phi_nn=params(:,10); %
283 |
284 |
285 | %Computing underlying pt and nt processes
286 | ptseries=zeros(N,length(data));
287 | ntseries=zeros(N,length(data));
288 |
289 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
290 | %COMPUTING THE LOG-LIKELIHOOD
291 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
292 |
293 | loglikelihood=zeros(N,1);
294 | t1=10e-1;
295 |
296 | previous_p=max(p_bar./(1-rho_p-(phi_pp+phi_pn)/2),t1);
297 | previous_n=max(n_bar./(1-rho_n-(phi_np+phi_nn)/2),t1);
298 | ptseries(:,1)=max(previous_p,t1);
299 | ntseries(:,1)=max(previous_n,t1);
300 |
301 | loglikelihood=loglikelihood+loglikedgam_parallel(data(1)-r_bar,ptseries(:,1),ntseries(:,1),tp,tn,0.001);
302 |
303 | p_t=zeros(N,1);
304 | n_t=zeros(N,1);
305 | for t=2:length(data)
306 | inds_neg=find(data(t-1)-r_bar<0);
307 | inds_pos=find(data(t-1)-r_bar>=0);
308 |
309 | if isempty(inds_neg)==0
310 | p_t(inds_neg)=max(p_bar(inds_neg)+rho_p(inds_neg).*previous_p(inds_neg)+...
311 | phi_pn(inds_neg).*(((data(t-1)-r_bar(inds_neg)).^2)./(2*(tp(inds_neg).^2))),t1);
312 | n_t(inds_neg)=max(n_bar(inds_neg)+rho_n(inds_neg).*previous_n(inds_neg)+...
313 | phi_nn(inds_neg).*(((data(t-1)-r_bar(inds_neg)).^2)./(2*(tn(inds_neg).^2))),t1);
314 | end
315 | if isempty(inds_pos)==0
316 | p_t(inds_pos)=max(p_bar(inds_pos)+rho_p(inds_pos).*previous_p(inds_pos)+...
317 | phi_pp(inds_pos).*(((data(t-1)-r_bar(inds_pos)).^2)./(2*(tp(inds_pos).^2))),t1);
318 | n_t(inds_pos)=max(n_bar(inds_pos)+rho_n(inds_pos).*previous_n(inds_pos)+...
319 | phi_np(inds_pos).*(((data(t-1)-r_bar(inds_pos)).^2)./(2*(tn(inds_pos).^2))),t1);
320 | end
321 |
322 | obs=data(t)-r_bar;
323 | tmp = loglikedgam_parallel(obs,p_t,n_t,tp,tn,0.001);
324 | loglikelihood=loglikelihood+tmp;
325 | ptseries(:,t)=p_t;
326 | ntseries(:,t)=n_t;
327 | previous_p=p_t;
328 | previous_n=n_t;
329 | end
330 |
331 | end
332 |
--------------------------------------------------------------------------------
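
The resampling step used in every sampler here is systematic resampling: one uniform draw, then N points spaced 1/N apart swept through the cumulative weights. The stand-alone sketch below should select the same multiset of indices as the in-line loops above (only the ordering differs); it could be dropped into its own .m file.

function indices = systematic_resample_sketch(w)
% Systematic resampling from a vector of (possibly unnormalised) weights w.
N = numel(w);
w = w(:)/sum(w);
cumsum_w = cumsum(w);
u = (rand + (0:N-1)')/N;        % N evenly spaced points in (0,1)
indices = zeros(N,1);
j = 1;
for i = 1:N
    while j < N && u(i) > cumsum_w(j)
        j = j + 1;
    end
    indices(i) = j;
end
end
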
/GARCH/garch_SMC_RW_DataAnneal_parallel.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_particle, loglike, logprior, W, log_evidence] = garch_SMC_RW_DataAnneal_parallel(N)
2 | %SMC using vectorisation and parallelisation to estimate the GARCH(1,1) model's parameters;
3 | %The sequence of distributions is constructed using the data annealing method
4 |
5 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
6 | % N - Size of population of particles
7 |
8 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
9 | % theta - N samples from currently available observations: y_{1:t}, t=1,..,T
10 | % theta_particle - N transformed samples from currently available observations
11 | % loglike - Log likelihood of the GARCH(1,1) model, corresponding to the above thetas
12 | % logprior - Log prior of the GARCH(1,1) model, corresponding to the above thetas
13 | % W - The weights of weighted samples, corresponding to the above thetas/theta_particles
14 | % log_evidence - The estimate of log evidence used for model selection
15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
16 | %loading monthly S&P stock log return
17 | load('Data.mat');
18 | rate_return = MonthlyReturns2018;
19 | rate_return(isnan(rate_return)) = 0;
20 |
21 | %Setting ranges for parameters to do transformation
22 | mylims = zeros(4,2);
23 | mylims(3,1) = 0.2;
24 | mylims(4,1) = -0.9;
25 | mylims(:,2) = [0.3; 0.5; 0.99; 0.9];
26 |
27 | %Starting parallel pool
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | quthpc = parcluster('local');
30 | parpool(quthpc);
31 | poolsize = quthpc.NumWorkers;
32 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
33 |
34 | %Initialising
35 | Num_Parameter=4; % dimension of theta
36 | h = 0.1:0.1:1; % For choosing the optimal scale and thus reducing the number of MCMC repeats.
37 | propotion = 0.5; %to determine the target ESS
38 | T = length(rate_return); % total number of data
39 |
40 | %Transform theta
41 | theta=zeros(N,Num_Parameter);
42 | theta_particle=zeros(N,Num_Parameter);
43 | for j = 1:Num_Parameter
44 | theta(:,j) = unifrnd(mylims(j,1),mylims(j,2),[N,1]);
45 | theta_particle(:,j) = log((theta(:,j) - mylims(j,1))./(mylims(j,2) - theta(:,j)));
46 | end
47 |
48 | %calculating initial log prior
49 | logprior = garch_logPrior_parallel_fv(theta_particle,mylims);
50 |
51 |
52 | inds=1:N;
53 | while isempty(inds)==0 %this while loop is used to eliminate any infinite values from the simulation
54 | for j = 1:Num_Parameter
55 | theta(inds,j) = unifrnd(mylims(j,1),mylims(j,2),[length(inds),1]);
56 | theta_particle(inds,j) = log((theta(inds,j) - mylims(j,1))./(mylims(j,2) - theta(inds,j)));
57 | end
58 | logprior(inds) = garch_logPrior_parallel_fv(theta_particle(inds,:),mylims);
59 | inds = find(isinf(logprior));
60 | end
61 |
62 | logw=zeros(N,T+1);
63 | logw(:,1)=-log(N)*ones(N,1);
64 | loglike=zeros(N,T);
65 | loglike_inc=zeros(N,1); %incremental weights; new observation's loglikelihood
66 | ESS=zeros(T,1);
67 | W=(1/N)*ones(N,1); % Initialising weights
68 | logsum_w = zeros(T,1);
69 |
70 | % Start sampling
71 | for t=1:T
72 | fprintf('Just starting with the %ith observation.\n',t);
73 | rate_return_sub = rate_return(1:t);
74 |
75 | if t==1
76 | [loglike(:,t),h_gjr] = garch_loglike_parallel(theta_particle(:,:,t),rate_return(1),mylims);
77 | logw(:,t+1) = log(W(:,t)) + loglike(:,t);
78 | else
79 | %log(f(y_t|y_{1:t-1},\theta))
80 | [loglike_inc,h_gjr] = gloglike_danneal_in_parallel(theta_particle(:,:,t),rate_return(t),mylims,h_gjr,rate_return(t-1));
81 | loglike(:,t) = loglike(:,t-1) + loglike_inc;
82 | logw(:,t+1) = log(W(:,t)) + loglike_inc;
83 | end
84 |
85 | logsum_w(t) = logsumexp(logw(:,t+1));
86 | logw(:,t+1)=logw(:,t+1)-max(logw(:,t+1));
87 | W(:,t+1)=exp(logw(:,t+1))./sum(exp(logw(:,t+1)));
88 | ESS(t)=1/sum(W(:,t+1).^2);
89 |
90 |
91 | if ESS(t)<propotion*N
92 | %systematic resampling
93 | u=rand;
94 | indices = zeros(N,1);
95 | cumsum_w = cumsum(W(:,t+1));
96 | for i=1:N
97 | A = find(u<cumsum_w);
98 | indices(i) = A(1);
99 | u=u+1/N;
100 | if u>1
101 | u=u-1;
102 | end
103 | end
104 | theta_particle(:,:,t)=theta_particle(indices,:,t);
105 | loglike(:,t) = loglike(indices,t);
106 | logprior(:,t) = logprior(indices,t);
107 | h_gjr = h_gjr(indices,:);
108 |
109 | logw(:,t+1) = -log(N)*ones(N,1);
110 | W(:,t+1)=exp(logw(:,t+1))/sum(exp(logw(:,t+1)));
111 |
112 | %(Move with MCMC Kernel)
113 | cov_rw=cov(theta_particle(:,:,t)); % covariance of resampled
114 | Cov_inv=cov_rw^(-1); % inverse of the above covariance
115 |
116 | %compute mahalanobis distance from the Sample before moving
117 | [~,dists]=rangesearch(theta_particle(:,:,t),theta_particle(:,:,t),inf,'distance','mahalanobis','cov',cov_rw);
118 | dists=cell2mat(dists);
119 | median_dist=median(dists(:));
120 |
121 | h_ind=mod(randperm(N),length(h))'+1;
122 | h_all=h(h_ind);
123 | ESJD=zeros(N,1);
124 |
125 | %MVN RW
126 | parfor i=1:N
127 | theta_particle_prop(i,:) = mvnrnd(theta_particle(i,:,t),h_all(i)^2*cov_rw);
128 | end
129 |
130 | logprior_prop = garch_logPrior_parallel_fv(theta_particle_prop,mylims);
131 |
132 | inds = find(isinf(logprior_prop)==0);%only choose non-inf values of proposed particles
133 | len_inds = length(inds);
134 |
135 |
136 | percore = ceil(len_inds/poolsize);
137 | parfor core = 1:poolsize
138 | current_core = percore*(core-1)+1: min(percore*core,len_inds);
139 | [loglike_prop_cell{core},h_gjr_cell{core}] = garch_loglike_parallel(theta_particle_prop(inds(current_core),:),rate_return_sub,mylims);
140 | end
141 | loglike_prop = loglike_prop_cell{1};
142 | h_gjr_prop = h_gjr_cell{1};
143 | parfor core = 2:poolsize
144 | loglike_prop = [loglike_prop; loglike_prop_cell{core}];
145 | h_gjr_prop = [h_gjr_prop; h_gjr_cell{core}];
146 | end
147 |
148 | log_mh = loglike_prop - loglike(inds,t) + logprior_prop(inds) - logprior(inds,t);
149 |
150 | acc_probs = -inf*ones(N,1);
151 | acc_probs(inds) = exp(log_mh);
152 |
153 | for i=1:length(inds)
154 | ESJD(inds(i))=((theta_particle(inds(i),:,t)-theta_particle_prop(inds(i),:))*Cov_inv*(theta_particle(inds(i),:,t)-theta_particle_prop(inds(i),:))').^(1/2)*acc_probs(inds(i));
155 | end
156 |
157 | toacc_sub = find(rand(len_inds,1)median_dist)>=ceil(0.5*N)
215 | belowThreshold = false;
216 | end
217 | end
218 | fprintf('the value of R_move was %d\n',R_move);
219 |
220 | end
221 | if t~=T
222 | theta_particle(:,:,t+1) = theta_particle(:,:,t);
223 | loglike(:,t+1) = loglike(:,t);
224 | logprior(:,t+1) = logprior(:,t);
225 | end
226 | end
227 |
228 | %Transforming back to original scale
229 | theta = zeros(N,Num_Parameter,T);
230 | parfor j=1:Num_Parameter
231 | theta(:,j,:) = (mylims(j,2)'.*exp(theta_particle(:,j,:))+mylims(j,1)')./(exp(theta_particle(:,j,:))+1);
232 | end
233 | log_evidence = sum(logsum_w);
234 |
235 | delete(gcp);%shut down parallel pool
236 | save('results_garch_DaAnneal.mat')
237 | end
238 |
239 |
240 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
241 | function [logprior] = garch_logPrior_parallel_fv(theta_particle,mylims)
242 | % Computes the log prior (takes transformed parameters as input)
243 |
244 | sumA = [-eye(4); eye(4); 0 1 1 0];
245 | sumB = [-1e-9; -1e-8; -.2; 0.9; 0.3; 0.5; 0.99; 0.9; 0.9999];
246 |
247 | [N,d] = size(theta_particle);
248 |
249 | % Transforming back to original scale
250 | theta = theta_particle;
251 | for j = 1:d
252 | theta(:,j) = (mylims(j,2)'.*exp(theta(:,j))+mylims(j,1)')./(exp(theta(:,j))+1);
253 | end
254 |
255 | logprior = -inf*ones(N,1);
256 | inds = find(all(sumA*theta'<=sumB*ones(1,N))==1);
257 | logprior(inds) = sum(-theta_particle(inds,:)-2*log(1+exp(-theta_particle(inds,:))),2);
258 |
259 | end
260 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
261 |
262 |
263 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
264 | function [loglikelihood,h] = garch_loglike_parallel(params,data,mylims)
265 | % Computing the likelihood of the time series under GARCH dynamics, given observed data and model parameters
266 | %====================================================================================================================
267 |
268 | [N,d]=size(params);
269 |
270 | % Transforming back to original scale;
271 | for j = 1:d
272 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
273 | end
274 |
275 | %%%%%%%%%%%%%%%%%%%
276 | %SETTING PARAMETERS
277 | %%%%%%%%%%%%%%%%%%%
278 | a0 = params(:,1); %alpha_0
279 | a1 = params(:,2); %alpha_1
280 | b1 = params(:,3); %beta_1
281 | r_bar = params(:,4); %mu
282 |
283 | n = length(data);
284 | h = zeros(N,n);
285 |
286 | t1=10e-6;
287 |
288 | h(:,1) = max(a0./(1-a1-b1),t1);
289 |
290 | logl = zeros(N,1);
291 | logl = -0.5*log(h(:,1)) - 0.5*(data(1)-r_bar).^2./h(:,1) - 0.5*log(2*pi*ones(N,1));
292 |
293 | for t = 2:n
294 | h(:,t) = a0+a1.*(data(t-1)-r_bar).^2 + b1.*h(:,t-1);
295 | logl_tmp = -0.5*log(h(:,t)) - 0.5*(data(t)-r_bar).^2./h(:,t) - 0.5*log(2*pi*ones(N,1));
296 | logl = logl + logl_tmp;
297 | end
298 |
299 | loglikelihood = logl;
300 | end
301 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
302 |
303 |
304 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
305 | function [loglikelihood,h] = gloglike_danneal_in_parallel(params,new_data,mylims,h,data_previous)
306 | % Computing the likelihood of the "new observation" under GARCH(1,1) dynamics, given previous and new observed data and model parameters
307 | %====================================================================================================================
308 |
309 | [N,d]=size(params);
310 |
311 | % Transforming back to original scale;
312 | for j = 1:d
313 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
314 | end
315 |
316 | %%%%%%%%%%%%%%%%%%%
317 | %SETTING PARAMETERS
318 | %%%%%%%%%%%%%%%%%%%
319 | a0 = params(:,1); %alpha_0
320 | a1 = params(:,2); %alpha_1
321 | b1 = params(:,3); %beta_1
322 | r_bar = params(:,4); %mu
323 |
324 |
325 | h_t = zeros(N,1);
326 |
327 | h_t = a0+a1.*(data_previous-r_bar).^2 + b1.*h(:,end);
328 | obs=new_data-r_bar;
329 | loglikelihood = -0.5*log(h_t) - 0.5*(obs).^2./h_t - 0.5*log(2*pi*ones(N,1));
330 | h = [h h_t];
331 |
332 | end
333 |
--------------------------------------------------------------------------------
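
In the data-annealing samplers, each new observation contributes an incremental log-likelihood that updates the particle weights, the effective sample size and the running log evidence. A minimal sketch of that bookkeeping for one new observation, with stand-in values and the repository's logsumexp helper (assumed to act on a column vector):

% Stand-in values for one data-annealing step (illustrative only)
N           = 1000;
W_prev      = ones(N,1)/N;           % weights after processing y_{1:t-1}
loglike_inc = randn(N,1);            % placeholder for log f(y_t | y_{1:t-1}, theta_i)

logw             = log(W_prev) + loglike_inc;
log_evidence_inc = logsumexp(logw);  % this step's contribution to the log evidence

W_new = exp(logw - max(logw));       % normalise on the log scale for stability
W_new = W_new/sum(W_new);
ESS   = 1/sum(W_new.^2);             % resample-and-move is triggered when this drops too low
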
/GJR-GARCH/GJR_SMC_RW_DataAnneal_parallel.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_particle, loglike, logprior, W, log_evidence] = GJR_SMC_RW_DataAnneal_parallel(N)
2 | %SMC using vectorisation and parallelisation to estimate the GJR-GARCH(1,1) model's parameters;
3 | %The sequence of distributions is constructed using the data annealing method
4 |
5 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
6 | % N - Size of population of particles
7 |
8 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
9 | % theta - N samples from currently available observations: y_{1:t}, t=1,..,T
10 | % theta_particle - N transformed samples from currently available observations
11 | % loglike - Log likelihood of the GJR-GARCH(1,1) model, corresponding to the above thetas
12 | % logprior - Log prior of the GJR-GARCH(1,1) model, corresponding to the above thetas
13 | % W - The weights of weighted samples, corresponding to the above thetas/theta_particles
14 | % log_evidence - The estimate of log evidence used for model selection
15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
16 | %loading monthly S&P stock log return
17 | load('Data.mat');
18 | rate_return = MonthlyReturns2018;
19 | rate_return(isnan(rate_return)) = 0;
20 |
21 | %Setting ranges for parameters to do transformation
22 | mylims = zeros(5,2);
23 | mylims(:,1) = zeros(5,1);
24 | mylims(4,1) = 0.5;
25 | mylims(5,1) = -0.9;
26 | mylims(:,2) = [0.3; 0.3; 0.3; 0.99; 0.9];
27 |
28 | %Starting parallel pool
29 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
30 | quthpc = parcluster('local');
31 | poolsize = quthpc.NumWorkers;
32 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
33 |
34 | %Initialising
35 | Num_Parameter=5; % dimension of theta
36 | h = 0.1:0.1:1; % For choosing the optimal scale and thus reducing the number of MCMC repeats.
37 | propotion = 0.5; %to determine the target ESS
38 | T = length(rate_return); % total number of data
39 |
40 | %Transform theta
41 | theta=zeros(N,Num_Parameter);
42 | theta_particle=zeros(N,Num_Parameter);
43 | for j = 1:Num_Parameter
44 | theta(:,j) = unifrnd(mylims(j,1),mylims(j,2),[N,1]);
45 | theta_particle(:,j) = log((theta(:,j) - mylims(j,1))./(mylims(j,2) - theta(:,j)));
46 | end
47 |
48 | %calculating initial log prior
49 | logprior = GJR_logPrior_parallel_unif(theta_particle,mylims);
50 |
51 |
52 | inds=1:N;
53 | while isempty(inds)==0 %this while loop is used to eliminate any infinite values from the simulation
54 | for j = 1:Num_Parameter
55 | theta(inds,j) = unifrnd(mylims(j,1),mylims(j,2),[length(inds),1]);
56 | theta_particle(inds,j) = log((theta(inds,j) - mylims(j,1))./(mylims(j,2) - theta(inds,j)));
57 | end
58 | logprior(inds) = GJR_logPrior_parallel_unif(theta_particle(inds,:),mylims);
59 | inds = find(isinf(logprior));
60 | end
61 |
62 | logw=zeros(N,T+1);
63 | logw(:,1)=-log(N)*ones(N,1);
64 | loglike=zeros(N,T);
65 | loglike_inc=zeros(N,1); %incremental weights; new observation's loglikelihood
66 | ESS=zeros(T,1);
67 | W=(1/N)*ones(N,1); % Initialising weights
68 | logsum_w = zeros(T,1);
69 |
70 | % Start sampling
71 | for t=1:T
72 | fprintf('Just starting with the %ith observation.\n',t);
73 | rate_return_sub = rate_return(1:t);
74 |
75 | if t==1
76 | [loglike(:,t),h_gjr] = gjrloglike_parallel_unif(theta_particle(:,:,t),rate_return(1),mylims);
77 | logw(:,t+1) = log(W(:,t)) + loglike(:,t);
78 | else
79 | %log(f(y_t|y_{1:t-1},\theta))
80 | [loglike_inc,h_gjr] = gjrloglike_danneal_in_parallel(theta_particle(:,:,t),rate_return(t),mylims,h_gjr,rate_return(t-1));
81 | loglike(:,t) = loglike(:,t-1) + loglike_inc;
82 | logw(:,t+1) = log(W(:,t)) + loglike_inc;
83 | end
84 |
85 | logsum_w(t) = logsumexp(logw(:,t+1));
86 | logw(:,t+1)=logw(:,t+1)-max(logw(:,t+1));
87 | W(:,t+1)=exp(logw(:,t+1))./sum(exp(logw(:,t+1)));
88 | ESS(t)=1/sum(W(:,t+1).^2);
89 |
90 |
91 | if ESS(t)<propotion*N
92 | %systematic resampling
93 | u=rand;
94 | indices = zeros(N,1);
95 | cumsum_w = cumsum(W(:,t+1));
96 | for i=1:N
97 | A = find(u<cumsum_w);
98 | indices(i) = A(1);
99 | u=u+1/N;
100 | if u>1
101 | u=u-1;
102 | end
103 | end
104 | theta_particle(:,:,t)=theta_particle(indices,:,t);
105 | loglike(:,t) = loglike(indices,t);
106 | logprior(:,t) = logprior(indices,t);
107 | h_gjr = h_gjr(indices,:);
108 |
109 | logw(:,t+1) = -log(N)*ones(N,1);
110 | W(:,t+1)=exp(logw(:,t+1))/sum(exp(logw(:,t+1)));
111 |
112 | %(Move with MCMC Kernel)
113 | cov_rw=cov(theta_particle(:,:,t)); % covariance of resampled
114 | Cov_inv=cov_rw^(-1); % inverse of the above covariance
115 |
116 | %compute mahalanobis distance from the Sample before moving
117 | [~,dists]=rangesearch(theta_particle(:,:,t),theta_particle(:,:,t),inf,'distance','mahalanobis','cov',cov_rw);
118 | dists=cell2mat(dists);
119 | median_dist=median(dists(:));
120 |
121 | h_ind=mod(randperm(N),length(h))'+1;
122 | h_all=h(h_ind);
123 | ESJD=zeros(N,1);
124 |
125 | %MVN RW
126 | parfor i=1:N
127 | theta_particle_prop(i,:) = mvnrnd(theta_particle(i,:,t),h_all(i)^2*cov_rw);
128 | end
129 |
130 | logprior_prop = GJR_logPrior_parallel_unif(theta_particle_prop,mylims);
131 |
132 | inds = find(isinf(logprior_prop)==0);%only choose non-inf values of proposed particles
133 | len_inds = length(inds);
134 |
135 |
136 | percore = ceil(len_inds/poolsize);
137 | parfor core = 1:poolsize
138 | current_core = percore*(core-1)+1: min(percore*core,len_inds);
139 | [loglike_prop_cell{core},h_gjr_cell{core}] = gjrloglike_parallel_unif(theta_particle_prop(inds(current_core),:),rate_return_sub,mylims);
140 | end
141 | loglike_prop = loglike_prop_cell{1};
142 | h_gjr_prop = h_gjr_cell{1};
143 | parfor core = 2:poolsize
144 | loglike_prop = [loglike_prop; loglike_prop_cell{core}];
145 | h_gjr_prop = [h_gjr_prop; h_gjr_cell{core}];
146 | end
147 |
148 | log_mh = loglike_prop - loglike(inds,t) + logprior_prop(inds) - logprior(inds,t);
149 |
150 | acc_probs = -inf*ones(N,1);
151 | acc_probs(inds) = exp(log_mh);
152 |
153 | for i=1:length(inds)
154 | ESJD(inds(i))=((theta_particle(inds(i),:,t)-theta_particle_prop(inds(i),:))*Cov_inv*(theta_particle(inds(i),:,t)-theta_particle_prop(inds(i),:))').^(1/2)*acc_probs(inds(i));
155 | end
156 |
157 | toacc_sub = find(rand(len_inds,1)median_dist)>=ceil(0.5*N)
215 | belowThreshold = false;
216 | end
217 | end
218 | fprintf('the value of R_move was %d\n',R_move);
219 |
220 | end
221 | if t~=T
222 | theta_particle(:,:,t+1) = theta_particle(:,:,t);
223 | loglike(:,t+1) = loglike(:,t);
224 | logprior(:,t+1) = logprior(:,t);
225 | end
226 | end
227 |
228 | %Transforming back to original scale
229 | theta = zeros(N,Num_Parameter,T);
230 | parfor j=1:Num_Parameter
231 | theta(:,j,:) = (mylims(j,2)'.*exp(theta_particle(:,j,:))+mylims(j,1)')./(exp(theta_particle(:,j,:))+1);
232 | end
233 | log_evidence = sum(logsum_w);
234 |
235 | delete(gcp);%shut down parallel pool
236 | save('results_GJR_DaAnneal.mat')
237 | end
238 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
239 |
240 |
241 |
242 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
243 | function [logprior] = GJR_logPrior_parallel_unif(theta_particle,mylims)
244 | % Computes the log prior (takes transformed parameters as input)
245 |
246 | sumA = [-eye(5); eye(5); 0 1 0.5 1 0];
247 | sumB = [-1e-9; -1e-8; -1e-2; -.5; .9; 0.3; 0.3; 0.3; 0.99; 0.9; 0.9999];
248 |
249 | [N,d]=size(theta_particle);
250 |
251 | % Transforming back to original scale
252 | theta = theta_particle;
253 | for j = 1:d
254 | theta(:,j) = (mylims(j,2)'.*exp(theta(:,j))+mylims(j,1)')./(exp(theta(:,j))+1);
255 | end
256 |
257 | logprior = -inf*ones(N,1);
258 | inds=find(all(sumA*theta'<=sumB*ones(1,N))==1);
259 | logprior(inds)=sum(-theta_particle(inds,:)-2*log(1+exp(-theta_particle(inds,:))),2);
260 |
261 | end
262 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
263 |
264 |
265 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
266 | function [loglikelihood,h] = gjrloglike_parallel_unif(params,data,mylims)
267 | % Computing the likelihood of the time series under GJR-GARCH dynamics, given observed data and model parameters
268 | %====================================================================================================================
269 |
270 | [N,d]=size(params);
271 |
272 | % Transforming back to original scale;
273 | for j = 1:d
274 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
275 | end
276 |
277 | %%%%%%%%%%%%%%%%%%%
278 | %SETTING PARAMETERS
279 | %%%%%%%%%%%%%%%%%%%
280 | h0 = params(:,1); %alpha_0
281 | phi_p = params(:,2); %phi
282 | phi_n = params(:,3); %phi_{-}
283 | rho_h = params(:,4); %beta_sigma
284 | r_bar = params(:,5); %mu
285 |
286 | t1=10e-6;
287 |
288 | n = length(data);
289 | h = zeros(N,n);
290 |
291 | h(:,1) = max(h0./(1-phi_p-phi_n./2-rho_h),t1);
292 |
293 | logl=zeros(N,1);
294 | logl = - .5*log(h(:,1)) - .5*(data(1)-r_bar).^2./h(:,1) -.5*log(2*pi*ones(N,1));
295 |
296 | for t = 2:n
297 | inds_neg = find(data(t-1)-r_bar<0);
298 | inds_pos = find(data(t-1)-r_bar>=0);
299 | h(inds_pos,t) = h0(inds_pos) + rho_h(inds_pos).*h(inds_pos,t-1)+ phi_p(inds_pos).*(data(t-1)-r_bar(inds_pos)).^2;
300 | h(inds_neg,t) = h0(inds_neg) + rho_h(inds_neg).*h(inds_neg,t-1)+ phi_p(inds_neg).*(data(t-1)-r_bar(inds_neg)).^2+ phi_n(inds_neg).*(data(t-1)-r_bar(inds_neg)).^2;
301 | logl_tmp = - .5*log(h(:,t)) - .5*(data(t)-r_bar).^2./h(:,t) -.5*log(2*pi*ones(N,1));
302 | logl=logl+logl_tmp;
303 |
304 | end
305 |
306 | loglikelihood=logl;
307 | end
308 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
309 |
310 |
311 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
312 | function [loglikelihood,h] = gjrloglike_danneal_in_parallel(params,new_data,mylims,h,data_previous)
313 | % Computing the likelihood of the time series under GJR-GARCH(1,1) dynamics, given previous and new observed data and model parameters
314 | %====================================================================================================================
315 |
316 | [N,d] = size(params);
317 |
318 | % Transforming back to original scale;
319 | for j = 1:d
320 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
321 | end
322 |
323 | %%%%%%%%%%%%%%%%%%%
324 | %SETTING PARAMETERS
325 | %%%%%%%%%%%%%%%%%%%
326 | h0 = params(:,1); %alpha_0
327 | phi_p = params(:,2); %phi
328 | phi_n = params(:,3); %phi_{-}
329 | rho_h = params(:,4); %beta_sigma
330 | r_bar = params(:,5); %mu
331 |
332 |
333 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
334 | %COMPUTING THE LOG-LIKELIHOOD
335 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
336 |
337 | h_t = zeros(N,1);
338 |
339 | inds_neg = find(data_previous-r_bar<0);
340 | inds_pos = find(data_previous-r_bar>=0);
341 |
342 | if isempty(inds_neg)==0
343 | h_t(inds_neg) = h0(inds_neg) + rho_h(inds_neg).*h(inds_neg,end)+ phi_p(inds_neg).*(data_previous-r_bar(inds_neg)).^2 + phi_n(inds_neg).*(data_previous-r_bar(inds_neg)).^2;
344 | end
345 |
346 | if isempty(inds_pos)==0
347 | h_t(inds_pos) = h0(inds_pos) + rho_h(inds_pos).*h(inds_pos,end)+ phi_p(inds_pos).*(data_previous-r_bar(inds_pos)).^2;
348 | end
349 |
350 | obs=new_data-r_bar;
351 | loglikelihood =- .5*log(h_t) - .5*(obs).^2./h_t -.5*log(2*pi*ones(N,1));
352 | h = [h h_t];
353 |
354 | end
355 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
356 |
357 |
358 |
359 |
360 |
361 |
362 |
363 |
364 |
365 |
366 |
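All of the samplers in this repository work on transformed parameters: a parameter theta_j bounded by mylims(j,1) and mylims(j,2) is mapped to the real line before the random-walk moves and mapped back when results are reported. A minimal MATLAB sketch of this transform pair (illustration only; the limits and value below are made up):

a = -0.5; b = 0.9;                               % hypothetical limits for one parameter
theta = 0.25;                                    % a value inside (a, b)
phi = log((theta - a)/(b - theta));              % transform to the real line, as in the code above
theta_back = (b*exp(phi) + a)/(exp(phi) + 1);    % inverse transform used when reporting results
% theta_back equals theta up to rounding; the term -phi - 2*log(1 + exp(-phi)) added by the
% log-prior functions is the log Jacobian of this mapping (up to an additive constant).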
--------------------------------------------------------------------------------
/BEGE/SMC_RW_DataAnneal_Par_unif_unbiased_mc.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_particles, loglike, logprior, W, log_evidence] = SMC_RW_DataAnneal_Par_unif_unbiased_mc(N)
2 | %SMC with parallelisation for estimating the BEGE model's parameters;
3 | %The sequence of distributions is constructed using the data annealing method.
4 | %The likelihood is unbiasedly estimated using Monte Carlo integration.
5 | %
6 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
7 | % N - Size of population of particles
8 |
9 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
10 | % theta - N samples from currently available observations: y_{1:t}, t=1,..,T
11 | % theta_particle - N transformed samples from currently available observations
12 | % loglike - Log likelihood of the BEGE model, corresponding to the above thetas
13 | % logprior - Log prior of the BEGE model, corresponding to the above thetas
14 | % W - The weights of weighted samples, corresponding to the above thetas/theta_particles
15 | % log_evidence - The estimate of log evidence used for model selection
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 |
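% Example call (illustration only, not part of the original file; N = 1000 particles is an
% arbitrary choice and a parallel pool is needed for the parfor loops below):
%   [theta, theta_particles, loglike, logprior, W, log_evidence] = ...
%       SMC_RW_DataAnneal_Par_unif_unbiased_mc(1000);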
18 | %loading monthly S&P stock log return
19 | load('Data.mat');
20 | rate_return = MonthlyReturns2018;
21 | rate_return(isnan(rate_return)) = 0;
22 |
23 | %Setting ranges for parameters to do transformation
24 | mylims = zeros(11,2);
25 | mylims(:,1) = zeros(11,1);
26 | mylims(9,1) = -0.2;
27 | mylims(11,1) = -0.9;
28 | mylims(:,2) = [0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9];
29 |
30 | %Initialising
31 | Num_Parameter=11; % dimension of theta
32 | h = 0.1:0.1:1; % candidate scales for the MVN random-walk proposal; choosing a good scale reduces the number of MCMC repeats
33 | propotion = 0.5; % proportion of N used as the target ESS for triggering resampling
34 | T = length(rate_return); % total number of observations
35 | theta=zeros(N,Num_Parameter);
36 | theta_particles=zeros(N,Num_Parameter,T);
37 | logprior=zeros(N,T);
38 |
39 | %Transform theta
40 | parfor j = 1:Num_Parameter
41 | theta(:,j,1) = unifrnd(mylims(j,1),mylims(j,2),[N,1]);
42 | theta_particles(:,j,1) = log((theta(:,j,1) - mylims(j,1))./(mylims(j,2) - theta(:,j,1)));
43 | end
44 |
45 | %calculating initial log prior
46 | parfor i = 1:N
47 | logprior(i,1) = log_prior(theta_particles(i,:,1),mylims);
48 | end
49 |
50 | logW = zeros(N,T+1);
51 | logW(:,1)=-log(N)*ones(N,1);
52 | loglike = zeros(N,T);
53 | loglike_inc = zeros(N,1); %incremental weights; new observation's loglikelihood
54 | ESS = zeros(T,1);
55 | W = (1/N)*ones(N,1); % Initialising weights
56 | logsum_w = zeros(T,1);
57 |
58 | inf_ind = 1;
59 | while inf_ind %this while loop re-draws any particles whose log prior is infinite
60 | inds = find(isinf(logprior(:,1))==1);
61 | for j = 1:Num_Parameter
62 | theta(inds,j,1) = unifrnd(mylims(j,1),mylims(j,2),[length(inds),1]);
63 | theta_particles(inds,j,1) = log((theta(inds,j,1) - mylims(j,1))./(mylims(j,2) - theta(inds,j,1)));
64 | end
65 | for i = inds'
66 | logprior(i,1) = log_prior(theta_particles(i,:,1),mylims);
67 | end
68 | inf_ind = any(isinf(logprior(:,1)));
69 | end
70 |
71 | % Start sampling
72 | for t=1:T
73 | fprintf('Just starting with the %ith observation.\n',t);
74 | rate_return_sub = rate_return(1:t);
75 | if t==1
76 | parfor i=1:N
77 | [~, loglike(i,t),ptseries{i},ntseries{i},~] = bege_gjrgarch_likelihood_unbiased_mc(theta_particles(i,:,t),rate_return(1),mylims);
78 | end
79 | logW(:,t+1) = log(W(:,t)) + loglike(:,t);
80 | else
81 | parfor i=1:N
82 | [loglike_inc(i),ptseries{i},ntseries{i}] = bege_gjrgarch_likelihood_danneal_in_unbiased_mc(theta_particles(i,:,t),rate_return(t),mylims,ptseries{i},ntseries{i},rate_return(t-1));
83 | end
84 | logW(:,t+1) = log(W(:,t)) + loglike_inc;
85 | loglike(:,t) = loglike(:,t-1) + loglike_inc;
86 | end
87 |
88 | logsum_w(t) = logsumexp(logW(:,t+1));
89 | logW(:,t+1)=logW(:,t+1)-max(logW(:,t+1));
90 | W(:,t+1)=exp(logW(:,t+1))./sum(exp(logW(:,t+1)));
91 | ESS(t)=1/sum(W(:,t+1).^2);
92 |
93 |
94 | if ESS(t) < propotion*N
95 | % (systematic resampling)
96 | u=rand;
97 | indices = zeros(N,1);
98 | cumsum_w = cumsum(W(:,t+1));
99 | for i = 1:N
100 | A = find(u < cumsum_w);
101 | indices(i) = A(1);
102 | u=u+1/N;
103 | if u>1
104 | u=u-1;
105 | end
106 | end
107 | theta_particles(:,:,t)=theta_particles(indices,:,t);
108 | loglike(:,t) = loglike(indices,t);
109 | logprior(:,t) = logprior(indices,t);
110 | ptseries = ptseries(indices);
111 | ntseries = ntseries(indices);
112 |
113 | logW(:,t+1) = -log(N)*ones(N,1);
114 | W(:,t+1)=exp(logW(:,t+1))/sum(exp(logW(:,t+1)));
115 |
116 | %(Move with MCMC Kernel)
117 | cov_rw=cov(theta_particles(:,:,t)); % covariance of resampled
118 | Cov_inv=cov_rw^(-1); % inverse of the above covariance
119 |
120 | %compute mahalanobis distance from the Sample before moving
121 | [~,dists]=rangesearch(theta_particles(:,:,t),theta_particles(:,:,t),inf,'distance','mahalanobis','cov',cov_rw);
122 | dists=cell2mat(dists);
123 | median_dist=median(dists(:));
124 |
125 | h_ind=mod(randperm(N),length(h))'+1;
126 | h_all=h(h_ind);
127 | ESJD=zeros(N,1);
128 | acc_prob=zeros(N,1);
129 |
130 | %MVN RW
131 | parfor i=1:N
132 |
133 | theta_particles_prop = mvnrnd(theta_particles(i,:,t),h_all(i)^2*cov_rw);
134 | logprior_prop=log_prior(theta_particles_prop,mylims);
135 | [~, loglike_prop,ptseries_prop,ntseries_prop,~] = bege_gjrgarch_likelihood_unbiased_mc(theta_particles_prop, rate_return_sub,mylims);
136 | if isinf(logprior_prop)
137 | continue;
138 | end
139 |
140 | Alpha=exp(loglike_prop - loglike(i,t) + logprior_prop - logprior(i,t));
141 | acc_prob(i)=min(1,Alpha);
142 |
143 | ESJD(i)=((theta_particles(i,:,t)-theta_particles_prop)*Cov_inv*(theta_particles(i,:,t)-theta_particles_prop)').^(1/2)*acc_prob(i);
144 |
145 | if randmedian_dist)>=ceil(0.5*N)
191 | belowThreshold = false;
192 | end
193 | end
194 | fprintf('the value of R_move was %d\n',R_move);
195 | end
196 | if t~=T
197 | theta_particles(:,:,t+1) = theta_particles(:,:,t);
198 | loglike(:,t+1) = loglike(:,t);
199 | logprior(:,t+1) = logprior(:,t);
200 | end
201 | end
202 |
203 | %Transforming back to original scale
204 | theta = theta_particles;
205 | parfor j=1:Num_Parameter
206 | theta(:,j,:) = (mylims(j,2)'.*exp(theta_particles(:,j,:))+mylims(j,1)')./(exp(theta_particles(:,j,:))+1);
207 | end
208 | log_evidence = sum(logsum_w);
209 |
210 | %save('results_DataAnneal_unbiased_mc.mat');
211 | end
212 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
213 |
214 |
215 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
216 | function [logprior] = log_prior(phi,mylims)
217 | % Computes the log prior (takes transformed parameters as input)
218 |
219 | sumA = [-eye(11); eye(11); 0 0 1 0.5 0.5 0 0 0 0 0 0; 0 0 0 0 0 0 0 1 0.5 0.5 0];
220 |
221 | sumB = [zeros(10,1); 0.9;...
222 | 0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9; 0.995;0.995];
223 | sumB(9) = 0.2;
224 |
225 | % Transforming back to original scale
226 | theta = phi;
227 | for j = 1:length(phi)
228 | theta(j) = (mylims(j,2)'.*exp(theta(j))+mylims(j,1)')/(exp(theta(j))+1);
229 | end
230 |
231 | if all(sumA*theta'<=sumB)
232 | logprior = sum(-phi - 2*log(1 + exp(-phi)));
233 | else
234 | logprior = -inf;
235 | end
236 |
237 | end
238 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
239 |
240 |
241 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
242 | function [neglogl,loglikelihood,ptseries,ntseries,likelihoods] = bege_gjrgarch_likelihood_unbiased_mc(params,data,mylims)
243 | % Computing unbiased estimator of the likelihood of the time series under BEGE-GJR-GARCH dynamics, given observed data and model parameters
244 | %====================================================================================================================
245 |
246 | % Transforming back to original scale;
247 | for j = 1:length(params)
248 | params(j) = (mylims(j,2)'.*exp(params(j))+mylims(j,1)')/(exp(params(j))+1);
249 | end
250 |
251 | %%%%%%%%%%%%%%%%%%%
252 | %SETTING PARAMETERS
253 | %%%%%%%%%%%%%%%%%%%
254 | r_bar = params(11); % (mu)
255 | p_bar=params(1); % (p_0)
256 | tp=params(2); % (sigma_p)
257 | rho_p=params(3); %
258 | phi_pp=params(4); %
259 | phi_pn=params(5); %
260 | n_bar=params(6); % (n_0)
261 | tn=params(7); % (sigma_n)
262 | rho_n=params(8); %
263 | phi_np=params(9); %
264 | phi_nn=params(10); %
265 |
266 | %Vector containing likelihood for every observation
267 | likelihoods=zeros(length(data),1);
268 |
269 | %Computing underlying pt and nt processes
270 | ptseries=zeros(length(data),1);
271 | ntseries=zeros(length(data),1);
272 |
273 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
274 | %COMPUTING THE LOG-LIKELIHOOD
275 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
276 |
277 | loglikelihood=0;
278 | t1=10e-1;
279 |
280 | previous_p=max(p_bar/(1-rho_p-(phi_pp+phi_pn)/2),t1);
281 | previous_n=max(n_bar/(1-rho_n-(phi_np+phi_nn)/2),t1);
282 | ptseries(1)=max(previous_p,t1);
283 | ntseries(1)=max(previous_n,t1);
284 |
285 | loglikelihood=loglikelihood+loglikedgam_unbiased_mc(data(1)-r_bar,ptseries(1),ntseries(1),tp,tn);
286 | likelihoods(1)=loglikedgam_unbiased_mc(data(1)-r_bar,ptseries(1),ntseries(1),tp,tn);
287 |
288 | for t=2:length(data)
289 | if ((data(t-1)-r_bar)<0)
290 | p_t=max(p_bar+rho_p*previous_p+...
291 | phi_pn*(((data(t-1)-r_bar)^2)/(2*(tp^2))),t1);
292 | n_t=max(n_bar+rho_n*previous_n+...
293 | phi_nn*(((data(t-1)-r_bar)^2)/(2*(tn^2))),t1);
294 | else
295 | p_t=max(p_bar+rho_p*previous_p+...
296 | phi_pp*(((data(t-1)-r_bar)^2)/(2*(tp^2))),t1);
297 | n_t=max(n_bar+rho_n*previous_n+...
298 | phi_np*(((data(t-1)-r_bar)^2)/(2*(tn^2))),t1);
299 | end
300 |
301 | obs=data(t)-r_bar;
302 | tmp = loglikedgam_unbiased_mc(obs,p_t,n_t,tp,tn);
303 |
304 | loglikelihood=loglikelihood+tmp;
305 | likelihoods(t)=tmp;
306 | ptseries(t)=p_t;
307 | ntseries(t)=n_t;
308 | previous_p=p_t;
309 | previous_n=n_t;
310 | end
311 | neglogl = -loglikelihood;
312 | end
313 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
314 |
315 |
316 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
317 | function [loglikelihood,ptseries,ntseries] = bege_gjrgarch_likelihood_danneal_in_unbiased_mc(params,new_data,mylims,ptseries,ntseries,data_previous)
318 | % Computing unbiased estimator of the likelihood of the "new observation" under BEGE-GJR-GARCH dynamics, given previous and new observed data and model parameters
319 | %====================================================================================================================
320 |
321 | % Transforming back to original scale;
322 | for j = 1:length(params)
323 | params(j) = (mylims(j,2)'.*exp(params(j))+mylims(j,1)')/(exp(params(j))+1);
324 | end
325 |
326 | %%%%%%%%%%%%%%%%%%%
327 | %SETTING PARAMETERS
328 | %%%%%%%%%%%%%%%%%%%
329 | r_bar = params(11); % (mu)
330 | p_bar=params(1); % (p_0)
331 | tp=params(2); % (sigma_p)
332 | rho_p=params(3); %
333 | phi_pp=params(4); %
334 | phi_pn=params(5); %
335 | n_bar=params(6); % (n_0)
336 | tn=params(7); % (sigma_n)
337 | rho_n=params(8); %
338 | phi_np=params(9); %
339 | phi_nn=params(10); %
340 |
341 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
342 | %COMPUTING THE LOG-LIKELIHOOD
343 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
344 |
345 |
346 | t1=10e-1;
347 |
348 | previous_p = ptseries(end);
349 | previous_n = ntseries(end);
350 |
351 | if ((data_previous-r_bar)<0)
352 | p_t=max(p_bar+rho_p*previous_p+...
353 | phi_pn*(((data_previous-r_bar)^2)/(2*(tp^2))),t1);
354 | n_t=max(n_bar+rho_n*previous_n+...
355 | phi_nn*(((data_previous-r_bar)^2)/(2*(tn^2))),t1);
356 | else
357 | p_t=max(p_bar+rho_p*previous_p+...
358 | phi_pp*(((data_previous-r_bar)^2)/(2*(tp^2))),t1);
359 | n_t=max(n_bar+rho_n*previous_n+...
360 | phi_np*(((data_previous-r_bar)^2)/(2*(tn^2))),t1);
361 | end
362 | obs = new_data -r_bar;
363 | loglikelihood = loglikedgam_unbiased_mc(obs,p_t,n_t,tp,tn);
364 | ptseries = [ptseries; p_t];
365 | ntseries = [ntseries; n_t];
366 |
367 | end
368 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
369 |
370 |
371 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
372 | function loglikedgam = loglikedgam_unbiased_mc(z,p,n,tp,tn)
373 | % This function unbiasedly estimates the likelihood of an observation under
374 | % the BEGE density by using Monte Carlo integration.
375 | % The likelihood can be exactly computed in the cases of p=n=1; p=1&n>1;
376 | % n=1&p>1.
377 | %
378 | % Input:
379 | % z - the point at which the pdf is evaluated
380 | % p - good environment shape parameter
381 | % n - bad environment shape parameter
382 | % tp - good environment scale parameter
383 | % tn - bad environment scale parameter
384 | %
385 | % Output:
386 | % loglikedgam - the loglikelihood of the observations
387 |
388 |
389 | N = 10000;
390 | wp_bar = -p*tp;
391 | wn_bar = -n*tn;
392 | sigma = 1/tp + 1/tn;
393 | delta = max(wp_bar, wn_bar + z);
394 |
395 | if p==1 && n==1
396 | loglikedgam = -log(tp) - log(tn) + wp_bar/tp + (z+wn_bar)/tn - log(sigma)...
397 | - sigma*delta;
398 | return;
399 | end
400 |
401 | if p==1 && n>1
402 |
403 | if (delta == wn_bar + z)
404 | loglikedgam = -log(tp) - n*log(tn) + wp_bar/tp - (z + wn_bar)/tp - n*log(sigma);
405 | return;
406 | else
407 | loglikedgam = -log(tp) - n*log(tn) + wp_bar/tp - (z + wn_bar)/tp - n*log(sigma) + log(1 - gamcdf(delta - (wn_bar + z), n, 1/sigma));
408 | return;
409 | end
410 |
411 | end
412 |
413 | if n==1 && p>1
414 | if (delta == wp_bar)
415 | loglikedgam = -log(tn) - p*log(tp) + (z + wn_bar)/tn - wp_bar/tn - p*log(sigma);
416 | return;
417 | else
418 | loglikedgam = -log(tn) - p*log(tp) + (z + wn_bar)/tn - wp_bar/tn - p*log(sigma) + log(1 - gamcdf(delta - wp_bar, p, 1/sigma));
419 | return;
420 | end
421 | end
422 |
423 | % n>1 && p>1
424 | wp = gamrnd(p,tp,N,1);
425 | wp = wp - p*tp; % de-meaned gamma draws
426 | wp = wp-z;
427 |
428 | x = wp + n*tn;
429 | ind_pos = x>0;
430 |
431 | logw = zeros(N,1);
432 | logw(ind_pos) = -gammaln(n) - n*log(tn) + (n-1)*log(x(ind_pos)) - 1/tn*(x(ind_pos));
433 | logw(~ind_pos) = -Inf;
434 | loglikedgam = -log(N) + logsumexp(logw);
435 | end
436 |
437 |
438 |
439 |
440 |
441 |
442 |
443 |
444 |
445 |
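The general p>1, n>1 branch of loglikedgam_unbiased_mc treats the BEGE innovation as z = omega_p - omega_n, with omega_p and omega_n demeaned gamma variates, so the density is the expectation f(z) = E[ f_omega_n(omega_p - z) ] over omega_p, and the function replaces that expectation with a Monte Carlo average. A standalone sketch of the same estimator (illustration only; the parameter values are arbitrary and logsumexp.m is the helper shipped with the repository):

z = 0.02; p = 1.5; n = 2.0; tp = 0.1; tn = 0.15; N = 10000;   % arbitrary example values
wp   = gamrnd(p,tp,N,1) - p*tp;                 % demeaned gamma draws for omega_p
x    = (wp - z) + n*tn;                         % argument of the omega_n gamma density
logw = -inf(N,1);
ok   = x > 0;                                   % the gamma density is zero for x <= 0
logw(ok) = -gammaln(n) - n*log(tn) + (n-1)*log(x(ok)) - x(ok)/tn;
log_f_hat = -log(N) + logsumexp(logw);          % log of an unbiased estimate of f(z)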
--------------------------------------------------------------------------------
/BEGE/SMC_RW_DataAnneal_Par_unif_unbiased_is.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_particles, loglike, logprior, W, log_evidence] = SMC_RW_DataAnneal_Par_unif_unbiased_is(N)
2 | %SMC with parallelisation for estimating the BEGE model's parameters;
3 | %The sequence of distributions is constructed using the data annealing method.
4 | %The likelihood is unbiasedly estimated using importance sampling.
5 | %
6 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
7 | % N - Size of population of particles
8 |
9 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
10 | % theta - N samples from currently available observations: y_{1:t}, t=1,..,T
11 | % theta_particle - N transformed samples from currently available observations
12 | % loglike - Log likelihood of the BEGE model, corresponding to the above thetas
13 | % logprior - Log prior of the BEGE model, corresponding to the above thetas
14 | % W - The weights of weighted samples, corresponding to the above thetas/theta_particles
15 | % log_evidence - The estimate of log evidence used for model selection
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 |
18 | %loading monthly S&P stock log return
19 | load('Data.mat');
20 | rate_return = MonthlyReturns2018;
21 | rate_return(isnan(rate_return)) = 0;
22 |
23 | %Setting ranges for parameters to do transformation
24 | mylims = zeros(11,2);
25 | mylims(:,1) = zeros(11,1);
26 | mylims(9,1) = -0.2;
27 | mylims(11,1) = -0.9;
28 | mylims(:,2) = [0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9];
29 |
30 | %Initialising
31 | Num_Parameter=11; % dimension of theta
32 | h = 0.1:0.1:1; % candidate scales for the MVN random-walk proposal; choosing a good scale reduces the number of MCMC repeats
33 | propotion = 0.5; % proportion of N used as the target ESS for triggering resampling
34 | T = length(rate_return); % total number of observations
35 | theta=zeros(N,Num_Parameter);
36 | theta_particles=zeros(N,Num_Parameter,T);
37 | logprior=zeros(N,T);
38 |
39 | %Transform theta
40 | parfor j = 1:Num_Parameter
41 | theta(:,j,1) = unifrnd(mylims(j,1),mylims(j,2),[N,1]);
42 | theta_particles(:,j,1) = log((theta(:,j,1) - mylims(j,1))./(mylims(j,2) - theta(:,j,1)));
43 | end
44 |
45 | %calculating initial log prior
46 | parfor i = 1:N
47 | logprior(i,1) = log_prior(theta_particles(i,:,1),mylims);
48 | end
49 |
50 | logW = zeros(N,T+1);
51 | logW(:,1)=-log(N)*ones(N,1);
52 | loglike = zeros(N,T);
53 | loglike_inc = zeros(N,1); %incremental weights; new observation's loglikelihood
54 | ESS = zeros(T,1);
55 | W = (1/N)*ones(N,1); % Initialising weights
56 | logsum_w = zeros(T,1);
57 |
58 | inf_ind = 1;
59 | while inf_ind %this while loop re-draws any particles whose log prior is infinite
60 | inds = find(isinf(logprior(:,1))==1);
61 | for j = 1:Num_Parameter
62 | theta(inds,j,1) = unifrnd(mylims(j,1),mylims(j,2),[length(inds),1]);
63 | theta_particles(inds,j,1) = log((theta(inds,j,1) - mylims(j,1))./(mylims(j,2) - theta(inds,j,1)));
64 | end
65 | for i = inds'
66 | logprior(i,1) = log_prior(theta_particles(i,:,1),mylims);
67 | end
68 | inf_ind = any(isinf(logprior(:,1)));
69 | end
70 |
71 | % Start sampling
72 | for t=1:T
73 | fprintf('Just starting with the %ith observation.\n',t);
74 | rate_return_sub = rate_return(1:t);
75 | if t==1
76 | parfor i=1:N
77 | [~, loglike(i,t),ptseries{i},ntseries{i},~] = bege_gjrgarch_likelihood_unbiased_is(theta_particles(i,:,t),rate_return(1),mylims);
78 | end
79 | logW(:,t+1) = log(W(:,t)) + loglike(:,t);
80 | else
81 | parfor i=1:N
82 | [loglike_inc(i),ptseries{i},ntseries{i}] = bege_gjrgarch_likelihood_danneal_in_unbiased_is(theta_particles(i,:,t),rate_return(t),mylims,ptseries{i},ntseries{i},rate_return(t-1));
83 | end
84 | logW(:,t+1) = log(W(:,t)) + loglike_inc;
85 | loglike(:,t) = loglike(:,t-1) + loglike_inc;
86 | end
87 |
88 | logsum_w(t) = logsumexp(logW(:,t+1));
89 | logW(:,t+1)=logW(:,t+1)-max(logW(:,t+1));
90 | W(:,t+1)=exp(logW(:,t+1))./sum(exp(logW(:,t+1)));
91 | ESS(t)=1/sum(W(:,t+1).^2);
92 |
93 |
94 | if ESS(t) < propotion*N
95 | % (systematic resampling)
96 | u=rand;
97 | indices = zeros(N,1);
98 | cumsum_w = cumsum(W(:,t+1));
99 | for i = 1:N
100 | A = find(u < cumsum_w);
101 | indices(i) = A(1);
102 | u=u+1/N;
103 | if u>1
104 | u=u-1;
105 | end
106 | end
107 | theta_particles(:,:,t)=theta_particles(indices,:,t);
108 | loglike(:,t) = loglike(indices,t);
109 | logprior(:,t) = logprior(indices,t);
110 | ptseries = ptseries(indices);
111 | ntseries = ntseries(indices);
112 |
113 | logW(:,t+1) = -log(N)*ones(N,1);
114 | W(:,t+1)=exp(logW(:,t+1))/sum(exp(logW(:,t+1)));
115 |
116 | %(Move with MCMC Kernel)
117 | cov_rw=cov(theta_particles(:,:,t)); % covariance of resampled
118 | Cov_inv=cov_rw^(-1); % inverse of the above covariance
119 |
120 | %compute mahalanobis distance from the Sample before moving
121 | [~,dists]=rangesearch(theta_particles(:,:,t),theta_particles(:,:,t),inf,'distance','mahalanobis','cov',cov_rw);
122 | dists=cell2mat(dists);
123 | median_dist=median(dists(:));
124 |
125 | h_ind=mod(randperm(N),length(h))'+1;
126 | h_all=h(h_ind);
127 | ESJD=zeros(N,1);
128 | acc_prob=zeros(N,1);
129 |
130 | %MVN RW
131 | parfor i=1:N
132 |
133 | theta_particles_prop = mvnrnd(theta_particles(i,:,t),h_all(i)^2*cov_rw);
134 | logprior_prop=log_prior(theta_particles_prop,mylims);
135 | [~, loglike_prop,ptseries_prop,ntseries_prop,~] = bege_gjrgarch_likelihood_unbiased_is(theta_particles_prop, rate_return_sub,mylims);
136 | if isinf(logprior_prop)
137 | continue;
138 | end
139 |
140 | Alpha=exp(loglike_prop - loglike(i,t) + logprior_prop - logprior(i,t));
141 | acc_prob(i)=min(1,Alpha);
142 |
143 | ESJD(i)=((theta_particles(i,:,t)-theta_particles_prop)*Cov_inv*(theta_particles(i,:,t)-theta_particles_prop)').^(1/2)*acc_prob(i);
144 |
145 | if randmedian_dist)>=ceil(0.5*N)
191 | belowThreshold = false;
192 | end
193 | end
194 | fprintf('the value of R_move was %d\n',R_move);
195 | end
196 | if t~=T
197 | theta_particles(:,:,t+1) = theta_particles(:,:,t);
198 | loglike(:,t+1) = loglike(:,t);
199 | logprior(:,t+1) = logprior(:,t);
200 | end
201 | end
202 |
203 | %Transforming back to original scale
204 | theta = theta_particles;
205 | parfor j=1:Num_Parameter
206 | theta(:,j,:) = (mylims(j,2)'.*exp(theta_particles(:,j,:))+mylims(j,1)')./(exp(theta_particles(:,j,:))+1);
207 | end
208 | log_evidence = sum(logsum_w);
209 |
210 | %save('results_DataAnneal_unbiased_is.mat');
211 | end
212 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
213 |
214 |
215 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
216 | function [logprior] = log_prior(phi,mylims)
217 | % Computes the log prior (takes transformed parameters as input)
218 |
219 | sumA = [-eye(11); eye(11); 0 0 1 0.5 0.5 0 0 0 0 0 0; 0 0 0 0 0 0 0 1 0.5 0.5 0];
220 |
221 | sumB = [zeros(10,1); 0.9;...
222 | 0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9; 0.995;0.995];
223 | sumB(9) = 0.2;
224 |
225 | % Transforming back to original scale
226 | theta = phi;
227 | for j = 1:length(phi)
228 | theta(j) = (mylims(j,2)'.*exp(theta(j))+mylims(j,1)')/(exp(theta(j))+1);
229 | end
230 |
231 | if all(sumA*theta'<=sumB)
232 | logprior = sum(-phi - 2*log(1 + exp(-phi)));
233 | else
234 | logprior = -inf;
235 | end
236 |
237 | end
238 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
239 |
240 |
241 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
242 | function [neglogl,loglikelihood,ptseries,ntseries,likelihoods] = bege_gjrgarch_likelihood_unbiased_is(params,data,mylims)
243 | % Computing the likelihood of the time series under BEGE-GJR-GARCH dynamics, given observed data and model parameters
244 | %====================================================================================================================
245 |
246 | % Transforming back to original scale;
247 | for j = 1:length(params)
248 | params(j) = (mylims(j,2)'.*exp(params(j))+mylims(j,1)')/(exp(params(j))+1);
249 | end
250 |
251 | %%%%%%%%%%%%%%%%%%%
252 | %SETTING PARAMETERS
253 | %%%%%%%%%%%%%%%%%%%
254 | r_bar = params(11); % (mu)
255 | p_bar=params(1); % (p_0)
256 | tp=params(2); % (sigma_p)
257 | rho_p=params(3); %
258 | phi_pp=params(4); %
259 | phi_pn=params(5); %
260 | n_bar=params(6); % (n_0)
261 | tn=params(7); % (sigma_n)
262 | rho_n=params(8); %
263 | phi_np=params(9); %
264 | phi_nn=params(10); %
265 |
266 | %Vector containing likelihood for every observation
267 | likelihoods=zeros(length(data),1);
268 |
269 | %Computing underlying pt and nt processes
270 | ptseries=zeros(length(data),1);
271 | ntseries=zeros(length(data),1);
272 |
273 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
274 | %COMPUTING THE LOG-LIKELIHOOD
275 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
276 |
277 | loglikelihood=0;
278 | t1=10e-1;
279 |
280 | previous_p=max(p_bar/(1-rho_p-(phi_pp+phi_pn)/2),t1);
281 | previous_n=max(n_bar/(1-rho_n-(phi_np+phi_nn)/2),t1);
282 | ptseries(1)=max(previous_p,t1);
283 | ntseries(1)=max(previous_n,t1);
284 |
285 | loglikelihood=loglikelihood+loglikedgam_unbiased_is(data(1)-r_bar,ptseries(1),ntseries(1),tp,tn);
286 | likelihoods(1)=loglikedgam_unbiased_is(data(1)-r_bar,ptseries(1),ntseries(1),tp,tn);
287 |
288 | for t=2:length(data)
289 | if ((data(t-1)-r_bar)<0)
290 | p_t=max(p_bar+rho_p*previous_p+...
291 | phi_pn*(((data(t-1)-r_bar)^2)/(2*(tp^2))),t1);
292 | n_t=max(n_bar+rho_n*previous_n+...
293 | phi_nn*(((data(t-1)-r_bar)^2)/(2*(tn^2))),t1);
294 | else
295 | p_t=max(p_bar+rho_p*previous_p+...
296 | phi_pp*(((data(t-1)-r_bar)^2)/(2*(tp^2))),t1);
297 | n_t=max(n_bar+rho_n*previous_n+...
298 | phi_np*(((data(t-1)-r_bar)^2)/(2*(tn^2))),t1);
299 | end
300 |
301 | obs=data(t)-r_bar;
302 | tmp = loglikedgam_unbiased_is(obs,p_t,n_t,tp,tn);
303 |
304 | loglikelihood=loglikelihood+tmp;
305 | likelihoods(t)=tmp;
306 | ptseries(t)=p_t;
307 | ntseries(t)=n_t;
308 | previous_p=p_t;
309 | previous_n=n_t;
310 | end
311 | neglogl = -loglikelihood;
312 | end
313 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
314 |
315 |
316 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
317 | function [loglikelihood,ptseries,ntseries] = bege_gjrgarch_likelihood_danneal_in_unbiased_is(params,new_data,mylims,ptseries,ntseries,data_previous)
318 | % Computing unbiased estimator of the likelihood of the "new observation" under BEGE-GJR-GARCH dynamics, given previous and new observed data and model parameters
319 | %====================================================================================================================
320 |
321 | % Transforming back to original scale;
322 | for j = 1:length(params)
323 | params(j) = (mylims(j,2)'.*exp(params(j))+mylims(j,1)')/(exp(params(j))+1);
324 | end
325 |
326 | %%%%%%%%%%%%%%%%%%%
327 | %SETTING PARAMETERS
328 | %%%%%%%%%%%%%%%%%%%
329 | r_bar = params(11); % (mu)
330 | p_bar=params(1); % (p_0)
331 | tp=params(2); % (sigma_p)
332 | rho_p=params(3); %
333 | phi_pp=params(4); %
334 | phi_pn=params(5); %
335 | n_bar=params(6); % (n_0)
336 | tn=params(7); % (sigma_n)
337 | rho_n=params(8); %
338 | phi_np=params(9); %
339 | phi_nn=params(10); %
340 |
341 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
342 | %COMPUTING THE LOG-LIKELIHOOD
343 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
344 |
345 |
346 | t1=10e-1;
347 |
348 | previous_p = ptseries(end);
349 | previous_n = ntseries(end);
350 |
351 | if ((data_previous-r_bar)<0)
352 | p_t=max(p_bar+rho_p*previous_p+...
353 | phi_pn*(((data_previous-r_bar)^2)/(2*(tp^2))),t1);
354 | n_t=max(n_bar+rho_n*previous_n+...
355 | phi_nn*(((data_previous-r_bar)^2)/(2*(tn^2))),t1);
356 | else
357 | p_t=max(p_bar+rho_p*previous_p+...
358 | phi_pp*(((data_previous-r_bar)^2)/(2*(tp^2))),t1);
359 | n_t=max(n_bar+rho_n*previous_n+...
360 | phi_np*(((data_previous-r_bar)^2)/(2*(tn^2))),t1);
361 | end
362 | obs = new_data -r_bar;
363 | loglikelihood = loglikedgam_unbiased_is(obs,p_t,n_t,tp,tn);
364 | ptseries = [ptseries; p_t];
365 | ntseries = [ntseries; n_t];
366 |
367 | end
368 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
369 |
370 |
371 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
372 | function loglikedgam = loglikedgam_unbiased_is(z,p,n,tp,tn)
373 | % This function unbiasedly estimates the likelihood of an observation under
374 | % the BEGE density by using importance sampling.
375 | % The likelihood can be exactly computed in the cases of p=n=1; p=1&n>1;
376 | % n=1&p>1.
377 | %
378 | % Input:
379 | % z - the point at which the pdf is evaluated
380 | % p - good environment shape parameter
381 | % n - bad environment shape parameter
382 | % tp - good environment scale parameter
383 | % tn - bad environment scale parameter
384 | %
385 | % Output:
386 | % loglikedgam - the loglikelihood of the observations
387 |
388 |
389 | wp_bar = -p*tp;
390 | wn_bar = -n*tn;
391 | sigma = 1/tp + 1/tn;
392 | delta = max(wp_bar, wn_bar + z);
393 | N = 1000;
394 |
395 | if p==1 && n==1
396 | loglikedgam = -log(tp) - log(tn) + wp_bar/tp + (z+wn_bar)/tn - log(sigma)...
397 | - sigma*delta;
398 | return;
399 | end
400 |
401 | if p==1 && n>1
402 |
403 | if (delta == wn_bar + z)
404 | loglikedgam = -log(tp) - n*log(tn) + wp_bar/tp - (z + wn_bar)/tp - n*log(sigma);
405 | return;
406 | else
407 | loglikedgam = -log(tp) - n*log(tn) + wp_bar/tp - (z + wn_bar)/tp - n*log(sigma) + log(1 - gamcdf(delta - (wn_bar + z), n, 1/sigma));
408 | return;
409 | end
410 |
411 | end
412 |
413 | if n==1 && p>1
414 | if (delta == wp_bar)
415 | loglikedgam = -log(tn) - p*log(tp) + (z + wn_bar)/tn - wp_bar/tn - p*log(sigma);
416 | return;
417 | else
418 | loglikedgam = -log(tn) - p*log(tp) + (z + wn_bar)/tn - wp_bar/tn - p*log(sigma) + log(1 - gamcdf(delta - wp_bar, p, 1/sigma));
419 | return;
420 | end
421 | end
422 |
423 | % n>1 && p>1
424 | bneg_mode = sigma*(z+wn_bar+wp_bar)+(p+n-2);
425 | constant = sigma*(z*wp_bar + wp_bar*wn_bar) + (p-1)*(z+wn_bar) + (n-1)*wp_bar;
426 | mode = (bneg_mode + sqrt(bneg_mode^2 - 4*sigma*constant))/(2*sigma);
427 |
428 | variance = -1/((1-p)/(mode-wp_bar)^2 + (1-n)/(mode-z-wn_bar)^2);
429 | bneg = 2*variance + (mode - delta)^2;
430 | a = (bneg + sqrt(bneg^2 - 4*variance^2))/(2*variance);
431 | b = (mode - delta)/(a-1);
432 | wp = gamrnd(a,b,N,1) + delta;
433 | log_target = -gammaln(p) - p*log(tp) + (p-1)*log(wp-wp_bar) - (wp-wp_bar)/tp...
434 | - gammaln(n) - n*log(tn) + (n-1)*log(wp-z-wn_bar) - (wp-z-wn_bar)/tn;
435 | log_importance = -a*log(b) - gammaln(a) +(a-1)*log(wp - delta) -(wp - delta)/b;
436 | logw = log_target - log_importance;
437 | loglikedgam = -log(N) + logsumexp(logw);
438 |
439 | end
440 |
441 |
442 |
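The p>1, n>1 branch above evaluates the same convolution integral, f(z) = integral over (delta, inf) of f_omega_p(w)*f_omega_n(w - z) dw, by importance sampling with a shifted gamma proposal matched to the mode and curvature of the integrand. A quick numerical sanity check (illustration only; it assumes loglikedgam_unbiased_is has been saved as its own function on the MATLAB path, and the parameter values are arbitrary):

z = 0.02; p = 1.5; n = 2.0; tp = 0.1; tn = 0.15;          % arbitrary example values
wp_bar = -p*tp;  wn_bar = -n*tn;
delta  = max(wp_bar, wn_bar + z);
integrand  = @(w) gampdf(w - wp_bar, p, tp) .* gampdf(w - z - wn_bar, n, tn);
log_f_quad = log(integral(integrand, delta, inf));         % direct quadrature of the convolution
log_f_is   = loglikedgam_unbiased_is(z, p, n, tp, tn);     % importance-sampling estimate from above
% log_f_is fluctuates around log_f_quad; increasing N inside the function tightens the estimate.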
--------------------------------------------------------------------------------
/BEGE/SMC_RW_DataAnneal_parallel_biased.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_particle, loglike, logprior, W, log_evidence] = SMC_RW_DataAnneal_parallel_biased(N)
2 | %SMC with vectorisation and parallelisation for estimating the BEGE model's
3 | %parameters;
4 | %The sequence of distributions is constructed using the data annealing method.
5 |
6 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
7 | % N - Size of population of particles
8 |
9 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
10 | % theta - N samples from currently available observations: y_{1:t}, t=1,..,T
11 | % theta_particle - N transformed samples from currently available observations
12 | % loglike - Log likelihood of the BEGE model, corresponding to the above thetas
13 | % logprior - Log prior of the BEGE model, corresponding to the above thetas
14 | % W - The weights of weighted samples, corresponding to the above thetas/theta_particles
15 | % log_evidence - The estimate of log evidence used for model selection
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 |
18 | %loading monthly S&P stock log return
19 | load('Data.mat');
20 | rate_return = MonthlyReturns2018;
21 | rate_return(isnan(rate_return)) = 0;
22 |
23 | %Setting ranges for parameters to do transformation
24 | mylims = zeros(11,2);
25 | mylims(:,1) = 1e-4.*ones(11,1);
26 | mylims(9,1) = -0.2;
27 | mylims(11,1) = -0.9;
28 | mylims(:,2) = [0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9];
29 |
30 | %Starting parallel pool
31 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
32 | quthpc = parcluster('local');
33 | parpool(quthpc);
34 | poolsize = quthpc.NumWorkers;
35 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
36 |
37 | %Initialising
38 | Num_Parameter=11; % dimension of theta
39 | h = 0.1:0.1:1; % candidate scales for the MVN random-walk proposal; choosing a good scale reduces the number of MCMC repeats
40 | propotion = 0.5; % proportion of N used as the target ESS for triggering resampling
41 | T = length(rate_return); % total number of observations
42 |
43 | %Transform theta
44 | theta=zeros(N,Num_Parameter);
45 | theta_particle=zeros(N,Num_Parameter);
46 | for j = 1:Num_Parameter
47 | theta(:,j) = unifrnd(mylims(j,1),mylims(j,2),[N,1]);
48 | theta_particle(:,j) = log((theta(:,j) - mylims(j,1))./(mylims(j,2) - theta(:,j)));
49 | end
50 |
51 | %calculating initial log prior
52 | logprior=logPrior_parallel_fv(theta_particle,mylims);
53 |
54 |
55 | inds=1:N;
56 | while isempty(inds)==0 %this while loop re-draws any particles whose log prior is infinite
57 | for j=1:Num_Parameter
58 | theta(inds,j)=unifrnd(mylims(j,1),mylims(j,2),[length(inds),1]);
59 | theta_particle(inds,j) = log((theta(inds,j) - mylims(j,1))./(mylims(j,2) - theta(inds,j)));
60 | end
61 | logprior(inds)=logPrior_parallel_fv(theta_particle(inds,:),mylims);
62 | inds=find(isinf(logprior));
63 | end
64 |
65 | logw=zeros(N,T+1);
66 | logw(:,1)=-log(N)*ones(N,1);
67 | loglike=zeros(N,T);
68 | loglike_inc=zeros(N,1); %incremental weights; new observation's loglikelihood
69 | ESS=zeros(T,1);
70 | W=(1/N)*ones(N,1); % Initialising weights
71 | logsum_w = zeros(T,1);
72 |
73 | % Start sampling
74 | for t=1:T
75 | fprintf('Just starting with the %ith observation.\n',t);
76 | rate_return_sub = rate_return(1:t);
77 | if t==1
78 | [loglike(:,t),ptseries,ntseries]=bege_gjrgarch_likelihood_parallel(theta_particle(:,:,t),rate_return(1),mylims);
79 | logw(:,t+1) = log(W(:,t)) + loglike(:,t);
80 | else
81 | %log(f(y_t|y_{1:t-1},\theta))
82 | [loglike_inc,ptseries,ntseries] = bege_gjrgarch_likelihood_danneal_in_parallel(theta_particle(:,:,t),rate_return(t),mylims,ptseries,ntseries,rate_return(t-1));
83 | loglike(:,t) = loglike(:,t-1) + loglike_inc;
84 | logw(:,t+1) = log(W(:,t)) + loglike_inc;
85 | end
86 |
87 | logsum_w(t) = logsumexp(logw(:,t+1));
88 | logw(:,t+1)=logw(:,t+1)-max(logw(:,t+1));
89 | W(:,t+1)=exp(logw(:,t+1))./sum(exp(logw(:,t+1)));
90 | ESS(t)=1/sum(W(:,t+1).^2);
91 |
92 |
93 | if ESS(t) < propotion*N
94 | % (systematic resampling)
95 | u=rand;
96 | indices = zeros(N,1);
97 | cumsum_w = cumsum(W(:,t+1));
98 | for i = 1:N
99 | A = find(u < cumsum_w);
100 | indices(i) = A(1);
101 | u=u+1/N;
102 | if u>1
103 | u=u-1;
104 | end
105 | end
106 | theta_particle(:,:,t)=theta_particle(indices,:,t);
107 | loglike(:,t) = loglike(indices,t);
108 | logprior(:,t) = logprior(indices,t);
109 | ptseries = ptseries(indices,:);
110 | ntseries = ntseries(indices,:);
111 |
112 | logw(:,t+1) = -log(N)*ones(N,1);
113 | W(:,t+1)=exp(logw(:,t+1))/sum(exp(logw(:,t+1)));
114 |
115 | %(Move with MCMC Kernel)
116 | cov_rw=cov(theta_particle(:,:,t)); % covariance of resampled
117 | Cov_inv=cov_rw^(-1); % inverse of the above covariance
118 |
119 | %compute mahalanobis distance from the Sample before moving
120 | [~,dists]=rangesearch(theta_particle(:,:,t),theta_particle(:,:,t),inf,'distance','mahalanobis','cov',cov_rw);
121 | dists=cell2mat(dists);
122 | median_dist=median(dists(:));
123 |
124 | h_ind=mod(randperm(N),length(h))'+1;
125 | h_all=h(h_ind);
126 | ESJD=zeros(N,1);
127 |
128 | %MVN RW
129 | parfor i=1:N
130 | theta_particle_prop(i,:) = mvnrnd(theta_particle(i,:,t),h_all(i)^2*cov_rw);
131 | end
132 |
133 | logprior_prop = logPrior_parallel_fv(theta_particle_prop,mylims);
134 |
135 | inds = find(isinf(logprior_prop)==0);%only choose non-inf values of proposed particles
136 | len_inds = length(inds);
137 |
138 | percore = ceil(len_inds/poolsize);
139 | parfor core = 1:poolsize
140 | current_core = percore*(core-1)+1: min(percore*core,len_inds);
141 | [loglike_prop_cell{core},ptseries_prop_cell{core},ntseries_prop_cell{core}] = bege_gjrgarch_likelihood_parallel(theta_particle_prop(inds(current_core),:),rate_return_sub,mylims);
142 | end
143 | loglike_prop = loglike_prop_cell{1};
144 | ptseries_prop = ptseries_prop_cell{1};
145 | ntseries_prop = ntseries_prop_cell{1};
146 | for core = 2:poolsize
147 | loglike_prop = [loglike_prop; loglike_prop_cell{core}];
148 | ptseries_prop = [ptseries_prop; ptseries_prop_cell{core}];
149 | ntseries_prop = [ntseries_prop; ntseries_prop_cell{core}];
150 | end
151 |
152 | log_mh = loglike_prop - loglike(inds,t) + logprior_prop(inds) - logprior(inds,t);
153 |
154 | acc_probs = -inf*ones(N,1);
155 | acc_probs(inds) = exp(log_mh);
156 |
157 | for i=1:length(inds)
158 | ESJD(inds(i))=((theta_particle(inds(i),:,t)-theta_particle_prop(inds(i),:))*Cov_inv*(theta_particle(inds(i),:,t)-theta_particle_prop(inds(i),:))').^(1/2)*acc_probs(inds(i));
159 | end
160 |
161 | toacc_sub = find(rand(len_inds,1)median_dist)>=ceil(0.5*N)
226 | belowThreshold = false;
227 | end
228 | end
229 | fprintf('the value of R_move was %d\n',R_move);
230 |
231 | end
232 | if t~=T
233 | theta_particle(:,:,t+1) = theta_particle(:,:,t);
234 | loglike(:,t+1) = loglike(:,t);
235 | logprior(:,t+1) = logprior(:,t);
236 | end
237 | end
238 |
239 | %Transforming back to original scale
240 | theta = zeros(N,Num_Parameter,T);
241 | parfor j=1:Num_Parameter
242 | theta(:,j,:) = (mylims(j,2)'.*exp(theta_particle(:,j,:))+mylims(j,1)')./(exp(theta_particle(:,j,:))+1);
243 | end
244 | log_evidence = sum(logsum_w);
245 |
246 | delete(gcp);%shut down parallel pool
247 | save('results_bege_DataAnneal.mat');
248 | end
249 |
250 |
251 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
252 | function [logprior] = logPrior_parallel_fv(theta_particle,mylims)
253 | % Computes the log prior (takes transformed parameters as input)
254 |
255 | sumA = [-eye(11); eye(11); 0 0 1 0.5 0.5 0 0 0 0 0 0; 0 0 0 0 0 0 0 1 0.5 0.5 0];
256 |
257 | sumB = [zeros(10,1); 0.9;...
258 | 0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9; 0.995;0.995];
259 | sumB(9) = 0.2;
260 |
261 | [N,d]=size(theta_particle);
262 |
263 | % Transforming back to original scale
264 | theta = theta_particle;
265 | for j = 1:d
266 | theta(:,j) = (mylims(j,2)'.*exp(theta(:,j))+mylims(j,1)')./(exp(theta(:,j))+1);
267 | end
268 |
269 | logprior = -inf*ones(N,1);
270 | inds=find(all(sumA*theta'<=sumB*ones(1,N))==1);
271 | logprior(inds)=sum(-theta_particle(inds,:)-2*log(1+exp(-theta_particle(inds,:))),2);
272 |
273 | end
274 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
275 |
276 |
277 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
278 | function [loglikelihood,ptseries,ntseries] = bege_gjrgarch_likelihood_parallel(params,data,mylims)
279 | % Computing the likelihood of the time series under BEGE-GJR-GARCH dynamics, given observed data and model parameters
280 | %====================================================================================================================
281 |
282 | [N,d]=size(params);
283 |
284 | % Transforming back to original scale;
285 | for j = 1:d
286 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
287 | end
288 |
289 | %%%%%%%%%%%%%%%%%%%
290 | %SETTING PARAMETERS
291 | %%%%%%%%%%%%%%%%%%%
292 | r_bar = params(:,11); % (mu)
293 | p_bar=params(:,1); % (p_0)
294 | tp=params(:,2); % (sigma_p)
295 | rho_p=params(:,3); %
296 | phi_pp=params(:,4); %
297 | phi_pn=params(:,5); %
298 | n_bar=params(:,6); % (n_0)
299 | tn=params(:,7); % (sigma_n)
300 | rho_n=params(:,8); %
301 | phi_np=params(:,9); %
302 | phi_nn=params(:,10); %
303 |
304 |
305 | %Computing underlying pt and nt processes
306 | ptseries=zeros(N,length(data));
307 | ntseries=zeros(N,length(data));
308 |
309 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
310 | %COMPUTING THE LOG-LIKELIHOOD
311 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
312 |
313 | loglikelihood=zeros(N,1);
314 | t1=10e-1;
315 |
316 | previous_p=max(p_bar./(1-rho_p-(phi_pp+phi_pn)/2),t1);
317 | previous_n=max(n_bar./(1-rho_n-(phi_np+phi_nn)/2),t1);
318 | ptseries(:,1)=max(previous_p,t1);
319 | ntseries(:,1)=max(previous_n,t1);
320 |
321 | loglikelihood=loglikelihood+loglikedgam_parallel(data(1)-r_bar,ptseries(:,1),ntseries(:,1),tp,tn,0.001);
322 |
323 | p_t=zeros(N,1);
324 | n_t=zeros(N,1);
325 | for t=2:length(data)
326 | inds_neg=find(data(t-1)-r_bar<0);
327 | inds_pos=find(data(t-1)-r_bar>=0);
328 |
329 | if isempty(inds_neg)==0
330 | p_t(inds_neg)=max(p_bar(inds_neg)+rho_p(inds_neg).*previous_p(inds_neg)+...
331 | phi_pn(inds_neg).*(((data(t-1)-r_bar(inds_neg)).^2)./(2*(tp(inds_neg).^2))),t1);
332 | n_t(inds_neg)=max(n_bar(inds_neg)+rho_n(inds_neg).*previous_n(inds_neg)+...
333 | phi_nn(inds_neg).*(((data(t-1)-r_bar(inds_neg)).^2)./(2*(tn(inds_neg).^2))),t1);
334 | end
335 | if isempty(inds_pos)==0
336 | p_t(inds_pos)=max(p_bar(inds_pos)+rho_p(inds_pos).*previous_p(inds_pos)+...
337 | phi_pp(inds_pos).*(((data(t-1)-r_bar(inds_pos)).^2)./(2*(tp(inds_pos).^2))),t1);
338 | n_t(inds_pos)=max(n_bar(inds_pos)+rho_n(inds_pos).*previous_n(inds_pos)+...
339 | phi_np(inds_pos).*(((data(t-1)-r_bar(inds_pos)).^2)./(2*(tn(inds_pos).^2))),t1);
340 | end
341 |
342 | obs=data(t)-r_bar;
343 | tmp = loglikedgam_parallel(obs,p_t,n_t,tp,tn,0.001);
344 | loglikelihood=loglikelihood+tmp;
345 | ptseries(:,t)=p_t;
346 | ntseries(:,t)=n_t;
347 | previous_p=p_t;
348 | previous_n=n_t;
349 | end
350 |
351 | end
352 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
353 |
354 |
355 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
356 | function [loglikelihood,ptseries,ntseries] = bege_gjrgarch_likelihood_danneal_in_parallel(params,new_data,mylims,ptseries,ntseries,data_previous)
357 | % Computing the likelihood of the "new observation" under BEGE-GJR-GARCH dynamics, given previous and new observed data and model parameters
358 | %====================================================================================================================
359 |
360 | [N,d] = size(params);
361 |
362 | % Transforming back to original scale;
363 | for j = 1:d
364 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
365 | end
366 |
367 | %%%%%%%%%%%%%%%%%%%
368 | %SETTING PARAMETERS
369 | %%%%%%%%%%%%%%%%%%%
370 | r_bar = params(:,11); % (mu)
371 | p_bar=params(:,1); % (p_0)
372 | tp=params(:,2); % (sigma_p)
373 | rho_p=params(:,3); %
374 | phi_pp=params(:,4); %
375 | phi_pn=params(:,5); %
376 | n_bar=params(:,6); % (n_0)
377 | tn=params(:,7); % (sigma_n)
378 | rho_n=params(:,8); %
379 | phi_np=params(:,9); %
380 | phi_nn=params(:,10); %
381 |
382 |
383 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
384 | %COMPUTING THE LOG-LIKELIHOOD
385 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
386 |
387 |
388 | t1=10e-1;
389 |
390 | previous_p = ptseries(:,end);
391 | previous_n = ntseries(:,end);
392 |
393 | p_t = zeros(N,1);
394 | n_t = zeros(N,1);
395 | inds_neg = find(data_previous-r_bar<0);
396 | inds_pos = find(data_previous-r_bar>=0);
397 |
398 | if isempty(inds_neg)==0
399 | p_t(inds_neg)=max(p_bar(inds_neg)+rho_p(inds_neg).*previous_p(inds_neg)+...
400 | phi_pn(inds_neg).*(((data_previous-r_bar(inds_neg)).^2)./(2*(tp(inds_neg).^2))),t1);
401 | n_t(inds_neg)=max(n_bar(inds_neg)+rho_n(inds_neg).*previous_n(inds_neg)+...
402 | phi_nn(inds_neg).*(((data_previous-r_bar(inds_neg)).^2)./(2*(tn(inds_neg).^2))),t1);
403 | end
404 | if isempty(inds_pos)==0
405 | p_t(inds_pos)=max(p_bar(inds_pos)+rho_p(inds_pos).*previous_p(inds_pos)+...
406 | phi_pp(inds_pos).*(((data_previous-r_bar(inds_pos)).^2)./(2*(tp(inds_pos).^2))),t1);
407 | n_t(inds_pos)=max(n_bar(inds_pos)+rho_n(inds_pos).*previous_n(inds_pos)+...
408 | phi_np(inds_pos).*(((data_previous-r_bar(inds_pos)).^2)./(2*(tn(inds_pos).^2))),t1);
409 | end
410 |
411 | obs=new_data-r_bar;
412 | loglikelihood = loglikedgam_parallel(obs,p_t,n_t,tp,tn,0.001);
413 | ptseries = [ptseries p_t];
414 | ntseries = [ntseries n_t];
415 |
416 |
417 | end
418 |
419 |
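Every sampler in this collection uses the same single-uniform systematic resampling step once the ESS falls below its target. A standalone version of that step is sketched below (illustration only; the repository keeps this logic inline rather than in a helper file):

function indices = systematic_resample(W)
% W is an N-by-1 vector of normalised weights; the returned indices are used to
% duplicate or drop particles, e.g. theta_particle(:,:,t) = theta_particle(indices,:,t);
N = length(W);
indices = zeros(N,1);
cumsum_w = cumsum(W);
u = rand;                    % one uniform draw, then a deterministic 1/N grid
for i = 1:N
    A = find(u < cumsum_w);
    indices(i) = A(1);
    u = u + 1/N;
    if u > 1                 % wrap around so u stays in (0,1)
        u = u - 1;
    end
end
end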
--------------------------------------------------------------------------------
/BEGE/SMC_RW_LikeAnneal_ParVec_unbiased_is.m:
--------------------------------------------------------------------------------
1 | function [theta, theta_particle, loglike, logprior, gamma, log_evidence] = SMC_RW_LikeAnneal_ParVec_unbiased_is(N)
2 | %SMC with vectorisation and parallelisation for estimating the BEGE model's
3 | %parameters;
4 | %The sequence of distributions is constructed using the likelihood annealing method.
5 | %The likelihood is unbiasedly estimated using importance sampling.
6 |
7 | %%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%
8 | % N - Size of population of particles
9 |
10 | %%%%%%%%%%%%%%%%%%%% OUTPUT %%%%%%%%%%%%%%%%%%%
11 | % theta - N samples from each temperature
12 | % theta_particle - N transformed samples from each temperature
13 | % loglike - Log likelihood of the BEGE model, corresponding to the above thetas
14 | % logprior - Log prior of the BEGE model, corresponding to the above thetas
15 | % gamma - The temperatures from likelihood annealing strategy
16 | % log_evidence - The estimate of log evidence used for model selection
17 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
18 |
19 | %loading monthly S&P stock log return
20 | load('Data.mat');
21 | rate_return = MonthlyReturns2018;
22 | rate_return(isnan(rate_return)) = 0;
23 |
24 | %Setting ranges for parameters to do transformation
25 | mylims = zeros(11,2);
26 | mylims(:,1) = 1e-4.*ones(11,1);
27 | mylims(9,1) = -0.2;
28 | mylims(11,1) = -0.9;
29 | mylims(:,2) = [0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9];
30 |
31 | %Starting parallel pool
32 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
33 | quthpc = parcluster('local');
34 | parpool(quthpc);
35 | poolsize = quthpc.NumWorkers;
36 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
37 |
38 | %Initialising
39 | t = 1;
40 | Num_Parameter = 11; % dimension of theta
41 | log_evidence = 0;
42 | gamma = 0; % temperatures
43 | logw_previous=log(ones(N,1)*(1/N));
44 | h = 0.1:0.1:1; % candidate scales for the MVN random-walk proposal; choosing a good scale reduces the number of MCMC repeats
45 |
46 | %Transform theta
47 | theta = zeros(N,Num_Parameter);
48 | theta_particle = zeros(N,Num_Parameter);
49 | for j = 1:Num_Parameter
50 | theta(:,j) = unifrnd(mylims(j,1),mylims(j,2),[N,1]);
51 | theta_particle(:,j) = log((theta(:,j) - mylims(j,1))./(mylims(j,2) - theta(:,j)));
52 | end
53 |
54 | %calculating initial log prior
55 | logprior = logPrior_parallel_fv(theta_particle,mylims);
56 |
57 |
58 | inds = 1:N;
59 | while isempty(inds)==0 %this while loop re-draws any particles whose log prior is infinite
60 | for j=1:Num_Parameter
61 | theta(inds,j) = unifrnd(mylims(j,1),mylims(j,2),[length(inds),1]);
62 | theta_particle(inds,j) = log((theta(inds,j) - mylims(j,1))./(mylims(j,2) - theta(inds,j)));
63 | end
64 | logprior(inds) = logPrior_parallel_fv(theta_particle(inds,:),mylims);
65 | inds = find(isinf(logprior));
66 | end
67 |
68 | %calculating initial log likelihood
69 | [loglike, ~, ~] = bege_gjrgarch_likelihood_parallel_unbiased_is(theta_particle,rate_return,mylims);
70 |
71 | while gamma(t)< 1
72 | %Testing whether the final temperature gamma=1 can be reached
73 | logw = logw_previous+(1-gamma(t))*loglike(:,t);
74 | w = logw-max(logw); %stability
75 | w = exp(w);
76 | w = w/sum(w);
77 | ESS1 = 1/sum(w.^2);
78 |
79 | %Choosing next temperature
80 | if (ESS1 >= N/2)
81 | gamma(t+1) = 1;
82 | else
83 | %use bisection method to find the sequence of gamma
84 | fun=@(gamma_Current)Gamma(gamma_Current,gamma(t),N,loglike(:,t),logw_previous);
85 | interval=[gamma(t) 1];
86 | gamma_Current=fzero(fun,interval);
87 | gamma(t+1)=gamma_Current;
88 | end
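% (Annotation, not original code.) The bisection above picks gamma(t+1) as the temperature at
% which the effective sample size of the reweighted particles hits the target N/2. For a
% candidate temperature g the reweighting and ESS are
%   logw_g = logw_previous + (g - gamma(t))*loglike(:,t);
%   w_g    = exp(logw_g - max(logw_g));  w_g = w_g/sum(w_g);
%   ESS_g  = 1/sum(w_g.^2);
% so Gamma.m is assumed to return ESS_g - N/2, whose root fzero locates in [gamma(t), 1].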
89 | fprintf('The current temperature is %.3f.\n',gamma(t+1));
90 |
91 | %Substitute the newly calculated value of gamma(t+1)
92 | logw = logw_previous + (gamma(t+1)- gamma(t))*loglike(:,t);
93 | log_evidence=log_evidence + logsumexp(logw);
94 | w = logw-max(logw);
95 | w = exp(w);
96 | w = w/sum(w);
97 |
98 | % (systematic resampling)
99 | u=rand;
100 | indices = zeros(N,1);
101 | cumsum_w = cumsum(w);
102 | for i = 1:N
103 | A = find(u < cumsum_w);
104 | indices(i) = A(1);
105 | u=u+1/N;
106 | if u>1
107 | u=u-1;
108 | end
109 | end
110 | theta_particle(:,:,t) = theta_particle(indices,:,t);
111 | loglike(:,t) = loglike(indices,t);
112 | logprior(:,t) = logprior(indices,t);
113 |
114 | %(Move with MCMC Kernel)
115 | cov_rw = cov(theta_particle(:,:,t)); %covariance of resampled
116 | Cov_inv=cov_rw^(-1); % inverse of the above covariance
117 |
118 | %compute mahalanobis distance from the Sample before moving
119 | [~,dists]=rangesearch(theta_particle(:,:,t),theta_particle(:,:,t),inf,'distance','mahalanobis','cov',cov_rw);
120 | dists=cell2mat(dists);
121 | median_dist=median(dists(:));
122 |
123 | h_ind=mod(randperm(N),length(h))'+1;
124 | h_all=h(h_ind);
125 | ESJD=zeros(N,1);
126 |
127 | %MVN RW
128 | parfor i=1:N
129 | theta_particle_prop(i,:) = mvnrnd(theta_particle(i,:,t),h_all(i)^2*cov_rw);
130 | end
131 | logprior_prop = logPrior_parallel_fv(theta_particle_prop,mylims);
132 |
133 | inds = find(isinf(logprior_prop)==0);
134 | len_inds = length(inds);
135 |
136 | percore = ceil(len_inds/poolsize);
137 | parfor core = 1:poolsize
138 | current_core = percore*(core-1)+1: min(percore*core,len_inds);
139 | [loglike_prop_cell{core},~,~] = bege_gjrgarch_likelihood_parallel_unbiased_is(theta_particle_prop(inds(current_core),:),rate_return,mylims);
140 | end
141 | loglike_prop = loglike_prop_cell{1};
142 | for core = 2:poolsize
143 | loglike_prop = [loglike_prop; loglike_prop_cell{core}];
144 | end
145 |
146 | log_mh = gamma(t+1)*loglike_prop - gamma(t+1)*loglike(inds,t) + logprior_prop(inds) - logprior(inds,t);
147 |
148 | acc_probs = -inf*ones(N,1);
149 | acc_probs(inds) = exp(log_mh);
150 |
151 | for i=1:length(inds)
152 | ESJD(inds(i))=((theta_particle(inds(i),:,t)-theta_particle_prop(inds(i),:))*Cov_inv*(theta_particle(inds(i),:,t)-theta_particle_prop(inds(i),:))').^(1/2)*acc_probs(inds(i));
153 | end
154 |
155 | toacc_sub = find(rand(len_inds,1)median_dist)>=ceil(0.5*N)
209 | belowThreshold = false;
210 | end
211 | end
212 | fprintf('the value of R_move was %d\n',R_move);
213 |
214 | theta_particle(:,:,t+1) = theta_particle(:,:,t);
215 | loglike(:,t+1) = loglike(:,t);
216 | logprior(:,t+1) = logprior(:,t);
217 |
218 | t = t+1;
219 | end
220 |
221 | %Transforming back to original scale
222 | theta = zeros(N,Num_Parameter,size(theta_particle,3));
223 | for j=1:Num_Parameter
224 | theta(:,j,:) = (mylims(j,2)'.*exp(theta_particle(:,j,:))+mylims(j,1)')./(exp(theta_particle(:,j,:))+1);
225 | end
226 |
227 | delete(gcp);%shut down parallel pool
228 | %save('results_bege_LikeAnneal_is.mat');
229 | end
230 |
231 |
232 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
233 | function [logprior] = logPrior_parallel_fv(theta_particle,mylims)
234 | % Computes the log prior (takes transformed parameters as input)
235 |
236 | sumA = [-eye(11); eye(11); 0 0 1 0.5 0.5 0 0 0 0 0 0; 0 0 0 0 0 0 0 1 0.5 0.5 0];
237 |
238 | sumB = [zeros(10,1); 0.9;...
239 | 0.5;0.3;0.99;0.5;0.5;1;0.3;0.99;0.1;0.75;0.9; 0.995;0.995];
240 | sumB(9) = 0.2;
241 |
242 | [N,d]=size(theta_particle);
243 |
244 | % Transforming back to original scale
245 | theta = theta_particle;
246 | for j = 1:d
247 | theta(:,j) = (mylims(j,2)'.*exp(theta(:,j))+mylims(j,1)')./(exp(theta(:,j))+1);
248 | end
249 |
250 | logprior = -inf*ones(N,1);
251 | inds=find(all(sumA*theta'<=sumB*ones(1,N))==1);
252 | logprior(inds)=sum(-theta_particle(inds,:)-2*log(1+exp(-theta_particle(inds,:))),2);
253 |
254 | end
255 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
256 |
257 |
258 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
259 | function [loglikelihood,ptseries,ntseries] = bege_gjrgarch_likelihood_parallel_unbiased_is(params,data,mylims)
260 | % Computing unbiased estimator of the likelihood of the time series under BEGE-GJR-GARCH dynamics, given observed data and model parameters
261 | %====================================================================================================================
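    | % The demeaned return z_t = r_t - mu is modelled as the difference of a centred
    | % "good" gamma shock (shape p_t, scale sigma_p) and a centred "bad" gamma shock
    | % (shape n_t, scale sigma_n); p_t and n_t follow GJR-GARCH-type recursions that
    | % respond asymmetrically to positive and negative lagged demeaned returns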
262 |
263 | [N,d]=size(params);
264 |
265 | % Transforming back to original scale;
266 | for j = 1:d
267 | params(:,j) = (mylims(j,2)'.*exp(params(:,j))+mylims(j,1)')./(exp(params(:,j))+1);
268 | end
269 |
270 | %%%%%%%%%%%%%%%%%%%
271 | %SETTING PARAMETERS
272 | %%%%%%%%%%%%%%%%%%%
273 | r_bar = params(:,11); % (mu)
274 | p_bar=params(:,1); % (p_0)
275 | tp=params(:,2); % (sigma_p)
276 | rho_p=params(:,3); % persistence of the p_t recursion
277 | phi_pp=params(:,4); % response of p_t to (scaled) squared positive return shocks
278 | phi_pn=params(:,5); % response of p_t to (scaled) squared negative return shocks
279 | n_bar=params(:,6); % (n_0)
280 | tn=params(:,7); % (sigma_n)
281 | rho_n=params(:,8); % persistence of the n_t recursion
282 | phi_np=params(:,9); % response of n_t to (scaled) squared positive return shocks
283 | phi_nn=params(:,10); % response of n_t to (scaled) squared negative return shocks
284 |
285 |
286 | %Computing underlying pt and nt processes
287 | ptseries=zeros(N,length(data));
288 | ntseries=zeros(N,length(data));
289 |
290 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
291 | %COMPUTING THE LOG-LIKELIHOOD
292 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
293 |
294 | loglikelihood=zeros(N,1);
295 | t1=10e-1; % lower bound used to floor the shape processes p_t and n_t (10e-1 = 1)
296 |
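    | % initialise p_t and n_t at approximate long-run values of their recursions, floored at t1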
297 | previous_p=max(p_bar./(1-rho_p-(phi_pp+phi_pn)/2),t1);
298 | previous_n=max(n_bar./(1-rho_n-(phi_np+phi_nn)/2),t1);
299 | ptseries(:,1)=max(previous_p,t1);
300 | ntseries(:,1)=max(previous_n,t1);
301 |
302 | loglikelihood=loglikelihood+loglikedgam_parallel_unbiased_is(data(1)-r_bar,ptseries(:,1),ntseries(:,1),tp,tn);
303 |
304 | p_t=zeros(N,1);
305 | n_t=zeros(N,1);
306 | for t=2:length(data)
307 | inds_neg=find(data(t-1)-r_bar<0);
308 | inds_pos=find(data(t-1)-r_bar>=0);
309 |
310 | if isempty(inds_neg)==0
311 | p_t(inds_neg)=max(p_bar(inds_neg)+rho_p(inds_neg).*previous_p(inds_neg)+...
312 | phi_pn(inds_neg).*(((data(t-1)-r_bar(inds_neg)).^2)./(2*(tp(inds_neg).^2))),t1);
313 | n_t(inds_neg)=max(n_bar(inds_neg)+rho_n(inds_neg).*previous_n(inds_neg)+...
314 | phi_nn(inds_neg).*(((data(t-1)-r_bar(inds_neg)).^2)./(2*(tn(inds_neg).^2))),t1);
315 | end
316 | if isempty(inds_pos)==0
317 | p_t(inds_pos)=max(p_bar(inds_pos)+rho_p(inds_pos).*previous_p(inds_pos)+...
318 | phi_pp(inds_pos).*(((data(t-1)-r_bar(inds_pos)).^2)./(2*(tp(inds_pos).^2))),t1);
319 | n_t(inds_pos)=max(n_bar(inds_pos)+rho_n(inds_pos).*previous_n(inds_pos)+...
320 | phi_np(inds_pos).*(((data(t-1)-r_bar(inds_pos)).^2)./(2*(tn(inds_pos).^2))),t1);
321 | end
322 |
323 | obs=data(t)-r_bar;
324 | tmp = loglikedgam_parallel_unbiased_is(obs,p_t,n_t,tp,tn);
325 | loglikelihood=loglikelihood+tmp;
326 | ptseries(:,t)=p_t;
327 | ntseries(:,t)=n_t;
328 | previous_p=p_t;
329 | previous_n=n_t;
330 | end
331 |
332 | end
333 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
334 |
335 |
336 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
337 | function loglikedgam = loglikedgam_parallel_unbiased_is(z,p,n,tp,tn)
338 | % This function unbiasedly estimates the likelihood of an observation under
339 | % the BEGE density by using importance sampling.
340 | % The likelihood can be exactly computed in the cases of p=n=1; p=1&n>1;
341 | % n=1&p>1.
342 | %
343 | % Input:
344 | % z - the demeaned observation, data(t) - mu, at which the density is evaluated (one entry per parameter vector)
345 | % p - good environment shape parameter
346 | % n - bad environment shape parameter
347 | % tp - good environment scale parameter
348 | % tn - bad environment scale parameter
349 | %
350 | % Output:
351 | % loglikedgam - log-likelihood of the observation, one value per parameter vector
352 |
353 |
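    | % The BEGE density of z is the convolution integral over w >= delta of the
    | % gamma(p,tp) density of (w - wp_bar) and the gamma(n,tn) density of (w - z - wn_bar),
    | % where wp_bar = -p*tp and wn_bar = -n*tn centre the two shocks at zero mean.
    | % The special cases with p==1 and/or n==1 below are evaluated in closed form; the
    | % general case is estimated unbiasedly by importance sampling with N draws per particle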
354 | wp_bar = -p.*tp;
355 | wn_bar = -n.*tn;
356 | sigma = 1./tp + 1./tn;
357 | delta = max(wp_bar, wn_bar + z);
358 | N = 1000;
359 | len = length(p);
360 | loglikedgam = zeros(len,1);
361 |
362 | np = [n p];
363 | inds_n = np(:,1)==1;
364 | inds_p = np(:,2)==1;
365 | inds_one = inds_n == 1 & inds_p == 1;
366 | inds_p_one = inds_n == 0 & inds_p == 1;
367 | inds_p_one_1 = inds_n == 0 & inds_p == 1 & delta == wn_bar + z;
368 | inds_n_one = inds_n == 1 & inds_p == 0;
369 | inds_n_one_1 = inds_n == 1 & inds_p == 0 & delta == wp_bar;
370 | inds_p_n = inds_n == 0 & inds_p == 0;
371 |
372 | if any(inds_one) %p==1 && n==1
373 | loglikedgam(inds_one) = -log(tp(inds_one)) - log(tn(inds_one)) + wp_bar(inds_one)./tp(inds_one)...
374 | + (z(inds_one)+wn_bar(inds_one))./tn(inds_one) - log(sigma(inds_one))...
375 | - sigma(inds_one).*delta(inds_one);
376 | end
377 |
378 | if any(inds_p_one) %p==1 && n>1
    | inds_p_one_2 = inds_p_one & ~inds_p_one_1; %p==1 & n>1 particles with delta > wn_bar + z
379 |
380 | loglikedgam(inds_p_one_1) = -log(tp(inds_p_one_1)) - n(inds_p_one_1).*log(tn(inds_p_one_1))...
381 | + wp_bar(inds_p_one_1)./tp(inds_p_one_1) - (z(inds_p_one_1) + wn_bar(inds_p_one_1))./tp(inds_p_one_1)...
382 | - n(inds_p_one_1).*log(sigma(inds_p_one_1));
383 | loglikedgam(inds_p_one_2) = -log(tp(inds_p_one_2)) - n(inds_p_one_2).*log(tn(inds_p_one_2))...
384 | + wp_bar(inds_p_one_2)./tp(inds_p_one_2) - (z(inds_p_one_2) + wn_bar(inds_p_one_2))./tp(inds_p_one_2)...
385 | - n(inds_p_one_2).*log(sigma(inds_p_one_2)) + log(1 - gamcdf(delta(inds_p_one_2) - (wn_bar(inds_p_one_2) + z(inds_p_one_2)), n(inds_p_one_2), 1./sigma(inds_p_one_2)));
386 | end
387 |
388 | if any(inds_n_one) %n==1 && p>1
    | inds_n_one_2 = inds_n_one & ~inds_n_one_1; %n==1 & p>1 particles with delta > wp_bar
389 |
390 | loglikedgam(inds_n_one_1) = -log(tn(inds_n_one_1)) - p(inds_n_one_1).*log(tp(inds_n_one_1))...
391 | + (z(inds_n_one_1) + wn_bar(inds_n_one_1))./tn(inds_n_one_1) - wp_bar(inds_n_one_1)./tn(inds_n_one_1)...
392 | - p(inds_n_one_1).*log(sigma(inds_n_one_1));
393 |
394 | loglikedgam(inds_n_one_2) = -log(tn(inds_n_one_2)) - p(inds_n_one_2).*log(tp(inds_n_one_2))...
395 | + (z(inds_n_one_2) + wn_bar(inds_n_one_2))./tn(inds_n_one_2) - wp_bar(inds_n_one_2)./tn(inds_n_one_2)...
396 | - p(inds_n_one_2).*log(sigma(inds_n_one_2)) + log(1 - gamcdf(delta(inds_n_one_2) - wp_bar(inds_n_one_2), p(inds_n_one_2), 1./sigma(inds_n_one_2)));
397 | end
398 |
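    | % General case p>1 & n>1: the importance density is a gamma(a,b) for (w - delta),
    | % with a and b chosen so that its mode and variance match the integrand's mode and
    | % its Laplace (curvature-based) variance; the unbiased estimate is the average of
    | % the importance weights, computed on the log scale as -log(N) + logsumexp(logw)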
399 | if any(inds_p_n) %n>1 && p>1
400 | bneg_mode = sigma(inds_p_n).*(z(inds_p_n)+wn_bar(inds_p_n)+wp_bar(inds_p_n))+(p(inds_p_n)+n(inds_p_n)-2);
401 | constant = sigma(inds_p_n).*(z(inds_p_n).*wp_bar(inds_p_n) + wp_bar(inds_p_n).*wn_bar(inds_p_n))...
402 | + (p(inds_p_n)-1).*(z(inds_p_n)+wn_bar(inds_p_n)) + (n(inds_p_n)-1).*wp_bar(inds_p_n);
403 | mode = (bneg_mode + sqrt(bneg_mode.^2 - 4*sigma(inds_p_n).*constant))./(2*sigma(inds_p_n));
404 |
405 | variance = -1./((1-p(inds_p_n))./(mode-wp_bar(inds_p_n)).^2 + (1-n(inds_p_n))./(mode-z(inds_p_n)-wn_bar(inds_p_n)).^2);
406 | bneg = 2*variance + (mode - delta(inds_p_n)).^2;
407 | a = (bneg + sqrt(bneg.^2 - 4*variance.^2))./(2*variance);
408 | b = (mode - delta(inds_p_n))./(a-1);
409 | length_inds_p_n = length(find(inds_p_n == 1));
410 | wp = zeros(length_inds_p_n,N);
411 | for i=1:length_inds_p_n
412 | wp(i,:) = gamrnd(a(i),b(i),1,N);
413 | end
414 | wp = wp + delta(inds_p_n);
415 | log_target = -gammaln(p(inds_p_n)) - p(inds_p_n).*log(tp(inds_p_n)) + (p(inds_p_n)-1).*log(wp-wp_bar(inds_p_n))...
416 | - (wp-wp_bar(inds_p_n))./tp(inds_p_n) - gammaln(n(inds_p_n)) - n(inds_p_n).*log(tn(inds_p_n))...
417 | + (n(inds_p_n)-1).*log(wp-z(inds_p_n)-wn_bar(inds_p_n)) - (wp-z(inds_p_n)-wn_bar(inds_p_n))./tn(inds_p_n);
418 | log_importance = -a.*log(b) - gammaln(a) +(a-1).*log(wp - delta(inds_p_n)) -(wp - delta(inds_p_n))./b;
419 | logw = log_target - log_importance;
420 | loglikedgam(inds_p_n) = -log(N) + logsumexp(logw')';
421 | end
422 | end
423 |
424 |
425 |
426 |
--------------------------------------------------------------------------------