12 |
13 |
14 |
--------------------------------------------------------------------------------
/docs/search/search_l.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/docs/search/search_l.png
--------------------------------------------------------------------------------
/docs/search/search_m.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/docs/search/search_m.png
--------------------------------------------------------------------------------
/docs/search/search_r.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/docs/search/search_r.png
--------------------------------------------------------------------------------
/docs/search/searchdata.js:
--------------------------------------------------------------------------------
// Doxygen-generated search index metadata consumed by search.js (do not edit by hand).

// For each search section: the set of first characters that have at least
// one matching entry (used to decide which search_?.png index pages exist).
var indexSectionsWithContent =
{
  0: "abdefgiklmorsu",
  1: "adeflrs",
  2: "adeflrs",
  3: "abdefgiklmorsu"
};

// Internal identifiers of the search sections.
var indexSectionNames =
{
  0: "all",
  1: "namespaces",
  2: "files",
  3: "functions"
};

// Human-readable labels shown in the search-filter dropdown.
var indexSectionLabels =
{
  0: "All",
  1: "Namespaces",
  2: "Files",
  3: "Functions"
};

--------------------------------------------------------------------------------
/docs/splitbar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/docs/splitbar.png
--------------------------------------------------------------------------------
/docs/sync_off.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/docs/sync_off.png
--------------------------------------------------------------------------------
/docs/sync_on.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/docs/sync_on.png
--------------------------------------------------------------------------------
/docs/tab_a.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/docs/tab_a.png
--------------------------------------------------------------------------------
/docs/tab_b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/docs/tab_b.png
--------------------------------------------------------------------------------
/docs/tab_h.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/docs/tab_h.png
--------------------------------------------------------------------------------
/docs/tab_s.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/docs/tab_s.png
--------------------------------------------------------------------------------
/matlab/AMI_Thomas.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/matlab/AMI_Thomas.m
--------------------------------------------------------------------------------
/matlab/CRQA021525.m:
--------------------------------------------------------------------------------
function [RP, RESULTS]=CRQA021525(data,tau,dim,param,threshold,options)
% CRQA021525 - Cross recurrence quantification analysis of two signals.
%
% Inputs:
%   data      - two-column matrix, one signal per column
%   tau       - positive integer time delay for embedding (default 1)
%   dim       - positive integer embedding dimension (default 1)
%   param     - "rad": use THRESHOLD directly as the recurrence radius;
%               "rec": search for a radius giving ~THRESHOLD percent
%               recurrence (default "rec")
%   threshold - radius when param=="rad", target percent recurrence when
%               param=="rec" (default 2.5)
%   options   - Zscore, Norm, Dmin, Vmin, Plot, Orient, Iter (see arguments)
%
% Outputs:
%   RP      - recurrence plot image, rotated 90 degrees for display
%   RESULTS - struct of RQA measures; line-based measures are NaN when no
%             recurrent points exist
arguments
    data double {mustBeTwoColumns}
    tau (1,1) {mustBeInteger, mustBePositive} = 1
    dim (1,1) {mustBeInteger, mustBePositive} = 1
    param (1,1) string {mustBeMember(param,["rad", "rec"])} = "rec"
    threshold (1,1) double {mustBePositive} = 2.5
    options.Zscore (1,1) {mustBeMember(options.Zscore,[0,1])} = 1
    options.Norm (1,1) {mustBeMember(options.Norm,["euc", "max", "min", "none"])} = "none"
    options.Dmin (1,1) {mustBeInteger, mustBePositive} = 2
    options.Vmin (1,1) {mustBeInteger, mustBePositive} = 2
    options.Plot (1,1) {mustBeMember(options.Plot,[0,1])} = 0
    options.Orient (1,1) {mustBeMember(options.Orient,["col", "row"])} = "col"
    options.Iter (1,1) {mustBeInteger, mustBePositive} = 20
end

%% Begin code
dbstop if error % If error occurs, enters debug mode

%% Change variable names for readability
dmin = options.Dmin;
vmin = options.Vmin;

%% Standardize data if zscore is true
% If zscore is selected then zscore the data
if options.Zscore
    data = zscore(data);
end

% Embed the data onto phase space
% NOTE(review): RQA021525 calls psr(data, dim, tau) while this file calls
% psr(data, tau, dim) - one of the two argument orders must be wrong;
% confirm against psr's signature.
if dim > 1
    data = psr(data, tau, dim);
end

% Cross-distance matrix between the two (embedded) signals; distances are
% negated so that "more similar" corresponds to larger values.
a = pdist2(data(:,1:2:end),data(:,2:2:end));
a = abs(a)*-1;

% Normalize distance matrix
if contains(options.Norm, 'euc')
    b = mean(a(a<0));
    b = -sqrt(abs(((b^2)+2*(2*dim))));
    a = a/abs(b);
elseif contains(options.Norm, 'min')
    b = max(a(a<0));    % max of the negative values = smallest distance
    a = a/abs(b);
elseif contains(options.Norm, 'max')
    b = min(a(a<0));    % min of the negative values = largest distance
    a = a/abs(b);
end

% (Weighted-RP computation removed: it was dead, commented-out code;
% Ent_Weighted below operates on the distance matrix directly.)

% Calculate recurrence plot
switch param
    case 'rad'
        radius = threshold; % FIX: radius was undefined on this path, so
                            % RESULTS.RADIUS (and plotting) errored out
        [recurrence, diag_hist, vertical_hist,A] = linehist(data,a,threshold,'crqa');
    case 'rec'
        radius_start = 0.01;
        radius_end = 0.5;
        [recurrence, diag_hist, vertical_hist, radius, A] = setradius(data,a,radius_start,radius_end,threshold,'crqa',options.Iter);
end

%% Calculate RQA variables
RESULTS.DIM = 1; % NOTE(review): hard-coded to 1 but RQA_plot below is called with 2 - confirm intent
RESULTS.EMB = dim;
RESULTS.DEL = tau;
RESULTS.RADIUS = radius;
RESULTS.NORM = options.Norm;
RESULTS.ZSCORE = options.Zscore;
RESULTS.Size=length(A);
RESULTS.REC = recurrence;
if RESULTS.REC > 0
    RESULTS.DET=100*sum(diag_hist(diag_hist>=dmin))/sum(diag_hist);
    RESULTS.MeanL=mean(diag_hist(diag_hist>=dmin));
    RESULTS.MaxL=max(diag_hist(diag_hist>=dmin));
    % Shannon entropy of the diagonal line-length distribution
    [count,bin]=hist(diag_hist(diag_hist>=dmin),min(diag_hist(diag_hist>=dmin)):max(diag_hist(diag_hist>=dmin)));
    total=sum(count);
    p=count./total;
    del=find(count==0); p(del)=[]; % drop empty bins so log2 is finite
    RESULTS.EntrL=-sum(p.*log2(p));
    RESULTS.LAM=100*sum(vertical_hist(vertical_hist>=vmin))/sum(vertical_hist);
    RESULTS.MeanV=mean(vertical_hist(vertical_hist>=vmin));
    RESULTS.MaxV=max(vertical_hist(vertical_hist>=vmin));
    % Shannon entropy of the vertical line-length distribution
    [count,bin]=hist(vertical_hist(vertical_hist>=vmin),min(vertical_hist(vertical_hist>=vmin)):max(vertical_hist(vertical_hist>=vmin)));
    total=sum(count);
    p=count./total;
    del=find(count==0); p(del)=[];
    RESULTS.EntrV=-sum(p.*log2(p));
    RESULTS.EntrW=Ent_Weighted(a);
else
    RESULTS.DET=NaN;
    RESULTS.MeanL=NaN;
    RESULTS.MaxL=NaN;
    RESULTS.EntrL=NaN;
    RESULTS.LAM=NaN;
    RESULTS.MeanV=NaN;
    RESULTS.MaxV=NaN;
    RESULTS.EntrV=NaN;
    RESULTS.EntrW=NaN;
end
RP=imrotate(1-A,90);

%% Plot
if options.Plot
    RQA_plot(data, RP, RESULTS, tau, dim, 2, options.Zscore, options.Norm, radius, a, 'crqa');
end

end
121 |
% Custom validation function
function mustBeTwoColumns(data)
% Raise an error unless DATA is exactly two columns wide.
nCols = size(data, 2);
if nCols ~= 2
    error('Data must be two column vectors.')
end
end
--------------------------------------------------------------------------------
/matlab/Ent_Ap.m:
--------------------------------------------------------------------------------
function [AE] = Ent_Ap( data, dim, r )
% Ent_Ap - Approximate entropy (ApEn) of a time series.
%   data : time-series data
%   dim  : embedding dimension
%   r    : tolerance as a proportion of std(data) (typically 0.2)
%
% Follows the original definition (log taken per-template before
% averaging), consistent with most of the literature. Note this form
% does not transfer to Sample Entropy, which excludes self-matches and
% can therefore hit log(0).
%
%---------------------------------------------------------------------
% coded by Kijoon Lee, kjlee@ntu.edu.sg
% Ver 0 : Aug 4th, 2011
% Ver 1 : Mar 21st, 2012
%---------------------------------------------------------------------

tol = r * std(data);      % absolute tolerance
N = length(data);
logPhi = zeros(1, 2);     % averaged log match fractions for m and m+1

for step = 0:1
    m = dim + step;
    nVec = N - m + 1;     % number of length-m templates

    % templates(:,k) holds the window data(k:k+m-1)
    templates = zeros(m, nVec);
    for row = 1:m
        templates(row, :) = data(row:row+nVec-1);
    end

    % For each template, the fraction of templates within tolerance
    % (self-match included, per the ApEn definition).
    matchFrac = zeros(1, nVec);
    for k = 1:nVec
        dist = max(abs(templates - templates(:, k)), [], 1); % Chebyshev distance
        matchFrac(k) = mean(dist <= tol);
    end

    logPhi(step + 1) = mean(log(matchFrac));
end

AE = logPhi(1) - logPhi(2);
end
--------------------------------------------------------------------------------
/matlab/Ent_MS_Plus.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nonlinear-Analysis-Core/NONANLibrary/55e19d216c3b59c1ca5c46c6025ba786ab4a0853/matlab/Ent_MS_Plus.m
--------------------------------------------------------------------------------
/matlab/Ent_Permu.m:
--------------------------------------------------------------------------------
function [permEnt, hist] = Ent_Permu(data, m, tau)
% [permEnt, hist] = Ent_Permu(data, m, tau)
% inputs  - data: 1-D array of data being analyzed (row or column)
%           m: embedding dimension (order of permutation entropy)
%           tau: time delay
% outputs - permEnt: permutation entropy, calculated with log base 2
%           hist: number of occurrences of each permutation order
% Remarks
% - It differs from the permutation entropy code found on MatLab Central in
%   one way (see MathWorks reference). The code on MatLab Central uses the
%   log function (base e, natural log), whereas this code uses log2 (base 2
%   ), as per Bandt & Pompe, 2002. However, this code does include a lag
%   (time delay) feature like the one on MatLab Central does.
% - Complexity parameters for time series based on comparison of
%   neighboring values. Based on the distributions of ordinal patterns,
%   which describe order relations between the values of a time series.
%   Based on the algorithm described by Bandt & Pompe, 2002.
% References
% - Bandt, C., Pompe, B. Permutation entropy: A natural complexity measure
%   for time series. Phys Rev Lett 2002, 88, 174102,
%   doi:10.1103/PhysRevLett.88.174102
% - MathWorks: http:www.mathworks.com/matlabcentral/fileexchange/
%   37289-permutation-entropy)
% Jun 2016 - Created by Patrick Meng-Frecker, unonbcf@unomaha.edu
% Dec 2016 - Edited by Casey Wiens, email: unonbcf@unomaha.edu
% Copyright 2020 Nonlinear Analysis Core, Center for Human Movement
% Variability, University of Nebraska at Omaha
%
% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are
% met:
%
% 1. Redistributions of source code must retain the above copyright notice,
% this list of conditions and the following disclaimer.
%
% 2. Redistributions in binary form must reproduce the above copyright
% notice, this list of conditions and the following disclaimer in the
% documentation and/or other materials provided with the distribution.
%
% 3. Neither the name of the copyright holder nor the names of its
% contributors may be used to endorse or promote products derived from
% this software without specific prior written permission.
%
% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
% IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
% THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
% PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
% CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
% EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
% PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
% PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
% LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
% NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
%%

N = length(data);           % length of time series
% FIX: force row orientation. With column input, sort() returned a column
% permVal, and `perm(cnt2,:) - permVal` implicitly expanded to an m-by-m
% matrix whose `== 0` test was essentially never all-true, so the pattern
% counts were silently wrong (entropy came back as 0).
data = data(:).';
perm = perms(1:m);          % all possible permutation vectors (m! rows)
hist = zeros(1, size(perm,1)); % occurrence count for each permutation order

for cnt1 = 1:N-tau*(m-1)    % slide window over the series
    % ordinal pattern (sort indices) of the current delayed window
    [~, permVal] = sort(data(cnt1:tau:cnt1+tau*(m-1)));
    % locate the pattern among all permutations in one rows-wise lookup
    % (replaces the original O(m!) inner scan)
    [isMatch, idx] = ismember(permVal, perm, 'rows');
    if isMatch
        hist(idx) = hist(idx) + 1;
    end
end

histNew = hist(hist ~= 0);          % drop unobserved patterns so log2 is finite
per = histNew/sum(histNew);         % relative frequency of each observed pattern
permEnt = -sum(per .* log2(per));   % Shannon entropy, base 2
--------------------------------------------------------------------------------
/matlab/Ent_Samp.m:
--------------------------------------------------------------------------------
function [sampen_value] = Ent_Samp(data, m, r, flag)
%ENT_SAMP Calculate the sample entropy of a time series.
%
%   sampen_value = Ent_Samp(data, m, r, flag)
%
%   Inputs:
%     data - A vector containing the time series data.
%     m    - Embedding dimension.
%     r    - Tolerance threshold. If flag is 'prop', then r is a proportion
%            of the standard deviation of data (e.g., 0.2 means 0.2*std(data)).
%            If flag is 'const', then r is used as the constant threshold.
%     flag - (optional) A string that specifies how to interpret r.
%            Use 'prop' (default) if r is a proportion of std(data),
%            or 'const' if r is a constant.
%
%   Output:
%     sampen_value - The computed sample entropy value.
%
%   The sample entropy is defined as:
%     SampEn = -log( A / B )
%   where, over the same N-m template start points:
%     B = number of pairs of vectors of length m   that are similar,
%     A = number of pairs of vectors of length m+1 that are similar.
%
%   Fixes relative to the previous revision:
%   - Function renamed from sampen to Ent_Samp to match the file name
%     (MATLAB dispatches on the file name, so callers are unaffected).
%   - Both template sets now use the same N-m start points, as in
%     Richman & Moorman (2000). The old code used N-m+1 templates for the
%     m-length count with ad-hoc normalization, which produced a nonzero
%     entropy even for a constant series.
%   - Removed a leftover debug fprintf of the A/B ratio.
%
%   Reference:
%     Richman, J. S. & Moorman, J. R. (2000),
%     "Physiological time-series analysis using approximate entropy and sample entropy",
%     American Journal of Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049.
%
%   Written by Aaron D. Likens and Seung Kyeom Kim

% Ensure that the data is a column vector
data = data(:);
N = length(data);

% Check if the time series is long enough
if N < m+1
    error('The time series must have at least m+1 data points.');
end

% Set default r and flag if not provided
if nargin < 3 || isempty(r)
    r = 0.2;
end
if nargin < 4 || isempty(flag)
    flag = 'prop';
end

% If r is a proportion of std(data), update r accordingly.
if strcmpi(flag, 'prop')
    r = r * std(data);
end

%% Build template matrices over the SAME N-m start points
numTemplates = N - m;

% Each row of X is m consecutive points; each row of X1 is m+1 points.
X = zeros(numTemplates, m);
X1 = zeros(numTemplates, m+1);
for i = 1:numTemplates
    X(i, :) = data(i:i+m-1);
    X1(i, :) = data(i:i+m);
end

%% Count similar pairs (self-matches excluded by j > i)
B = 0; % matches of length m
A = 0; % matches of length m+1
for i = 1:numTemplates
    for j = i+1:numTemplates
        % Chebyshev distance: maximum absolute difference
        if max(abs(X(i,:) - X(j,:))) <= r
            B = B + 1;
        end
        if max(abs(X1(i,:) - X1(j,:))) <= r
            A = A + 1;
        end
    end
end

%% Calculate sample entropy
% Both counts come from the same number of pairs, so no normalization is
% needed before taking the ratio. A == 0 yields Inf via -log(0).
if B == 0
    sampen_value = Inf;
else
    sampen_value = -log(A / B);
end

end
--------------------------------------------------------------------------------
/matlab/Ent_Symbolic.m:
--------------------------------------------------------------------------------
function [ NCSE ] = Ent_Symbolic( X, L )
% [ NCSE ] = Ent_Symbolic( X, L )
% Symbolic entropy of a binary sequence at word length L.
%   Input  - X: 1-Dimensional binary array of data
%            L: Word length
%   Output - NCSE: Normalized Corrected Shannon Entropy
% Remarks
% - Implements the threshold-dependent symbolic entropy described by
%   Aziz, W., Arif, M., 2006. "Complexity analysis of stride interval
%   time series by threshold dependent symbolic entropy." Eur. J. Appl.
%   Physiol. 98: 30-40.
% Jun 2017 - Created by William Denton, unonbcf@unomaha.edu
% Copyright 2020 Nonlinear Analysis Core, Center for Human Movement
% Variability, University of Nebraska at Omaha
%
% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are
% met:
%
% 1. Redistributions of source code must retain the above copyright notice,
% this list of conditions and the following disclaimer.
%
% 2. Redistributions in binary form must reproduce the above copyright
% notice, this list of conditions and the following disclaimer in the
% documentation and/or other materials provided with the distribution.
%
% 3. Neither the name of the copyright holder nor the names of its
% contributors may be used to endorse or promote products derived from
% this software without specific prior written permission.
%
% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
% IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
% THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
% PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
% CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
% EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
% PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
% PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
% LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
% NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
%% Begin code: (Do NOT Edit)
%% Make the array a row vector so windows index cleanly.
[nRows, nCols] = size(X);
if nRows > nCols
    X = X';
end
%% Map every L-bit window onto its decimal word value.
nWords = length(X) - L + 1;
weights = 2.^(L-1:-1:0);             % place value of each binary digit
words = zeros(nWords, 1);
for k = 1:nWords
    words(k) = sum(weights .* X(k:k+L-1));
end
%% Word probabilities and raw Shannon entropy.
Sm = 2^L;                            % total number of possible words
P = zeros(1, Sm);
Hterm = zeros(1, Sm);
for v = 1:Sm
    P(v) = sum(words == v-1)/nWords;
    Hterm(v) = P(v)*log2(P(v));      % NaN for unobserved words; dropped below
end
H = -sum(Hterm(~isnan(Hterm)));
%% Normalized Corrected Shannon Entropy
So = length(unique(words));          % number of distinct words observed
CSE = H + (So-1)/(2*Sm*log(2));
CSEm = -log2(1/Sm) + (Sm-1)/(2*Sm*log(2));
NCSE = CSE/CSEm;
%% Print out Symbolic Entropy Value.
fprintf('Normalized Corrected Shannon Entropy = %2.3f bits\r',NCSE);
end
--------------------------------------------------------------------------------
/matlab/Ent_Weighted.m:
--------------------------------------------------------------------------------
function [w_ent] = Ent_Weighted(wrp)
% ENT_WEIGHTED Weighted entropy of a (weighted) recurrence plot.
%   w_ent = Ent_Weighted(wrp) sums the weights in each column of WRP,
%   bins those column sums into 50 equal-width bins, and returns the
%   Shannon entropy (natural log) of the weight proportion per bin.
%
%   Input  - wrp: weighted recurrence plot (square matrix of weights)
%   Output - w_ent: weighted entropy (>= 0)

N = size(wrp, 2);          % number of columns (FIX: length() picked max dim)
si = zeros(1, N);          % preallocate (was grown inside the loop)
for j = 1:N
    si(j) = sum(wrp(:,j)); % compute vertical weights sums
end

% Compute distribution of vertical weights sums
si_min = min(si);
si_max = max(si);

% FIX: if every column sum is identical the bin width is 0, the binning
% loop below runs zero times, and p1 was left undefined (runtime error).
% A point-mass distribution has zero entropy by definition.
if si_max == si_min
    w_ent = 0;
    return
end

bin_size = (si_max - si_min)/49; % compute bin size (50 bin edges)
count = 1;
S = sum(si);
for s = si_min:bin_size:si_max
    P = sum(si(si>= s&si<(s+bin_size))); % total weight falling in this bin
    p1(count) = P / S;
    count = count+1;
end

% Compute weighted entropy
for I = 1:length(p1)
    pp(I) = (p1(I)*log(p1(I)));
end
pp(isnan(pp)) = 0;         % empty bins contribute 0, not NaN
w_ent = -1*(sum(pp));

end
--------------------------------------------------------------------------------
/matlab/Ent_xAp.m:
--------------------------------------------------------------------------------
1 | function [xAP]=Ent_xAp(X,Y,M,r,k)
2 |
3 | %[xAP]=Ent_xAp201603(X,Y,M,r,k)
4 | %
5 | % inputs: X - first time series
6 | % Y - second time series
7 | % M - something vector length
8 | % r - R tolerance to find matches, proportion of the stdev
9 | % k - something lag
10 | % outputs: xAP - cross approximate entropy
11 | %
12 | % Remarks
13 | % - This code finds the cross approximate entropy between two signals of
14 | % equal length.
15 | %
16 | % Future Work
17 | % - This code should be looked over.
18 | % - The scaling of the radius to the standard deviation may need to be
19 | % calculated from the average stdev of both signals and not just one.
20 | % - The first for loop with m=M:k:M+k looks suspicious.
21 | %
22 | % Mar 2016 - Modified by Ben Senderling, email: bensenderling@gmail.com
23 | % - Moved the data normalization from the code that called this
24 | % one into this code.
25 | % - Changed the input r value from a percentage to a decimal for
26 | % consistency with other entropy code.
27 | %
28 | %% Begin Code
29 |
30 | X=(X-mean(X))/std(X);
31 | Y=(Y-mean(Y))/std(Y);
32 |
33 | N=length(X);
34 | Cm=[];
35 | r=std(X)*r;
36 | for m=M:k:M+k
37 | C=[];
38 | for i=1:(N-m+1)
39 | V=[X(i:m+i-1)];
40 | count=0;
41 | for j=1:(N-m+1)
42 | Z=[Y(j:m+j-1)];
43 | dif=(abs(V-Z) 1
35 | data = psr(data, tau, dim);
36 | end
37 |
38 | % Calculate distance matrix based on the type of RQA
39 | for i = 1:DIM
40 | a{i}=pdist2(data(:,i:DIM:end),data(:,i:DIM:end));
41 | a{i}=abs(a{i})*-1;
42 | end
43 |
44 | % Normalize distance matrix
45 | if contains(options.Norm, 'euc')
46 | for i = 1:length(a)
47 | b = mean(a{i}(a{i}<0));
48 | b = -sqrt(abs(((b^2)+2*(DIM*dim))));
49 | a{i} = a{i}/abs(b);
50 | end
51 | elseif contains(options.Norm, 'min')
52 | for i = 1:length(a)
53 | b = max(a{i}(a{i}<0));
54 | a{i} = a{i}/abs(b);
55 | end
56 | elseif contains(options.Norm, 'max')
57 | for i = 1:length(a)
58 | b = min(a{i}(a{i}<0));
59 | a{i} = a{i}/abs(b);
60 | end
61 | end
62 |
63 | % Compute weighted recurrence plot
64 | wrp = a;
65 | for i = 1:size(a,2)-1
66 | wrp{i+1} = wrp{i}.*wrp{i+1};
67 | end
68 | if i
69 | wrp = -(abs(wrp{i+1})).^(1/(i+1));
70 | end
71 | if iscell(wrp)
72 | wrp = wrp{1};
73 | end
74 |
75 | % Calculate recurrence plot
76 | switch param
77 | case 'rad'
78 | [recurrence, diag_hist, vertical_hist,A] = linehist(data,a,threshold,'jrqa');
79 | case 'rec'
80 | radius_start = 0.01;
81 | radius_end = 0.5;
82 | [recurrence, diag_hist, vertical_hist, radius, A] = setradius(data,a,radius_start,radius_end,threshold,'jrqa',options.Iter);
83 | end
84 |
85 | %% Calculate RQA variabes
86 | RESULTS.DIM = 1;
87 | RESULTS.EMB = dim;
88 | RESULTS.DEL = tau;
89 | RESULTS.RADIUS = radius;
90 | RESULTS.NORM = options.Norm;
91 | RESULTS.ZSCORE = options.Zscore;
92 | RESULTS.Size=length(A);
93 | RESULTS.REC = recurrence;
94 | if RESULTS.REC > 0
95 | RESULTS.DET=100*sum(diag_hist(diag_hist>=dmin))/sum(diag_hist);
96 | RESULTS.MeanL=mean(diag_hist(diag_hist>=dmin));
97 | RESULTS.MaxL=max(diag_hist(diag_hist>=dmin));
98 | [count,bin]=hist(diag_hist(diag_hist>=dmin),min(diag_hist(diag_hist>=dmin)):max(diag_hist(diag_hist>=dmin)));
99 | total=sum(count);
100 | p=count./total;
101 | del=find(count==0); p(del)=[];
102 | RESULTS.EntrL=-sum(p.*log2(p));
103 | RESULTS.LAM=100*sum(vertical_hist(vertical_hist>=vmin))/sum(vertical_hist);
104 | RESULTS.MeanV=mean(vertical_hist(vertical_hist>=vmin));
105 | RESULTS.MaxV=max(vertical_hist(vertical_hist>=vmin));
106 | [count,bin]=hist(vertical_hist(vertical_hist>=vmin),min(vertical_hist(vertical_hist>=vmin)):max(vertical_hist(vertical_hist>=vmin)));
107 | total=sum(count);
108 | p=count./total;
109 | del=find(count==0); p(del)=[];
110 | RESULTS.EntrV=-sum(p.*log2(p));
111 | RESULTS.EntrW=Ent_Weighted(wrp);
112 | else
113 | RESULTS.DET=NaN;
114 | RESULTS.MeanL=NaN;
115 | RESULTS.MaxL=NaN;
116 | RESULTS.EntrL=NaN;
117 | RESULTS.LAM=NaN;
118 | RESULTS.MeanV=NaN;
119 | RESULTS.MaxV=NaN;
120 | RESULTS.EntrV=NaN;
121 | RESULTS.EntrW=NaN;
122 | end
123 | RP=imrotate(1-A,90);
124 |
125 | %% Plot
126 | if options.Plot
127 | RQA_plot(data, RP, RESULTS, tau, dim, DIM, options.Zscore, options.Norm, radius, wrp, 'jrqa');
128 | end
129 |
130 | end
131 |
% Custom validation function
function mustbeAtLeastTwoColumns(data)
% Raise an error unless DATA has two or more columns.
nCols = size(data, 2);
if nCols < 2
    error('Data must be at least two column vectors.')
end
end
--------------------------------------------------------------------------------
/matlab/RQA021525.m:
--------------------------------------------------------------------------------
function [RP, RESULTS]=RQA021525(data,tau,dim,param,threshold,options)
% RQA021525 - Univariate recurrence quantification analysis.
%
% Inputs:
%   data      - single-column signal
%   tau       - positive integer time delay for embedding (default 1)
%   dim       - positive integer embedding dimension (default 1)
%   param     - "rad": use THRESHOLD directly as the recurrence radius;
%               "rec": search for a radius giving ~THRESHOLD percent
%               recurrence (default "rec")
%   threshold - radius when param=="rad", target percent recurrence when
%               param=="rec" (default 2.5)
%   options   - Zscore, Norm, Dmin, Vmin, Plot, Orient, Iter (see arguments)
%
% Outputs:
%   RP      - recurrence plot image, rotated 90 degrees for display
%   RESULTS - struct of RQA measures; line-based measures are NaN when no
%             recurrent points exist
arguments
    data double {mustBeSingleColumn}
    tau (1,1) {mustBeInteger, mustBePositive} = 1
    dim (1,1) {mustBeInteger, mustBePositive} = 1
    param (1,1) string {mustBeMember(param,["rad", "rec"])} = "rec"
    threshold (1,1) double {mustBePositive} = 2.5
    options.Zscore (1,1) {mustBeMember(options.Zscore,[0,1])} = 1
    options.Norm (1,1) {mustBeMember(options.Norm,["euc", "max", "min", "none"])} = "none"
    options.Dmin (1,1) {mustBeInteger, mustBePositive} = 2
    options.Vmin (1,1) {mustBeInteger, mustBePositive} = 2
    options.Plot (1,1) {mustBeMember(options.Plot,[0,1])} = 0
    options.Orient (1,1) {mustBeMember(options.Orient,["col", "row"])} = "col"
    options.Iter (1,1) {mustBeInteger, mustBePositive} = 20
end

%% Begin code
dbstop if error % If error occurs, enters debug mode

%% Change variable names for readability
dmin = options.Dmin;
vmin = options.Vmin;

%% Standardize data if zscore is true
% If zscore is selected then zscore the data
if options.Zscore
    data = zscore(data);
end

% Embed the data onto phase space
% NOTE(review): CRQA021525 calls psr(data, tau, dim) while this file calls
% psr(data, dim, tau) - one of the two argument orders must be wrong;
% confirm against psr's signature.
if dim > 1
    data = psr(data, dim, tau);
end

% Self-distance matrix; distances are negated so that "more similar"
% corresponds to larger values.
a = pdist2(data,data);
a = abs(a)*-1;

% Normalize distance matrix
if contains(options.Norm, 'euc')
    b = mean(a(a<0));
    b = -sqrt(abs(((b^2)+2*(1*dim))));
    a = a/abs(b);
elseif contains(options.Norm, 'min')
    b = max(a(a<0));    % max of the negative values = smallest distance
    a = a/abs(b);
elseif contains(options.Norm, 'max')
    b = min(a(a<0));    % min of the negative values = largest distance
    a = a/abs(b);
end

% (Weighted-RP computation removed: it was dead, commented-out code;
% Ent_Weighted below operates on the distance matrix directly.)

% Calculate recurrence plot
switch param
    case 'rad'
        radius = threshold; % FIX: radius was undefined on this path, so
                            % RESULTS.RADIUS (and plotting) errored out
        [recurrence, diag_hist, vertical_hist,A] = linehist(data,a,threshold,'rqa');
    case 'rec'
        radius_start = 0.01;
        radius_end = 0.5;
        [recurrence, diag_hist, vertical_hist, radius, A] = setradius(data,a,radius_start,radius_end,threshold,'rqa',options.Iter);
end

%% Calculate RQA variables
RESULTS.DIM = 1;
RESULTS.EMB = dim;
RESULTS.DEL = tau;
RESULTS.RADIUS = radius;
RESULTS.NORM = options.Norm;
RESULTS.ZSCORE = options.Zscore;
RESULTS.Size=length(A);
RESULTS.REC = recurrence;
if RESULTS.REC > 0
    RESULTS.DET=100*sum(diag_hist(diag_hist>=dmin))/sum(diag_hist);
    RESULTS.MeanL=mean(diag_hist(diag_hist>=dmin));
    RESULTS.MaxL=max(diag_hist(diag_hist>=dmin));
    % Shannon entropy of the diagonal line-length distribution
    [count,bin]=hist(diag_hist(diag_hist>=dmin),min(diag_hist(diag_hist>=dmin)):max(diag_hist(diag_hist>=dmin)));
    total=sum(count);
    p=count./total;
    del=find(count==0); p(del)=[]; % drop empty bins so log2 is finite
    RESULTS.EntrL=-sum(p.*log2(p));
    RESULTS.LAM=100*sum(vertical_hist(vertical_hist>=vmin))/sum(vertical_hist);
    RESULTS.MeanV=mean(vertical_hist(vertical_hist>=vmin));
    RESULTS.MaxV=max(vertical_hist(vertical_hist>=vmin));
    % Shannon entropy of the vertical line-length distribution
    [count,bin]=hist(vertical_hist(vertical_hist>=vmin),min(vertical_hist(vertical_hist>=vmin)):max(vertical_hist(vertical_hist>=vmin)));
    total=sum(count);
    p=count./total;
    del=find(count==0); p(del)=[];
    RESULTS.EntrV=-sum(p.*log2(p));
    RESULTS.EntrW=Ent_Weighted(a);
else
    RESULTS.DET=NaN;
    RESULTS.MeanL=NaN;
    RESULTS.MaxL=NaN;
    RESULTS.EntrL=NaN;
    RESULTS.LAM=NaN;
    RESULTS.MeanV=NaN;
    RESULTS.MaxV=NaN;
    RESULTS.EntrV=NaN;
    RESULTS.EntrW=NaN;
end
RP=imrotate(1-A,90);

%% Plot
if options.Plot
    RQA_plot(data, RP, RESULTS, tau, dim, 1, options.Zscore, options.Norm, radius, a, 'rqa');
end

end
121 |
% Custom validation function
function mustBeSingleColumn(data)
% Raise an error unless DATA is exactly one column wide.
nCols = size(data, 2);
if nCols ~= 1
    error('Data must be single column vector.')
end
end
--------------------------------------------------------------------------------
/matlab/RelPhase_Cont.m:
--------------------------------------------------------------------------------
function [crp, crpH] = RelPhase_Cont(data1,data2,samprate)
% RELPHASE_CONT - calculates the continuous relative phase between two
% signals (segments, joint angles, coordinates, etc.). The sign of the
% relative phase is determined from signal 1 - signal 2. If the sign is
% negative, signal 2 leads signal 1. If the sign is positive, signal 1
% leads signal 2.
%
% Syntax:
%   [crp, crpH] = RelPhase_Cont(data1,data2,samprate)
% inputs:
%   data1    - time series of segment/joint A (vector)
%   data2    - time series of segment/joint B (vector)
%   samprate - sampling rate in Hz; scales the finite-difference
%              velocity estimates
% outputs:
%   crp  - continuous relative phase in degrees computed from position
%          and finite-difference velocity (length = length(data1)-1)
%   crpH - relative phase in degrees computed from the Hilbert
%          transform (same length as the inputs)
%
% Copyright 2020 Nonlinear Analysis Core, Center for Human Movement
% Variability, University of Nebraska at Omaha
%
% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are
% met:
%
% 1. Redistributions of source code must retain the above copyright notice,
% this list of conditions and the following disclaimer.
%
% 2. Redistributions in binary form must reproduce the above copyright
% notice, this list of conditions and the following disclaimer in the
% documentation and/or other materials provided with the distribution.
%
% 3. Neither the name of the copyright holder nor the names of its
% contributors may be used to endorse or promote products derived from
% this software without specific prior written permission.
%
% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
% IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
% THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
% PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
% CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
% EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
% PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
% PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
% LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
% NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
%% Normalize the data

% Center each signal about zero so the phase angle atan(velocity/position)
% is meaningful.
data1 = data1 - min(data1) - range(data1)/2;
data2 = data2 - min(data2) - range(data2)/2;

% Calculate a basic phase from position and finite-difference velocity,
% using the identity tan(th1-th2) = (v1*p2 - v2*p1)/(p1*p2 + v1*v2).
% BUG FIX: the denominator previously SUBTRACTED the velocity product
% (p1*p2 - v1*v2); the correct difference-of-angles identity, and the
% Hilbert-based calculation below, use a sum.
crp = atan((diff(data1)*samprate.*data2(1:end-1) - diff(data2)*samprate.*data1(1:end-1))./(data1(1:end-1).*data2(1:end-1) + diff(data2)*samprate.*diff(data1)*samprate))*180/pi;

% Calculate the phase angle using a Hilbert tranform.
% NOTE(review): atan (not atan2) is used, so results are limited to
% (-90, 90) degrees even though earlier documentation mentioned quadrant
% preservation - confirm whether atan2 was intended.
env1H = hilbert(data1);
env2H = hilbert(data2);
crpH = atan((imag(env1H).*data2 - imag(env2H).*data1)./(data1.*data2 + imag(env1H).*imag(env2H)))*180/pi;

return
61 |
--------------------------------------------------------------------------------
/matlab/Surr_Theiler.m:
--------------------------------------------------------------------------------
function z =Surr_Theiler(y,algorithm)
% z=Surr_Theiler(y,algorithm)
% inputs - y, time series to be surrogated
% algorithm - the type of algorithm to be completed (0, 1 or 2)
% outputs - z, surrogated time series
% Remarks
% - This code creates a surrogate time series according to Algorithm 0,
% Algorithm 1 or Algorithm 2.
% - Any other value of algorithm raises an error.
% Future Work
% - None.
% References
% - Theiler, J., Eubank, S., Longtin, A., Galdrikian, B., & Doyne
% Farmer, J. (1992). Testing for nonlinearity in time series: the
% method of surrogate data. Physica D: Nonlinear Phenomena, 58(1-4),
% 77-94. https://doi.org/10.1016/0167-2789(92)90102-S
% Jun 2015 - Modified by Ben Senderling
% - Added function help section and plot commands for user
% feedback
% - The code was originally created as two algorithms. It was
% modified so one code included both functions.
% Jul 2020 - Modified by Ben Senderling, bmchnonan@unomaha.edu
% - Changed file and function name.
% - Added reference.
% Copyright 2020 Nonlinear Analysis Core, Center for Human Movement
% Variability, University of Nebraska at Omaha
%
% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are
% met:
%
% 1. Redistributions of source code must retain the above copyright notice,
% this list of conditions and the following disclaimer.
%
% 2. Redistributions in binary form must reproduce the above copyright
% notice, this list of conditions and the following disclaimer in the
% documentation and/or other materials provided with the distribution.
%
% 3. Neither the name of the copyright holder nor the names of its
% contributors may be used to endorse or promote products derived from
% this software without specific prior written permission.
%
% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
% IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
% THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
% PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
% CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
% EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
% PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
% PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
% LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
% NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
%% Begin code
switch (algorithm)
    case 0
        % Algorithm 0: shuffle y into a random order (sorting Gaussian
        % noise yields a random permutation of the indices).
        z=randn(size(y));
        [~,idx]=sort(z);
        z=y(idx);
    case 1
        % Algorithm 1: Fourier phase randomization (see surr1).
        z=surr1(y,1);
    case 2
        % Algorithm 2: amplitude-adjusted phase randomization (see surr1).
        z=surr1(y,2);
    otherwise
        % BUG FIX: previously an unrecognized algorithm fell through the
        % switch and z was returned undefined, producing a confusing
        % downstream error instead of a clear message.
        error('Surr_Theiler:invalidAlgorithm','algorithm must be 0, 1, or 2.')
end

end
66 |
function z = surr1(x,algorithm)
% SURR1 - helper that builds Fourier-based surrogates of each column of x.
% algorithm==1: randomize the Fourier phases of x (preserves the power
% spectrum).
% algorithm==2: rank-remap x onto sorted Gaussian deviates first,
% phase-randomize, then map the result back onto the sorted values of x
% (preserves the amplitude distribution as well).

[r,c] = size(x);

y= zeros(r,c);

if abs(algorithm)==2
% Rank-order remapping: replace each column of x with Gaussian deviates
% that share its rank ordering (xii holds the rank of each sample).
ra= randn(size(x));
[sr,~]= sort(ra);
[sx,xi]= sort(x);
[~,xii]= sort(xi);
for k=1:c
y(:,k)= sr(xii(:,k));
end
else
y= x;
end
% Remove the column means before the FFT; m(ones(r,1),:) replicates the
% row of means r times.
m= mean(y);
y= y-m(ones(r,1),:);

fy = fft(y);

% randomizing phase
% The same random phase vector is applied to every column (phase is r x 1,
% replicated across the c columns).
phase= rand(r,1);
phase= phase(:,ones(1,c));

% rot = e^(i*2*pi*phase): unit-magnitude rotation of each Fourier
% coefficient, so coefficient magnitudes (the power spectrum) are kept.
rot= exp(1) .^ (2*pi*sqrt(-1)*phase);
fyy= fy .* rot;

% NOTE(review): the random phases are not constrained to be conjugate-
% symmetric, so ifft(fyy) is generally complex; real() keeps only the real
% part. Confirm this matches the intended Theiler algorithm.
yy= real(ifft(fyy)) + m(ones(r,1),:);

if abs(algorithm)==2
% Map the phase-randomized series back onto the original amplitude
% distribution (the sorted values sx of x) by rank.
[~,yyi] = sort(yy);
[~,yyii] = sort(yyi);
for k=1:c
z(:,k) = sx(yyii(:,k));
end
else
z= yy;
end

end
109 |
110 |
111 |
112 |
113 |
114 |
--------------------------------------------------------------------------------
/matlab/corrdim.asv:
--------------------------------------------------------------------------------
1 | function CoD=corrdim(x,tau,de,plotOption)
2 | %Correlation Dimension
3 | %Scaling region mid-one quarter of vertical axis -- mid + OneQuarter
4 | %2/5/2008
5 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6 | %x: a time series
7 | %tau: time delay
8 | %de: embedding diemnsion
9 | %plotOption: set plotOption=1 to see plots
10 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
11 |
12 | bins=200;
13 | n = length(x)-(de-1)*tau; % total number of reconstructed vectors
14 | %Use embedding to calculate the distances between vectors
15 | y = embed(x,de,tau);
16 |
17 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
18 | %Find the interval (epsilon2 < epsilon < epsilon1), where epsilon1 is the
19 | %order of the size of the attractor in phase space while epsilon2 is the
20 | %smallest spacing |yi - yj|. If epsilon2 = 0, then esp is assigned to
21 | %epsilon2 in order to avoid log(0) = -Inf
22 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
23 |
24 | % initialize epsilon1 and epsilon2
25 | epsilon1 = 0;
26 | epsilon2 = Inf;
27 |
28 | k=de*tau; % removing temporal correlations
29 | for i = 1:n-k-1
30 | distance = sqrt(sum((y(:,i+k+1:n)-y(:,i)*ones(size(i+k+1:n))).^2));
31 | epsilon1 = max(max(distance),epsilon1);
32 | epsilon2 = min(min(distance), epsilon2);
33 | end
34 |
35 | if epsilon2==0 % in order to take log of epsilon2
36 | epsilon2=eps;
37 | end
38 |
39 | epsilon =linspace(log(epsilon2),log(epsilon1),bins);
40 | %using natural log
41 | epsilon=exp(1).^epsilon;
42 |
43 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
44 | % CORRELATION SUM
45 | % C(epsilon) = sum (H(epsilon - |yi-yj|)/N^2 as n --> infinity
46 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
47 |
48 | CI=[];
49 | for i = 1:n-k-1
50 | distance = sqrt(sum((y(:,i+k+1:n)-y(:,i)*ones(size(i+k+1:n))).^2)); %using Eucledian distance
51 | Sum_HeaviSide = histc(distance,epsilon); %Heaviside function H(z)itself returns 1 for positive z and 0 otherwise.
52 | %Store the value into CI matrix
53 | if i==1
54 | CI=[CI; Sum_HeaviSide];
55 | else
56 | CI=sum([CI; Sum_HeaviSide]);
57 | end
58 | end
59 |
60 | %taking natural log of correlation integral
61 | CI = log(cumsum(CI)/((n-k)^2));
62 | CI = CI + log(n-k-1); % renormalizes to natural log of average pts in a neigborhood
63 | epsilon=log(epsilon);
64 |
65 | if isinf(min(CI))==1
66 | i=max(find(CI==-Inf));
67 | minCI=min(CI(i+1:end));
68 | else
69 | minCI=min(CI);
70 | end
71 |
72 | if isinf(max(CI))==1
73 | i=min(find(CI==Inf));
74 | maxCI=max(CI(1:end-i-1));
75 | else
76 | maxCI=max(CI);
77 | end
78 |
79 | %Find scaling region
80 | midCIValue=((max(CI)-minCI)/2)+ minCI; % find the mid value of CI
81 | OneQuarter=(max(CI)-minCI)/4; %calculate 1/4 of CI
82 | y_LowerBound = midCIValue; % let midCIValue be the lower bound on y axis (scaling region)
83 | y_UpperBound=midCIValue+OneQuarter; %upper bound on y axis (scaling region)
84 | intervals=intersect(find(CI > y_LowerBound),find(CI=0);
66 | x= x(id,:);
67 | end;
68 |
--------------------------------------------------------------------------------
/matlab/fgn_sim.m:
--------------------------------------------------------------------------------
function series = fgn_sim(n, H)
% Generate Fractional Gaussian Noise (FGN) time series
% inputs:
%   n (integer): Desired length of the series
%   H (real number): Hurst exponent for the output series (0 < H < 1)
%
% outputs:
%   series: Real-valued time series of length n with Hurst exponent H
%
% References:
%   Beran, J. (1994). Statistics for long-memory processes. Chapman & Hall.

%% ========================================================================
% ------ EXAMPLE ------

% - Create time series of 1000 datapoints to have an H of 0.7
%   n = 1000
%   H = 0.7
%   dat = fgn_sim(n, H)

%% ========================================================================

% Copyright 2024 Nonlinear Analysis Core, Center for Human Movement
% Variability, University of Nebraska at Omaha
%
% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are
% met:
%
% 1. Redistributions of source code must retain the above copyright notice,
% this list of conditions and the following disclaimer.
%
% 2. Redistributions in binary form must reproduce the above copyright
% notice, this list of conditions and the following disclaimer in the
% documentation and/or other materials provided with the distribution.
%
% 3. Neither the name of the copyright holder nor the names of its
% contributors may be used to endorse or promote products derived from
% this software without specific prior written permission.
%
% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
% IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
% THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
% PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
% CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
% EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
% PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
% PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
% LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
% NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

% FUNCTION:

% Settings:
mu = 0; % output mean
sd = 1; % output standard deviation

% Generate a complex Gaussian sequence with the symmetry needed so the
% inverse FFT below yields a real-valued series:
z = randn(1,2*n);
zr = z(1:n);

zi = z((n+1):(2*n));
zic = -zi;
zi(1) = 0;
zr(1) = zr(1)*sqrt(2);
zi(n) = 0;
zr(n) = zr(n)*sqrt(2);
zr = [zr(1:n), zr(fliplr(2:(n-1)))];
zi = [zi(1:n), zic(fliplr(2:(n-1)))];
z = complex(zr, zi);

% FGN autocovariance sequence for Hurst exponent H:
k = 0:(n-1);
gammak = ((abs(k-1).^(2*H))-(2*abs(k).^(2*H))+(abs(k+1).^(2*H)))/2;
ind = [0:(n - 2), (n - 1), fliplr(1:(n-2))];
gkFGN0 = ifft(gammak(ind + 1))*length(z); % needs to be non-normalized
gksqrt = real(gkFGN0);
if (all(gksqrt > 0))
    gksqrt = sqrt(gksqrt);
    z = z.*gksqrt;
    z = ifft(z)*length(z);
    z = 0.5*(n-1).^(-0.5)*z;
    z = real(z(1:n));
else
    % BUG FIX: this branch previously called Error(...), which is not a
    % MATLAB function (error is lowercase), so the failure path itself
    % crashed with "Undefined function 'Error'" instead of reporting the
    % real problem. The dead assignment gksqrt = 0*gksqrt was removed.
    error('Re(gk)-vector not positive')
end

% Scale/shift to the requested mean and standard deviation.
% NOTE(review): the original comment suggested standardizing with
% (z-mean(z))/sqrt(var(z)), but no standardization is applied; series is
% simply sd*z + mu. Confirm whether explicit standardization was intended.
series = sd*z + mu;

end
94 |
--------------------------------------------------------------------------------
/matlab/linehist.m:
--------------------------------------------------------------------------------
1 | function [recurrence, diag_hist, vertical_hist, a] = linehist(data, a, radius, type)
2 |
3 | % Check if a is cell
4 | if ~iscell(a)
5 | a = {a};
6 | end
7 |
8 | % Convert distance matrices to recurrence matrices
9 | for i2 = 1:length(a)
10 | a{i2} = a{i2}+radius;
11 | a{i2}(a{i2} >= 0) = 1;
12 | a{i2}(a{i2} < 0) = 0;
13 | end
14 |
15 | % If a contains multiple recurrence matrices, compute dot product of all
16 | % matrices...?
17 | if length(a) > 1
18 | for i3 = 1:length(a)-1
19 | a{i3+1} = a{i3}.*a{i3+1};
20 | end
21 | a = a{i3+1};
22 | else
23 | a = a{1};
24 | end
25 |
26 | % Caluculate diagonal line distribution
27 | diag_hist = [];
28 | vertical_hist = [];
29 | for i4 = -(length(data)-1):length(data)-1
30 | c=diag(a,i4);
31 | % bwlabel is taking each diagonal line and looking for the 1's, it will
32 | % return increasing numbers for each new instance of 1's, for example
33 | % the input vector [0 1 1 0 1 0 1 1 0 0 1 1 1] will return
34 | % [0 1 1 0 2 0 3 3 0 0 4 4 4]
35 | d=bwlabel(c,8);
36 | % tabulate counts the instances of each integer, therefore the line
37 | % lengths
38 | if sum(d) ~= 0
39 | d = nonzeros(hist(d)); % This speeds up the code 30-40% and is simpler to understand.
40 | else
41 | d = [];
42 | end
43 | if i4 ~= 0
44 | d=d(2:end);
45 | end
46 | % diag_hist is creating one long array of all of the line lengths for
47 | % all of the diagonals
48 | diag_hist(length(diag_hist)+1:length(diag_hist)+length(d))=d;
49 | end
50 |
51 | % Remove the line of identity in RQA, jRQA, and mdRQA
52 | if ~contains(type,'crqa')
53 | diag_hist=diag_hist(diag_hist threshold
6 | disp('Minimum radius has been adjusted...');
7 | if rec == 0
8 | radius_start = radius_start*2;
9 | elseif rec > threshold
10 | radius_start = radius_start / 1.5;
11 | end
12 | [rec, ~, ~, ~] = linehist(data,a,radius_start,type);
13 | end
14 |
15 | % if radius_end is too large
16 | [rec, ~, ~, ~] = linehist(data,a,radius_end,type);
17 | while rec < threshold
18 | disp('Maximum radius has been increased...');
19 | radius_end = radius_end*2;
20 | [rec, ~, ~, ~] = linehist(data,a,radius_end,type);
21 | end
22 |
23 | % Search for radius with target percent recurrence
24 | % Create wait bar to display progress
25 | wb = waitbar(0,['Finding radius to give %REC = ',num2str(threshold), ' Please wait...']);
26 | lv = radius_start; % set low value
27 | hv = radius_end; % set high value
28 | target = threshold; % designate what percent recurrence is wanted
29 | for i1 = 1:iter
30 | mid(i1) = (lv(i1)+hv(i1))/2; % find midpoint between hv and lv
31 | rad(i1) = mid(i1); % new radius for this iteration
32 |
33 | % Compute recurrence matrix with new radius
34 | [rec, diag_hist, vertical_hist,A] = linehist(data,a, rad(i1),type);
35 | rec_iter(i1) = rec; % set percent recurrence
36 | if rec_iter(i1) < target
37 | % if percent recurrence is below target percent recurrence,
38 | % update low value
39 | hv(i1+1) = hv(i1);
40 | lv(i1+1) = mid(i1);
41 | else
42 | % if percent recurrence is above or equal to target percent
43 | % recurrence, update high value
44 | lv(i1+1) = lv(i1);
45 | hv(i1+1) = mid(i1);
46 | end
47 | waitbar(i1/iter,wb); % update wait bar
48 | end
49 | close(wb) % close wait bar
50 | rec_final = rec_iter(end); % set final percent recurrence
51 | rad_final = rad(end); % set radius for final percent recurrence
52 | disp(['% recurrence = ',num2str(rec_final),', radius = ',num2str((rad_final))])
53 | end
--------------------------------------------------------------------------------
/pyEnv.yml:
--------------------------------------------------------------------------------
1 | name: pyEnv
2 | channels:
3 | - defaults
4 | dependencies:
5 | - blas=1.0=mkl
6 | - blosc=1.21.0=h19a0ad4_0
7 | - brotli=1.0.9=ha925a31_2
8 | - bzip2=1.0.8=he774522_0
9 | - ca-certificates=2021.7.5=haa95532_1
10 | - certifi=2021.5.30=py38haa95532_0
11 | - cfitsio=3.470=he774522_6
12 | - charls=2.2.0=h6c2663c_0
13 | - cloudpickle=1.6.0=py_0
14 | - cycler=0.10.0=py38_0
15 | - cytoolz=0.11.0=py38he774522_0
16 | - dask-core=2021.8.1=pyhd3eb1b0_0
17 | - fast-histogram=0.9=py38h080aedc_0
18 | - fonttools=4.25.0=pyhd3eb1b0_0
19 | - freetype=2.10.4=hd328e21_0
20 | - fsspec=2021.7.0=pyhd3eb1b0_0
21 | - giflib=5.2.1=h62dcd97_0
22 | - icc_rt=2019.0.0=h0cc432a_1
23 | - imagecodecs=2021.6.8=py38h5da4933_0
24 | - imageio=2.9.0=pyhd3eb1b0_0
25 | - intel-openmp=2021.3.0=haa95532_3372
26 | - joblib=1.0.1=pyhd3eb1b0_0
27 | - jpeg=9b=hb83a4c4_2
28 | - kiwisolver=1.3.1=py38hd77b12b_0
29 | - lcms2=2.12=h83e58a3_0
30 | - lerc=2.2.1=hd77b12b_0
31 | - libaec=1.0.4=h33f27b4_1
32 | - libdeflate=1.7=h2bbff1b_5
33 | - libpng=1.6.37=h2a8f88b_0
34 | - libtiff=4.2.0=hd0e1b90_0
35 | - libzopfli=1.0.3=ha925a31_0
36 | - llvmlite=0.36.0=py38h34b8924_4
37 | - locket=0.2.1=py38haa95532_1
38 | - lz4-c=1.9.3=h2bbff1b_1
39 | - matplotlib-base=3.4.2=py38h49ac443_0
40 | - mkl=2021.3.0=haa95532_524
41 | - mkl-service=2.4.0=py38h2bbff1b_0
42 | - mkl_fft=1.3.0=py38h277e83a_2
43 | - mkl_random=1.2.2=py38hf11a4ad_0
44 | - munkres=1.1.4=py_0
45 | - networkx=2.6.2=pyhd3eb1b0_0
46 | - numba=0.53.1=py38hf11a4ad_0
47 | - numpy=1.20.1=py38h34a8a5c_0
48 | - numpy-base=1.20.1=py38haf7ebc8_0
49 | - olefile=0.46=py_0
50 | - openjpeg=2.4.0=h4fc8c34_0
51 | - openssl=1.1.1l=h2bbff1b_0
52 | - packaging=21.0=pyhd3eb1b0_0
53 | - partd=1.2.0=pyhd3eb1b0_0
54 | - pillow=8.3.1=py38h4fa10fc_0
55 | - pip=21.0.1=py38haa95532_0
56 | - pyparsing=2.4.7=pyhd3eb1b0_0
57 | - python=3.8.8=hdbf39b2_5
58 | - python-dateutil=2.8.2=pyhd3eb1b0_0
59 | - pywavelets=1.1.1=py38he774522_2
60 | - pyyaml=5.4.1=py38h2bbff1b_1
61 | - scikit-image=0.18.1=py38hf11a4ad_0
62 | - scikit-learn=0.24.1=py38hf11a4ad_0
63 | - scipy=1.6.2=py38h66253e8_1
64 | - setuptools=52.0.0=py38haa95532_0
65 | - six=1.16.0=pyhd3eb1b0_0
66 | - snappy=1.1.8=h33f27b4_0
67 | - sqlite=3.36.0=h2bbff1b_0
68 | - tbb=2020.3=h74a9793_0
69 | - threadpoolctl=2.2.0=pyhbf3da8f_0
70 | - tifffile=2021.7.2=pyhd3eb1b0_2
71 | - tk=8.6.10=he774522_0
72 | - toolz=0.11.1=pyhd3eb1b0_0
73 | - tornado=6.1=py38h2bbff1b_0
74 | - vc=14.2=h21ff451_1
75 | - vs2015_runtime=14.27.29016=h5e58377_2
76 | - wheel=0.37.0=pyhd3eb1b0_0
77 | - wincertstore=0.2=py38_0
78 | - xz=5.2.5=h62dcd97_0
79 | - yaml=0.2.5=he774522_0
80 | - zfp=0.5.5=hd77b12b_6
81 | - zlib=1.2.11=h62dcd97_4
82 | - zstd=1.4.9=h19a0ad4_0
83 | prefix: C:\Users\louis\.conda\envs\pyEnv
84 |
--------------------------------------------------------------------------------
/python/Div_JS.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpy import matlib
3 | from Div_KL import Div_KL
4 |
def Div_JS(P,Q):
    """
    Jensen-Shannon divergence of two probability distributions.

    dist = Div_JS(P,Q)
    P and Q are automatically normalised so that each row sums to one.
    P = n x nbins
    Q = 1 x nbins
    dist = n x 1

    Raises
    ------
    ValueError
        If P and Q have a different number of columns.

    Copyright 2020 Nonlinear Analysis Core, Center for Human Movement
    Variability, University of Nebraska at Omaha

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:

    1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

    2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

    3. Neither the name of the copyright holder nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
    THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
    PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
    CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
    EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
    PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    """

    if not isinstance(P, np.ndarray):
        P = np.array(P, ndmin=2)

    if not isinstance(Q, np.ndarray):
        Q = np.array(Q, ndmin=2)

    if P.shape[1] != Q.shape[1]:
        raise ValueError('The number of columns in P and Q should be the same')

    # Normalize Q, then replicate it to one row per row of P so that
    # Div_KL(Q, M) sees matching row counts. np.tile replaces the
    # deprecated numpy.matlib.repmat; row-sum normalization of P uses
    # broadcasting for the same reason.
    Q = np.divide(Q, np.sum(Q))
    Q = np.tile(Q, (P.shape[0], 1))
    P = np.divide(P, np.sum(P, axis=1, keepdims=True))

    # M is the midpoint (mixture) distribution of P and Q.
    M = np.multiply(0.5, np.add(P, Q))

    # JSD(P||Q) = 0.5*KL(P||M) + 0.5*KL(Q||M)
    dist = np.multiply(0.5, Div_KL(P, M)) + 0.5*Div_KL(Q, M)
    return dist
--------------------------------------------------------------------------------
/python/Div_KL.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpy import matlib
3 |
def Div_KL(P,Q):
    """
    dist = Div_KL(P,Q) Kullback-Leibler divergence of two discrete
    probability distributions.

    P and Q are automatically normalised so that each row sums to one.
    P = n x nbins
    Q = 1 x nbins or n x nbins (one to one)
    dist = n x 1

    Raises
    ------
    ValueError
        If P and Q have a different number of columns, if either input
        contains non-finite values, or if Q has neither 1 row nor the
        same number of rows as P.

    Copyright 2020 Nonlinear Analysis Core, Center for Human Movement
    Variability, University of Nebraska at Omaha

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:

    1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

    2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

    3. Neither the name of the copyright holder nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
    THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
    PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
    CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
    EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
    PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    """

    if not isinstance(P, np.ndarray):
        P = np.array(P, ndmin=2)

    if not isinstance(Q, np.ndarray):
        Q = np.array(Q, ndmin=2)

    if P.shape[1] != Q.shape[1]:
        raise ValueError('the number of columns in P and Q should be the same')

    # BUG FIX: previously used .any(), which only raised when EVERY element
    # was non-finite; .all() raises as soon as any element is non-finite.
    if not np.isfinite(P).all() or not np.isfinite(Q).all():
        raise ValueError('the inputs contain non-finite values.')

    # normalizing the P and Q (row sums become 1); broadcasting replaces
    # the deprecated numpy.matlib.repmat calls with identical values.
    if Q.shape[0] == 1:
        # Q has one row: compare every row of P against the same Q.
        Q = np.divide(Q, np.sum(Q))
        P = np.divide(P, np.sum(P, axis=1, keepdims=True))
        dist = np.sum(np.multiply(P, np.log(np.divide(P, Q))), axis=1)
    elif Q.shape[0] == P.shape[0]:
        # one-to-one rows: compare row i of P against row i of Q.
        Q = np.divide(Q, np.sum(Q, axis=1, keepdims=True))
        P = np.divide(P, np.sum(P, axis=1, keepdims=True))
        dist = np.sum(np.multiply(P, np.log(np.divide(P, Q))), axis=1)
    else:
        # BUG FIX: previously fell through with `dist` undefined and
        # crashed with NameError on the next line.
        raise ValueError('Q must have either 1 row or the same number of rows as P')

    # resolving the case when P(i)==0: 0*log(0) yields nan, define it as 0
    dist[np.isnan(dist)] = 0
    return dist
--------------------------------------------------------------------------------
/python/Ent_Ap.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
def Ent_Ap(data, dim, r):
    """
    Approximate entropy (ApEn) of a time series.

    Parameters
    ----------
    data : array-like
        Time-series data.
    dim : int
        Embedding dimension.
    r : float
        Tolerance as a proportion of the standard deviation of `data`
        (typically 0.2).

    Returns
    -------
    float
        Approximate entropy, phi(dim) - phi(dim+1), where each phi is the
        average log match frequency (self-matches included, following the
        original paper's definition; this does not apply to Sample
        Entropy, which excludes self-matches).

    Notes
    -----
    Originally coded by Kijoon Lee, kjlee@ntu.edu.sg (Ver 1, Mar 2012).
    Gives the same result as ApEn = ApEnt(data,m,R) by John McCamley
    (2015), but faster.
    """
    tol = r * np.std(data)          # absolute tolerance for a "match"
    N = len(data)
    phim = np.zeros(2)              # phi for m = dim and m = dim + 1

    for j in range(2):
        m = dim + j
        count = N - m + 1           # number of m-length template vectors

        # Embedding matrix: row i holds data[i:i+m].
        emb = np.zeros((count, m))
        for col in range(m):
            emb[:, col] = data[col:N - m + col + 1]

        phi = np.zeros(count)
        for row in range(count):
            # A row matches the template when every coordinate difference
            # is <= tol; rows listed in `exceeding` fail that test.
            diff = np.abs(emb - emb[row, :])
            exceeding = np.unique(np.where(diff > tol)[0])
            matches = len(diff) - len(exceeding)   # includes the self-match
            phi[row] = matches / count
        phim[j] = np.sum(np.log(phi)) / count

    return phim[0] - phim[1]
48 |
49 |
50 |
--------------------------------------------------------------------------------
/python/Ent_Permu.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
def Ent_Permu(data, m, tau):
    """
    (permEnt, hist) = Ent_Permu(data, m, tau)
    inputs  - data: 1-D array of data being analyzed
              m: embedding dimension (order of permutation entropy)
              tau: time delay
    outputs - permEnt: permutation entropy, calculated with log base 2
              hist: number of occurrences of each observed permutation
                    order (insertion order of first appearance)
    Remarks
    - Complexity parameter for time series based on comparison of
      neighboring values, using the distribution of ordinal patterns as
      described by Bandt & Pompe, 2002. Uses log2 per Bandt & Pompe
      (unlike the natural-log MatLab Central version), and supports a
      time delay.
    References
    - Bandt, C., Pompe, B. Permutation entropy: A natural complexity
      measure for time series. Phys Rev Lett 2002, 88, 174102,
      doi:10.1103/PhysRevLett.88.174102
    Jun 2016 - Created by Patrick Meng-Frecker, unonbcf@unomaha.edu
    Dec 2016 - Edited by Casey Wiens, email: unonbcf@unomaha.edu
    """
    def permutation_search(data, m, tau, N):
        # Count occurrences of each ordinal pattern. Patterns are keyed by
        # the tuple of argsort indices. BUG FIX: patterns were previously
        # keyed by joining the indices as strings, which is ambiguous once
        # m > 10 (e.g. '1'+'10' and '11'+'0' both join to '110'), merging
        # distinct patterns; tuple keys are unambiguous for any m and give
        # identical results for m <= 10.
        permDict = dict()
        for cnt1 in range(N - tau*(m - 1)):  # one window per admissible start index
            permVal = tuple(np.argsort(data[cnt1:cnt1 + tau*(m - 1) + 1:tau]))
            permDict[permVal] = permDict.get(permVal, 0) + 1
        return np.array(list(permDict.values()))

    N = len(data)  # length of time series
    hist = permutation_search(data, m, tau, N)
    per = hist/np.sum(hist)  # ratio of each permutation count to total windows
    permEnt = np.negative(np.sum(np.multiply(per, np.log2(per))))  # Shannon entropy, base 2
    return (permEnt, hist)
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/python/Ent_Samp.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
def Ent_Samp(data, m, r):
    """
    SE = Ent_Samp(data, m, r) -- sample entropy of a time series.

    inputs  - data, single column time series
            - m, length of vectors to be compared
            - r, radius for accepting matches, as a proportion of the
              standard deviation (the absolute tolerance is r * std(data))
    output  - SE, sample entropy

    Remarks
    - Implements the method of Richman, J.S., Moorman, J.R., 2000.
      "Physiological time-series analysis using approximate entropy and
      sample entropy." Am. J. Physiol. Heart Circ. Physiol. 278,
      H2039-H2049. Self-matches are excluded from the counts.
    - m is generally recommended as 2, r as 0.2.
    May 2016 - Modified by John McCamley, unonbcf@unomaha.edu
    Jul 2020 - Modified by Ben Senderling, bmchnonan@unomaha.edu
    """
    tol = r * np.std(data)                 # absolute match tolerance
    series = np.array(data)
    n_templates = len(series) - m          # number of length-m templates

    match_m = np.zeros((n_templates, 1))   # per-template B frequencies (length m)
    match_m1 = np.zeros((n_templates, 1))  # per-template A frequencies (length m+1)

    dist = np.zeros((n_templates, m + 1))
    for i in range(n_templates):
        # Column k holds |series[j+k] - series[i+k]| for every template j,
        # so row j is the coordinate-wise distance of template j from i.
        for k in range(m + 1):
            dist[:, k] = np.abs(series[k:n_templates + k] - series[i + k])
        within_m = np.where(np.max(dist[:, 0:m], axis=1) <= tol)
        within_m1 = np.where(np.max(dist, axis=1) <= tol)
        # Subtract 1 to drop the self-match of template i with itself.
        match_m[i] = (within_m[0].shape[0] - 1) / n_templates
        match_m1[i] = (within_m1[0].shape[0] - 1) / n_templates

    Bmr = np.sum(match_m) / n_templates
    Amr = np.sum(match_m1) / n_templates

    return -np.log(Amr / Bmr)
57 |
--------------------------------------------------------------------------------
/python/Ent_Symbolic.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
def Ent_Symbolic(X, L):
    """
    Calculate the Symbolic Entropy for given binary data.

    NCSE = Ent_Symbolic(X, L)

    Input  - X: 1-dimensional binary array of data
             L: word length
    Output - NCSE: normalized corrected Shannon entropy

    Remarks
    - This code calculates the symbolic entropy value for the provided data
      at a given word length as described by Aziz, W., Arif, M., 2006.
      "Complexity analysis of stride interval time series by threshold
      dependent symbolic entropy." Eur. J. Appl. Physiol. 98: 30-40.
    Jun 2017 - Created by William Denton, unonbcf@unomaha.edu
    Copyright 2020 Nonlinear Analysis Core, Center for Human Movement
    Variability, University of Nebraska at Omaha
    """
    if not isinstance(X, np.ndarray):
        X = np.array(X)

    # Overlapping L-bit words, each interpreted as an integer in [0, 2**L).
    n_words = X.shape[0] - L + 1
    words = np.zeros(n_words)

    str_rep = str(np.apply_along_axis(lambda row: row.astype('|S1').tobytes().decode('utf-8'),
                                      axis=0,
                                      arr=X))
    for i in range(n_words):
        words[i] = int(str_rep[i:i + L], 2)

    max_words = 2 ** L  # number of possible words, Sm

    # Shannon entropy over the observed word frequencies. Words that never
    # occur contribute zero; skipping them avoids evaluating log2(0), which
    # previously produced RuntimeWarnings and NaNs that had to be filtered.
    H = 0.0
    for i in range(max_words):
        p = np.where(words == i)[0].size / words.size
        if p > 0:
            H += p * np.log2(p)
    H = -H

    So = np.unique(words).size  # number of distinct observed words
    Sm = max_words
    # Corrected Shannon entropy and its maximum (Aziz & Arif, 2006).
    CSE = H + (So - 1) / (2 * Sm * np.log(2))
    CSEm = -np.log2(1 / Sm) + (Sm - 1) / (2 * Sm * np.log(2))
    return CSE / CSEm
--------------------------------------------------------------------------------
/python/Ent_xSamp.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
def Ent_xSamp(x, y, m, R, norm):
    """
    Calculate the cross sample entropy of two data series.

    xSE = Ent_xSamp(x, y, m, R, norm)

    Inputs  - x, first data series
            - y, second data series
            - m, vector length for matching (usually 2 or 3)
            - R, tolerance to find matches (as a proportion of the average
              of the SDs of the data sets, usually between 0.15 and 0.25)
            - norm, normalization to perform
              - 1 = max rescale/unit interval (data ranges in value from
                0 - 1). Most commonly used for RQA.
              - 2 = mean/Zscore (used when data is more variable or has
                outliers); normalized data has SD = 1. This is best for
                cross sample entropy.
              - Any other value: the data are not normalized/rescaled.
    Output  - xSE, cross sample entropy

    Remarks
    - Function to calculate cross sample entropy for 2 data series using
      the method described by Richman and Moorman (2000).
    Sep 2015 - Created by John McCamley, unonbcf@unomaha.edu
    """
    # Make sure to have items as numpy arrays.
    if not isinstance(y, np.ndarray):
        y = np.array(y)
    if not isinstance(x, np.ndarray):
        x = np.array(x)

    # Check both sets of data are the same length.
    if x.shape[0] != y.shape[0]:
        raise ValueError('The data series provided are not the same length')

    N = x.shape[0]

    # Normalize the data so both series occupy the same "space".
    if norm == 1:  # rescale data to have a range 0 - 1
        xn = (x - np.min(x)) / (np.max(x) - np.min(x))
        yn = (y - np.min(y)) / (np.max(y) - np.min(y))
        r = R * ((np.std(xn) + np.std(yn)) / 2)
    elif norm == 2:  # normalize data to have SD = 1 and mean = 0
        xn = (x - np.mean(x)) / np.std(x)
        yn = (y - np.mean(y)) / np.std(y)
        r = R
    else:
        # BUG FIX: xn, yn and r were previously left undefined on this
        # path, causing a NameError below. Use the raw series and scale the
        # tolerance by the average SD, consistent with the meaning of R.
        print('These data will not be normalized')
        xn = x
        yn = y
        r = R * ((np.std(x) + np.std(y)) / 2)

    dij = np.zeros((N - m, m + 1))
    Bm = np.zeros(N - m)
    Am = np.zeros(N - m)

    for i in range(N - m):
        # Chebyshev distances from template i of yn to every template of xn.
        for k in range(m + 1):
            dij[:, k] = np.abs(xn[k:N - m + k] - yn[i + k])
        dj = np.max(dij[:, 0:m], axis=1)
        dj1 = np.max(dij, axis=1)
        # Cross entropy compares two different series, so there are no self
        # matches to subtract.
        nm = np.count_nonzero(dj <= r)
        nm1 = np.count_nonzero(dj1 <= r)
        Bm[i] = nm / (N - m)
        Am[i] = nm1 / (N - m)

    Bmr = np.sum(Bm) / (N - m)
    Amr = np.sum(Am) / (N - m)

    return -np.log(Amr / Bmr)
71 |
72 |
--------------------------------------------------------------------------------
/python/Surr_PseudoPeriodic.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
def Surr_PseudoPeriodic(y, tau, dim, rho):
    """
    Produce one pseudo-periodic surrogate of a time series.

    inputs  - y, time series
            - tau, time lag for phase space reconstruction
            - dim, embedding dimension for phase space reconstruction
            - rho, noise radius
    outputs - ys, surrogate time series
            - yi, selected indexes for surrogate from original time series
    Remarks
    - This code produces one pseudo periodic surrogate. It is appropriate
      to run on periodic time series to remove the long-term correlations.
      This is useful when testing for the presence of chaos or testing
      various nonlinear analysis methods.
    - There may be an optimal value of rho. This can be found by using a
      different function, or it can be specified manually.
    - If rho is too low, ~<0.01, the code will not be able to find a
      neighbor.
    References
    - Small, M., Yu, D., & Harrison, R. G. (2001). Surrogate Test for
      Pseudoperiodic Time Series Data. Physical Review Letters, 87(18).
    Version History
    May 2001 - Created by Michael Small (C code, later converted to MATLAB
               by Ben Senderling).
    Jun 2020 - Modified by Ben Senderling, bmchnonan@unomaha.edu
    Mar 2021 - Modified by Ben Senderling, bmchnonan@unomaha.edu
    - This port fixes an out-of-range initial seed index and removes the
      dependency on the deprecated (and never imported) numpy.matlib.
    """
    if not isinstance(y, np.ndarray):
        y = np.array(y)

    # Phase space reconstruction with forward lags.
    N = len(y)
    Y = np.zeros((N - (dim - 1) * tau, dim))
    for i in range(dim):
        Y[:, i] = y[i * tau:N - (dim - (i + 1)) * tau]

    # Seeding and initial point.
    lenY = Y.shape[0]
    # BUG FIX: the previous MATLAB-style "+1" could yield xi == lenY,
    # indexing one past the end of Y. Draw a valid 0-based index instead.
    xi = int(np.floor(np.random.rand(1) * lenY)[0])
    ys = np.zeros((lenY, 1))
    ys[0] = y[xi]
    yi = np.zeros((lenY, 1))
    yi[0] = xi

    M = lenY - 2

    # Construct the surrogate.
    for i in range(1, lenY):
        # Distance from the previous point to all candidate points. This is
        # the probability calculation in Small, 2001: close neighbors end
        # up with a higher value.
        # BUG FIX: np.matlib.repmat was used without importing numpy.matlib
        # (an AttributeError on modern numpy); broadcasting is equivalent.
        prob = np.exp(-np.sqrt(np.sum((Y[:M, :] - Y[xi, :]) ** 2, axis=1)) / rho)
        # A self-match is exp(0)=1, large compared to the other values.
        # Zeroing it (prob[xi] = 0) produces decent surrogates but makes
        # the optimization method un-applicable, so it is left in.
        # Cumulative sum of the probability.
        sum3 = np.cumsum(prob)
        # A random number is chosen between 0 and the total probability;
        # the last index where the cumsum stays below it (+2, matching the
        # original MATLAB index conversion) becomes the next point. Most of
        # prob is tiny; the close neighbors are the spikes.
        xi_n = None
        ind = 0
        while xi_n is None:
            a = np.random.rand(1)[0]
            hits = np.where(sum3 < (a * sum3[-1]))[0]
            ind += 1
            if hits.size > 0:
                candidate = int(hits[-1]) + 2
                if candidate == xi and ind >= 100:
                    # Could not escape a repeat after many tries; nudge
                    # forward rather than loop forever.
                    candidate += 1
                xi_n = candidate
            elif ind > 100:
                raise Exception('a new value of xi could not be found, check that rho is not too low')
        xi = xi_n

        # Add the new point to the surrogate time series.
        ys[i] = y[xi]
        yi[i] = xi

    return (ys, yi)
101 |
102 |
103 |
104 |
--------------------------------------------------------------------------------
/python/Surr_Theiler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.fft import fft, ifft
3 |
def Surr_Theiler20200723(y, algorithm):
    """
    z = Surr_Theiler20200723(y, algorithm)
    inputs  - y, time series to be surrogated
            - algorithm, the type of algorithm to be completed (0, 1 or 2)
    outputs - z, surrogated time series
    Remarks
    - This code creates a surrogate time series according to Algorithm 0
      (random shuffle), Algorithm 1 (phase randomization) or Algorithm 2
      (amplitude-adjusted phase randomization).
    References
    - Theiler, J., Eubank, S., Longtin, A., Galdrikian, B., & Doyne
      Farmer, J. (1992). Testing for nonlinearity in time series: the
      method of surrogate data. Physica D: Nonlinear Phenomena, 58(1-4),
      77-94. https://doi.org/10.1016/0167-2789(92)90102-S
    Jun 2015 - Modified by Ben Senderling
             - Added function help section and plot commands for user
               feedback; merged the two original algorithms into one code.
    Jul 2020 - Modified by Ben Senderling, bmchnonan@unomaha.edu
             - Changed file and function name; added reference.
    """
    if algorithm == 0:
        # Algorithm 0: shuffle y by sorting random noise.
        # BUG FIX: np.random.randn takes dimensions as separate arguments,
        # not a shape tuple; np.random.randn(np.shape(y)) raised TypeError.
        z = np.random.randn(*np.shape(y))
        idx = np.argsort(z)
        z = y[idx]
    elif algorithm == 1:
        z = surr1(y, algorithm)
    elif algorithm == 2:
        z = surr1(y, algorithm)
    else:
        # BUG FIX: z was previously undefined on this path, producing an
        # UnboundLocalError at the return; fail with a clear message.
        raise ValueError('algorithm must be 0, 1 or 2')

    return z
39 |
def surr1(x, algorithm):
    """
    z = surr1(x, algorithm)
    Phase-randomized surrogate (Theiler Algorithm 1) or amplitude-adjusted
    phase-randomized surrogate (Algorithm 2).

    Inputs:  x, the input series to be surrogated.
             algorithm, the selected algorithm to use (|2| selects the
             amplitude-adjusted variant; anything else is plain phase
             randomization).
    Output:  z, the surrogated time series (column-oriented 2D array).
    """
    # Work on columns: a 1D input becomes a single (r, 1) column.
    x = np.array(x, ndmin=2).T.copy()
    r, c = np.shape(x)

    y = np.zeros((r, c))

    if abs(algorithm) == 2:
        # Rank-remap the data onto a Gaussian before phase randomization.
        ra = np.random.randn(r, c)
        sr = np.sort(ra, axis=0)
        xi = np.argsort(x, axis=0)
        sx = np.sort(x, axis=0)
        xii = np.argsort(xi, axis=0)
        for i in range(c):
            y[:, i] = sr[xii[:, i]].flatten()
    else:
        y = x

    m = np.mean(y)
    y = y - m

    fy = fft(y, axis=0)

    # Randomize phases: one random rotation per frequency, repeated for
    # each column so every column is rotated identically.
    phase = np.random.rand(r, 1)
    if c > 1:
        phase = np.tile(phase, c)
    rot = np.exp(2j * np.pi * phase)
    fyy = np.multiply(fy, rot)

    # BUG FIX: the inverse transform must run along axis 0 to match the
    # forward transform; the default last-axis ifft silently returned
    # wrong values for column-shaped input.
    yy = np.real(ifft(fyy, axis=0)) + m

    if abs(algorithm) == 2:
        # Map the phase-randomized ranks back onto the original amplitudes.
        # BUG FIX: z was previously initialized from sx unconditionally,
        # raising a NameError whenever abs(algorithm) != 2.
        z = np.ones(np.shape(sx))
        yyi = np.argsort(yy, axis=0)
        yyii = np.argsort(yyi, axis=0)
        for k in range(c):
            z[:, k] = sx[yyii[:, k]].flatten()
    else:
        z = yy

    return z
93 |
94 |
--------------------------------------------------------------------------------
/python/dfa.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 | def dfa(data, scales, order=1, plot=True):
5 |
6 | """Perform Detrended Fluctuation Analysis on data
7 |
8 | Inputs:
9 | data: 1D numpy array of time series to be analyzed.
10 | scales: List or array of scales to calculate fluctuations
11 | order: Integer of polynomial fit (default=1 for linear)
12 | plot: Return loglog plot (default=True to return plot)
13 |
14 | Outputs:
15 | scales: The scales that were entered as input
16 | fluctuations: Variability measured at each scale with RMS
17 | alpha value: Value quantifying the relationship between the scales
18 | and fluctuations
19 |
20 | ....References:
21 | ........Damouras, S., Chang, M. D., Sejdi, E., & Chau, T. (2010). An empirical
22 | ..........examination of detrended fluctuation analysis for gait data. Gait &
23 | ..........posture, 31(3), 336-340.
24 | ........Mirzayof, D., & Ashkenazy, Y. (2010). Preservation of long range
25 | ..........temporal correlations under extreme random dilution. Physica A:
26 | ..........Statistical Mechanics and its Applications, 389(24), 5573-5580.
27 | ........Peng, C. K., Havlin, S., Stanley, H. E., & Goldberger, A. L. (1995).
28 | ..........Quantification of scaling exponents and crossover phenomena in
29 | ..........nonstationary heartbeat time series. Chaos: An Interdisciplinary
30 | ..........Journal of Nonlinear Science, 5(1), 82-87.
31 | # =============================================================================
32 | ------ EXAMPLE ------
33 |
34 | - Generate random data
35 | data = np.random.randn(5000)
36 |
37 | - Create a vector of the scales you want to use
38 | scales = [10, 20, 40, 80, 160, 320, 640, 1280, 2560]
39 |
40 | - Set a detrending order. Use 1 for a linear detrend.
41 | order = 1
42 |
43 | - run dfa function
44 | s, f, a = dfa(data, scales, order, plot=True)
45 | # =============================================================================
46 | """
47 |
48 | # Check if data is a column vector (2D array with one column)
49 | if data.shape[0] == 1:
50 | # Reshape the data to be a column vector
51 | data = data.reshape(-1, 1)
52 | else:
53 | # Data is already a column vector
54 | data = data
55 |
56 | # =============================================================================
57 | ########################## START DFA CALCULATION ##########################
58 | # =============================================================================
59 |
60 | # Step 1: Integrate the data
61 | integrated_data = np.cumsum(data - np.mean(data))
62 |
63 | fluctuation = []
64 |
65 | for scale in scales:
66 | # Step 2: Divide data into non-overlapping window of size 'scale'
67 | chunks = len(data) // scale
68 | ms = 0.0
69 |
70 | for i in range(chunks):
71 | this_chunk = integrated_data[i*scale:(i+1)*scale]
72 | x = np.arange(len(this_chunk))
73 |
74 | # Step 3: Fit polynomial (default is linear, i.e., order=1)
75 | coeffs = np.polyfit(x, this_chunk, order)
76 | fit = np.polyval(coeffs, x)
77 |
78 | # Detrend and calculate RMS for the current window
79 | ms += np.mean((this_chunk - fit) ** 2)
80 |
81 | # Calculate average RMS for this scale
82 | fluctuation.append(np.sqrt(ms / chunks))
83 |
84 | # Perform linear regression
85 | alpha, intercept = np.polyfit(np.log(scales), np.log(fluctuation), 1)
86 |
87 |
88 | # Create a log-log plot to visualize the results
89 | if plot:
90 | plt.figure(figsize=(8, 6))
91 | plt.loglog(scales, fluctuation, marker='o', markerfacecolor = 'red', markersize=8,
92 | linestyle='-', color = 'black', linewidth=1.7, label=f'Alpha = {alpha:.3f}')
93 | plt.xlabel('Scale (log)')
94 | plt.ylabel('Fluctuation (log)')
95 | plt.legend()
96 | plt.title('Detrended Fluctuation Analysis')
97 | plt.grid(True)
98 | plt.show()
99 |
100 | # Return the scales used, fluctuation functions and the alpha value
101 | return scales, fluctuation, alpha
102 |
103 |
104 |
105 |
--------------------------------------------------------------------------------
/python/fgn_sim.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
def fgn_sim(n=1000, H=0.7):
    """Create Fractional Gaussian Noise.

    Inputs:
        n: Number of data points of the time series. Default is 1000.
        H: Hurst parameter of the time series. Default is 0.7.
    Outputs:
        An array of n data points with variability (Hurst exponent) H.

    Raises:
        ValueError: If the real part of the spectral ("gk") vector is not
            strictly positive, in which case the circulant-embedding
            construction is invalid for these parameters.

    Example:
        dat = fgn_sim(1000, 0.7)
        # import matplotlib.pyplot as plt; plt.plot(dat); plt.show()
    """
    # Target moments of the output series.
    mean = 0
    std = 1

    # Build a complex random sequence with the conjugate symmetry required
    # so the inverse FFT below yields a real-valued series.
    z = np.random.normal(size=2 * n)
    zr = z[:n]
    zi = z[n:]
    zic = -zi
    zi[0] = 0
    zr[0] = zr[0] * np.sqrt(2)
    zi[n - 1] = 0
    zr[n - 1] = zr[n - 1] * np.sqrt(2)
    zr = np.concatenate([zr[:n], zr[n - 2::-1]])
    zi = np.concatenate([zi[:n], zic[n - 2::-1]])
    z = zr + 1j * zi

    # Autocovariance of fGN: gamma(k) = (|k-1|^2H - 2|k|^2H + |k+1|^2H)/2.
    k = np.arange(n)
    gammak = (np.abs(k - 1)**(2 * H) - 2 * np.abs(k)**(2 * H) + np.abs(k + 1)**(2 * H)) / 2
    ind = np.concatenate([np.arange(n - 1), [n - 1], np.arange(n - 2, 0, -1)])
    gammak = gammak[ind]  # circular shift of gammak to match n
    gkFGN0 = np.fft.ifft(gammak)
    gksqrt = np.real(gkFGN0)

    # Guard clause: every eigenvalue of the circulant embedding must be
    # positive for the square root below to be valid. (The old code also
    # zeroed gksqrt right before raising, which was dead work.)
    if not np.all(gksqrt > 0):
        raise ValueError("Re(gk)-vector not positive")

    gksqrt = np.sqrt(gksqrt)
    z = z[:len(gksqrt)] * gksqrt
    z = np.fft.ifft(z)
    z = 0.5 * (n - 1)**(-0.5) * z
    z = np.real(z[:n])

    # Shift/scale to the requested mean and standard deviation.
    return std * z + mean
65 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | cycler==0.11.0
2 | fonttools==4.32.0
3 | imageio==2.16.1
4 | joblib==1.1.0
5 | kiwisolver==1.4.2
6 | matplotlib==3.5.1
7 | networkx==2.7.1
8 | numpy==1.20.1
9 | packaging==21.3
10 | Pillow==9.1.0
11 | pyparsing==3.0.7
12 | python-dateutil==2.8.2
13 | PyWavelets==1.3.0
14 | scikit-image==0.18.1
15 | scikit-learn==0.24.1
16 | scipy==1.6.2
17 | six==1.16.0
18 | threadpoolctl==3.1.0
19 | tifffile==2022.4.8
20 |
--------------------------------------------------------------------------------