├── .github
└── workflows
│ └── ci.yml
├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── REFERENCES.md
├── examples
├── README.md
├── afn
│ ├── ar1.py
│ ├── henon.py
│ ├── ikeda.py
│ ├── laser.py
│ ├── lorenz.py
│ ├── mackey-glass.py
│ ├── roessler-8bit.py
│ └── roessler.py
├── d2
│ ├── ar1.py
│ ├── brown.py
│ ├── curve.py
│ ├── henon.py
│ ├── ikeda.py
│ ├── lorenz.py
│ ├── mackey-glass.py
│ ├── roessler.py
│ ├── spiral.py
│ └── white.py
├── data
│ └── mackey-glass.py
├── delay
│ ├── adfd_mackey-glass.py
│ ├── adfd_roessler.py
│ ├── dmibins.py
│ ├── dmibins2.py
│ ├── henon.py
│ ├── ikeda.py
│ ├── lorenz.py
│ ├── roessler.py
│ └── sine.py
├── fnn
│ ├── corrnum.py
│ ├── henon.py
│ ├── ikeda.py
│ ├── mackey-glass.py
│ ├── metric.py
│ └── noise.py
├── lyapunov
│ ├── curve.py
│ ├── henon.py
│ ├── lorenz.py
│ └── roessler.py
├── noise
│ ├── breath.py
│ ├── goat.py
│ ├── henon_sma.py
│ └── laser.py
├── series
│ ├── br1.dat
│ ├── br2.dat
│ ├── djii.dat
│ ├── goat.dat
│ ├── goat.mp3
│ └── laser.dat
└── surrogates
│ ├── aaft.py
│ ├── corrnoise.py
│ ├── iaaft.py
│ ├── lorenz.py
│ ├── mismatch.py
│ ├── skewlorenz.py
│ ├── skewnoise.py
│ └── unidev.py
├── nolitsa
├── __init__.py
├── d2.py
├── data.py
├── delay.py
├── dimension.py
├── lyapunov.py
├── noise.py
├── surrogates.py
├── tests
│ ├── test_d2.py
│ ├── test_data.py
│ ├── test_delay.py
│ ├── test_dimension.py
│ ├── test_lyapunov.py
│ ├── test_noise.py
│ ├── test_surrogates.py
│ └── test_utils.py
└── utils.py
├── requirements.txt
└── setup.py
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches: ['**']
6 | pull_request:
7 |
8 | jobs:
9 | tests:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - name: Checkout
14 |       uses: actions/checkout@v4
15 |
16 | - name: Set up Python
17 | uses: actions/setup-python@v4
18 | with:
19 | python-version: '3.9'
20 | cache: 'pip' # caching pip dependencies
21 |
22 | - name: Run tests
23 | run: |
24 | pip install -r requirements.txt
25 | pip install pytest
26 | python -m pytest
27 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.gitignore.io/api/python
2 |
3 | ### Python ###
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | env/
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib/
22 | lib64/
23 | parts/
24 | sdist/
25 | var/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 |
57 | # Sphinx documentation
58 | docs/_build/
59 |
60 | # PyBuilder
61 | target/
62 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2015-2016, Manu Mannattil.
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are
6 | met:
7 |
8 | 1. Redistributions of source code must retain the above copyright
9 | notice, this list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright
12 | notice, this list of conditions and the following disclaimer in the
13 | documentation and/or other materials provided with the distribution.
14 |
15 | 3. Neither the name of the copyright holder nor the names of its
16 | contributors may be used to endorse or promote products derived from
17 | this software without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 | IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 | TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
25 | TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md LICENSE
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | NoLiTSA
2 | =======
3 |
4 | NoLiTSA (NonLinear Time Series
5 | Analysis) is a Python module implementing several standard
6 | algorithms used in nonlinear time series analysis.
7 |
8 | [](https://github.com/manu-mannattil/nolitsa/actions/workflows/ci.yml)
9 |
10 | Features
11 | --------
12 |
13 | - Estimation of embedding delay using autocorrelation, delayed mutual
14 | information, and reconstruction expansion.
15 | - Embedding dimension estimation using false nearest neighbors and
16 | averaged false neighbors.
17 | - Computation of correlation sum and correlation dimension from both
18 | scalar and vector time series.
19 | - Estimation of the maximal Lyapunov exponent from both scalar and
20 | vector time series.
21 | - Generation of FT, AAFT, and IAAFT surrogates from a scalar
22 | time series.
23 | - Simple noise reduction scheme for filtering deterministic
24 | time series.
25 | - Miscellaneous functions for end point correction, stationarity
26 | check, fast near neighbor search, etc.
27 |
28 | Installation
29 | ------------
30 |
31 | NoLiTSA can be installed via
32 |
33 | pip install git+https://github.com/manu-mannattil/nolitsa.git
34 |
35 | NoLiTSA requires NumPy, SciPy, and Numba.
36 |
37 | ### Tests
38 |
39 | NoLiTSA’s unit tests can be executed by running `pytest`.
40 |
41 | Publications
42 | ------------
43 |
44 | Versions of NoLiTSA were used in the following publications:
45 |
46 | - M. Mannattil, H. Gupta, and S. Chakraborty, “Revisiting Evidence of
47 | Chaos in X-ray Light Curves: The Case of GRS 1915+105,”
48 | [Astrophys. J. **833**,
49 | 208 (2016)](https://dx.doi.org/10.3847/1538-4357/833/2/208).
50 |
51 | - M. Mannattil, A. Pandey, M. K. Verma, and S. Chakraborty, “On the
52 | applicability of low-dimensional models for convective flow
53 | reversals at extreme Prandtl numbers,” [Eur. Phys. J. B **90**, 259
54 | (2017)](https://dx.doi.org/10.1140/epjb/e2017-80391-1).
55 |
56 | Acknowledgments
57 | ---------------
58 |
59 | Sagar Chakraborty is thanked for several critical discussions.
60 |
61 | License
62 | -------
63 |
64 | NoLiTSA is licensed under the 3-clause BSD license. See the file LICENSE
65 | for more details.
66 |
--------------------------------------------------------------------------------
/REFERENCES.md:
--------------------------------------------------------------------------------
1 | References
2 | ==========
3 |
4 | - Cao, L. (1997). Practical method for determining the minimum embedding dimension of a scalar time series. [_Physica D_ __110__, 43](https://dx.doi.org/10.1016/S0167-2789(97)00118-8).
5 | - Ehlers, C.L., Havstad, J., Prichard, D. & Theiler, J. (1998). Low doses of ethanol reduce evidence for nonlinear structure in brain activity. [_J. Neurosci._ __18__, 7474](https://doi.org/10.1523/JNEUROSCI.18-18-07474.1998).
6 | - Fraser, A.M. & Swinney, H.L. (1986). Independent coordinates for strange attractors from mutual information. [_Phys. Rev. A_ __33__, 1134](https://dx.doi.org/10.1103/PhysRevA.33.1134).
7 | - Galka, A. (2000). _Topics in Nonlinear Time Series Analysis_, World Scientific, Singapore.
8 | - Grassberger, P. & Procaccia, I. (1983). Measuring the strangeness of strange attractors. [_Physica D_ __9__, 189](https://dx.doi.org/10.1016/0167-2789(83)90298-1).
9 | - Hegger, R., Kantz, H. & Schreiber, T. (1999). Practical implementation of nonlinear time series methods: The TISEAN package. [_Chaos_ __9__, 413](https://doi.org/10.1063%2F1.166424).
10 | - Isliker, H. & Kurths, J. (1993). A test for stationarity. [_Int. J. Bifurc. Chaos_ __3__, 1573](https://doi.org/10.1142%2fs0218127493001227).
11 | - Kantz, H., & Schreiber, T. (2004). _Nonlinear Time Series Analysis_, 2nd edition, Cambridge University Press, Cambridge.
12 | - Kennel, M.B., Brown, R. & Abarbanel, H.D.I. (1992). Determining embedding dimension for phase-space reconstruction using a geometrical construction. [_Phys. Rev. A_ __45__, 3403](https://dx.doi.org/10.1103/PhysRevA.45.3403).
13 | - Press, W.H., Teukolsky, S.A., Vetterling, W.T., Flannery, B.P. (2007). _Numerical Recipes_, 3rd edition, Cambridge University Press, Cambridge.
14 | - Rosenstein, M.T., Collins, J.J. & De Luca, C.J. (1993). A practical method for calculating largest Lyapunov exponents from small data sets. [_Physica D_ __65__, 117](https://doi.org/10.1016%2F0167-2789%2893%2990009-p).
15 | - Rosenstein, M.T., Collins, J.J. & De Luca, C.J. (1994). Reconstruction expansion as a geometry-based framework for choosing proper delay times. [_Physica D_ __73__, 82](https://doi.org/10.1016%2F0167-2789%2894%2990226-7).
16 | - Schreiber, T. & Schmitz, A. (1996). Improved surrogate data for nonlinearity tests. [_Phys. Rev. Lett._ __77__, 635](https://dx.doi.org/10.1103/PhysRevLett.77.635).
17 | - Schreiber, T. (1993). Extremely simple nonlinear noise-reduction method. [_Phys. Rev. E_ __47__, 2401](https://doi.org/10.1103%2Fphysreve.47.2401).
18 | - Sprott, J.C. (2003). _Chaos and Time-Series Analysis_, Oxford University Press, New York.
19 | - Theiler, J. (1990). Statistical precision of dimension estimators. [_Phys. Rev. A_ __41__, 3038](https://doi.org/10.1103%2Fphysreva.41.3038).
20 | - Theiler, J., Eubank, S., Longtin, A., Galdrikian, B. & Farmer, J.D. (1992). Testing for nonlinearity in time series. [_Physica D_ __58__, 77](https://dx.doi.org/10.1016/0167-2789(92)90102-S).
21 | - Voss, R.F. (1988). Fractals in nature. In [_The Science of Fractal Images_ (pp. 21-70)](https://dx.doi.org/10.1007/978-1-4612-3784-6_1), Springer, New York.
22 |
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | Examples
2 | ========
3 |
4 | General Workflow
5 | ----------------
6 |
7 | 1. Check stationarity.
8 | 2. Estimate time delay (e.g., autocorrelation, mutual information, 2D/3D
9 | phase portraits, etc.).
10 | 3. Estimate embedding dimension (AFN, IFNN, FNN, etc.).
11 | 4. Noise reduction (if required).
12 | 5. Estimate an invariant (e.g., Lyapunov exponent, correlation
13 | dimension/entropy, etc.).
14 | 6. Surrogate analysis (IAAFT, FT, cycle shuffled, etc.)
15 | 7. Additional tests for nonlinearity (e.g., prediction error, etc.)
16 | 8. Conclusion.
17 |
18 | Practical Demonstrations
19 | ------------------------
20 |
21 | - Generating test data sets
22 | - [Mackey–Glass system](data/mackey-glass.py)
23 | - Estimating the time delay
24 | - [Autocorrelation function of a finite sine
25 | wave](delay/sine.py)
26 | - [How many bins should one take while estimating the delayed
27 | mutual information?](delay/dmibins.py)
28 |     - [Delayed mutual information for map-like data can give bad
29 | estimates](delay/henon.py)
30 | - Time delay estimation for
31 | - [Henon map](delay/henon.py)
32 | - [Ikeda map](delay/ikeda.py)
33 | - [Rössler oscillator](delay/roessler.py)
34 | - [Lorenz attractor](delay/lorenz.py)
35 | - Average deviation from the diagonal (ADFD)
36 | - [Mackey–Glass system](delay/adfd_mackey-glass.py)
37 | - [Rössler oscillator](delay/adfd_roessler.py)
38 | - Averaged false neighbors (AFN), aka Cao’s test
39 | - Averaged false neighbors for:
40 | - [Henon map](afn/henon.py)
41 | - [Ikeda map](afn/ikeda.py)
42 | - [Lorenz attractor](afn/lorenz.py)
43 | - [Mackey–Glass system](afn/mackey-glass.py)
44 | - [Rössler oscillator](afn/roessler.py)
45 | - [Data from a far-infrared laser](afn/laser.py)
46 | - [AFN is not impervious to every stochastic data](afn/ar1.py)
47 | - [AFN can cause trouble with discrete data](afn/roessler-8bit.py)
48 | - False nearest neighbors (FNN)
49 | - FNN for:
50 | - [Henon map](fnn/henon.py)
51 | - [Ikeda map](fnn/ikeda.py)
52 | - [Mackey–Glass system](fnn/mackey-glass.py)
53 | - [Uncorrelated noise](fnn/noise.py)
54 | - [FNN results depend on the metric used](fnn/metric.py)
55 | - [FNN can fail when temporal correlations are not
56 | removed](fnn/corrnum.py)
57 | - Noise reduction
58 | - [Filtering human breath data](noise/breath.py)
59 | - [Cleaning the “GOAT” vowel](noise/goat.py)
60 | - [Simple moving average vs. nonlinear noise
61 | reduction](noise/henon_sma.py)
62 | - [Filtering data from a far-infrared laser](noise/laser.py)
63 | - Correlation sum/correlation dimension
64 | - Computing the correlation sum for:
65 | - [Henon map](d2/henon.py)
66 | - [Ikeda map](d2/ikeda.py)
67 | - [Lorenz attractor](d2/lorenz.py)
68 | - [Rössler oscillator](d2/roessler.py)
69 | - [Mackey–Glass system](d2/mackey-glass.py)
70 | - [White noise](d2/white.py)
71 | - Subtleties
72 | - [AR(1) process can mimic a deterministic process](d2/ar1.py)
73 | - [Brown noise can have a saturating
74 | D2](d2/brown.py)
75 | - Computing D2 of a geometrical object
76 | - [Nonstationary spiral](d2/spiral.py)
77 | - [Closed noisy curve](d2/curve.py)
78 | - Maximum Lyapunov exponent (MLE)
79 | - Computing the MLE for:
80 | - [Henon map](lyapunov/henon.py)
81 | - [Lorenz attractor](lyapunov/lorenz.py)
82 | - [Rössler oscillator](lyapunov/roessler.py)
83 | - [Closed noisy curve](lyapunov/curve.py)
84 | - Surrogate analysis
85 | - Algorithms
86 | - [AAFT surrogates](surrogates/aaft.py)
87 | - [IAAFT surrogates](surrogates/iaaft.py)
88 | - Examples of surrogate analysis of
89 | - [Linearly correlated noise](surrogates/corrnoise.py)
90 | - [Nonlinear time series](surrogates/lorenz.py)
91 | - Time reversal asymmetry:
92 | - [Skew statistic fails for linear stochastic
93 | data](surrogates/skewnoise.py)
94 | - [Skew statistic fails for Lorenz](surrogates/skewlorenz.py)
95 | - [Why end point mismatch in a time series ought to be
96 | reduced](surrogates/mismatch.py)
97 | - [Converting a time series to a uniform deviate is
98 | harmful](surrogates/unidev.py)
99 |
100 | General Tips
101 | ------------
102 |
103 | While there is no dearth of good literature on nonlinear time series
104 | analysis, here are a few things that I found to be useful in practical
105 | situations.
106 |
107 | 1.  Picking a good time delay hinges on balancing redundancy and
108 | irrelevance between the components of the time delayed vectors and
109 | there are no straightforward theoretical results that help us do
110 | this. Therefore, always plot two- and three-dimensional phase
111 | portraits of the reconstructed attractor before settling on a time
112 | delay and verify that the attractor (or whatever structure appears)
113 | looks unfolded. Don’t blindly pick values by just looking at the
114 | delayed mutual information or the autocorrelation function of the
115 | time series.
116 |
117 | 2. If possible, use the Chebyshev metric while computing the
118 | correlation sum C(r). The Chebyshev metric
119 | has many advantages, especially when we are trying to evaluate
120 | C(r) after embedding the time series.
121 |
122 | - It’s computationally faster than the cityblock and the Euclidean
123 | metric. For larger data sets, this is an obvious advantage.
124 |
125 | - Distances are independent of the embedding dimension
126 | d and always remain bounded as opposed to Euclidean
127 |       and cityblock distances, which crudely go as d<sup>1/2</sup> and
128 | d respectively. This helps in comparing the
129 | correlation sum plots at different embedding dimensions.
130 |
131 | - Since the distances always remain bounded, we can evaluate
132 | C(r) at the same r’s for
133 | all embedding dimensions. And C(r) at
134 |       r<sub>max</sub> = max<sub>i</sub> x<sub>i</sub> − min<sub>i</sub> x<sub>i</sub>
135 | is 1 regardless of the embedding dimension. Of course,
136 | C(r) could be 1 even for
137 |       r < r<sub>max</sub>.
138 | Nonetheless, this helps in choosing a range of r
139 | values.
140 |
141 | 3. Ensure that temporal correlations between points are removed in all
142 | cases where it is known to result in spurious estimates of dimension
143 | and/or determinism.
144 |
145 | 4.  Avoid the skew statistic which attempts to measure asymmetry w.r.t.
146 | time reversal for detecting nonlinearity. It tells very little about
147 | the origin of nonlinearity and fails miserably in many cases.
148 |
149 | 5. The second FNN test tests for “boundedness” of the reconstructed
150 | attractor.
151 |
152 | 6. When plotting the reconstructed phase space, use same scaling for
153 | all the axes as all d coordinates are sampled from the
154 | same distribution.
155 |
156 | 7. If the series has a strong periodic component, a reasonable time
157 | delay is the quarter of the time period.
158 |
159 | 8. Some chaotic time series such as those from Lorenz attractor may
160 | display long-range correlations (because of its “reversing” nature).
161 | In such cases, a delay may be determined by computing
162 | autocorrelation function of the square of the original
163 | time series.
164 |
--------------------------------------------------------------------------------
/examples/afn/ar1.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""AFN applied to a correlated stochastic time series.

Correlated noise can fool the AFN algorithm.  An AR(1) process is used
here, and the resulting E2(d) curve looks nontrivial, wrongly
suggesting determinism.

Two observations:

1. Even though the E2(d) curves look nontrivial, their shape differs
   from that of deterministic data: E2(d) first drops with d and only
   then rises, while for deterministic series E2(d) is seen to grow
   right from the start.

2. Requiring a minimum temporal separation (equal to the series'
   autocorrelation time) between near neighbors fixes the problem.
"""

from nolitsa import delay, dimension, utils
import matplotlib.pyplot as plt
import numpy as np

# Build the AR(1) series x[t] = a * x[t - 1] + noise.
N = 5 * 1000
x = np.empty(N)

np.random.seed(999)
eta = np.random.normal(size=(N), loc=0, scale=1.0)
a = 0.99

x[0] = eta[0]
for t in range(1, N):
    x[t] = a * x[t - 1] + eta[t]

x = utils.rescale(x)

# Autocorrelation time: first lag where the autocorrelation drops
# below 1/e.
tau = np.argmax(delay.acorr(x) < 1.0 / np.e)

dim = np.arange(1, 10 + 2)

# AFN with no minimum temporal separation between near neighbors.
F, Fs = dimension.afn(x, tau=tau, dim=dim, window=0)
F1 = F[1:] / F[:-1]
F2 = Fs[1:] / Fs[:-1]

# AFN again, now keeping near neighbors at least one autocorrelation
# time apart.
E, Es = dimension.afn(x, tau=tau, dim=dim, window=tau)
E1 = E[1:] / E[:-1]
E2 = Es[1:] / Es[:-1]

plt.figure(1)
plt.title(r'AR(1) process with $a = 0.99$')
plt.xlabel(r'i')
plt.ylabel(r'$x_i$')
plt.plot(x)

plt.figure(2)
plt.title(r'AFN without any minimum temporal separation')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], F1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], F2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.figure(3)
plt.title(r'AFN with a minimum temporal separation of $%d$' % tau)
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], E1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], E2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.show()
--------------------------------------------------------------------------------
/examples/afn/henon.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""AFN applied to a Henon-map time series.

E1 levels off near an embedding dimension of 2, and E2 differs from 1
at many values of d, so the series is clearly deterministic.  The plot
agrees well with Fig. 1 of Cao (1997).
"""

from nolitsa import data, dimension
import matplotlib.pyplot as plt
import numpy as np

# First coordinate of the Henon map.
x = data.henon()[:, 0]

# Run the AFN algorithm.
dim = np.arange(1, 10 + 2)
E, Es = dimension.afn(x, tau=1, dim=dim, window=5)
E1 = E[1:] / E[:-1]
E2 = Es[1:] / Es[:-1]

plt.title(r'AFN for time series from the Henon map')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], E1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], E2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.show()
--------------------------------------------------------------------------------
/examples/afn/ikeda.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""AFN applied to an Ikeda-map time series.

E1 levels off near an embedding dimension of 4, and E2 differs from 1
at many values of d, so the series is clearly deterministic.  Compare
with Fig. 2 of Cao (1997).
"""

from nolitsa import data, dimension
import matplotlib.pyplot as plt
import numpy as np

# First coordinate of the Ikeda map.
x = data.ikeda()[:, 0]

# Run the AFN algorithm.
dim = np.arange(1, 10 + 2)
E, Es = dimension.afn(x, tau=1, dim=dim, window=5)
E1 = E[1:] / E[:-1]
E2 = Es[1:] / Es[:-1]

plt.title(r'AFN for time series from the Ikeda map')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], E1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], E2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.show()
--------------------------------------------------------------------------------
/examples/afn/laser.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""AFN for time series from a far-infrared laser.

The time series is from Data Set A of the Santa Fe Time Series
Competition.  This is map-like data from a far-infrared laser.

Since each value of the time series is an 8-bit integer (i.e., it's in
the range [0, 255]), the reconstructed phase space is essentially a grid
with zero dimension.  To actually measure the dimension of this data
set, we have to "kick" points off the grid a little bit by adding an
insignificant amount of noise.  See Example 6.4 in Kantz & Schreiber
(2004).

From the E1(d) curve, one concludes that the minimum embedding dimension
should be close to 8 [Cao (1997) reports 7 as the minimum embedding
dimension].  This is somewhat surprising since this series has a very
low correlation dimension (near 2.0).
"""

from nolitsa import dimension
import matplotlib.pyplot as plt
import numpy as np

# Load the laser data.
x = np.loadtxt('../series/laser.dat')

# Seed the RNG so that the example is reproducible, as is done in the
# other examples in this directory.
np.random.seed(17)

# Add uniform noise in [-0.025, 0.025] to "shake" the grid.
x = x + (-0.025 + 0.050 * np.random.random(len(x)))

# AFN algorithm.
dim = np.arange(1, 15 + 2)
E, Es = dimension.afn(x, tau=1, dim=dim, window=50)
E1, E2 = E[1:] / E[:-1], Es[1:] / Es[:-1]

plt.title(r'AFN for time series from a far-infrared laser')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], E1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], E2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.show()
--------------------------------------------------------------------------------
/examples/afn/lorenz.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""AFN applied to a Lorenz-attractor time series.

E1 levels off near an embedding dimension of 3, and E2 differs from 1
at many values of d, so the series is clearly deterministic.  The plot
agrees nicely with Fig. 3 of Cao (1997).
"""

from nolitsa import data, dimension
import matplotlib.pyplot as plt
import numpy as np

# x coordinate of the Lorenz attractor.
x = data.lorenz()[1][:, 0]

# Run the AFN algorithm.
dim = np.arange(1, 10 + 2)
E, Es = dimension.afn(x, tau=5, dim=dim, window=20)
E1 = E[1:] / E[:-1]
E2 = Es[1:] / Es[:-1]

plt.title(r'AFN for time series from the Lorenz attractor')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], E1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], E2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.show()
--------------------------------------------------------------------------------
/examples/afn/mackey-glass.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""AFN for the Mackey-Glass delay differential equation.

Depending on the initial condition, the minimum embedding dimension
comes out between 5 and 7, with both the E1 and E2 curves strongly
hinting at determinism.  Grassberger & Procaccia (1983) report a
correlation dimension of about 2.5 for the Mackey-Glass system with a
delay of 23, so the results are certainly comparable.
"""

import matplotlib.pyplot as plt
import numpy as np
from nolitsa import data, dimension

x = data.mackey_glass(tau=23.0, sample=0.46, n=1000)

# The series is resampled with a sampling step of 0.46, so the
# required time delay is 23.0 / 0.46 = 50.
tau = 50
dim = np.arange(1, 16 + 2)

# Run the AFN algorithm.
E, Es = dimension.afn(x, tau=tau, dim=dim, window=100)
E1 = E[1:] / E[:-1]
E2 = Es[1:] / Es[:-1]

plt.title(r'AFN for time series from the Mackey-Glass system')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], E1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], E2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.show()
--------------------------------------------------------------------------------
/examples/afn/roessler-8bit.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""AFN for 8-bit time series from the Rössler oscillator.

Since each point in the time series is an 8-bit integer (i.e., it's in
the range [-127, 127]), the reconstructed phase space is essentially a
grid with zero dimension.  To actually measure the dimension of this
data set, we have to "kick" points off the grid a little bit by adding
an insignificant amount of noise.  See Example 6.4 in Kantz & Schreiber
(2004).

But the quality of reconstruction depends on the noise level.  Adding
an insignificant amount of noise does not help at all!  This is
probably one of the rare cases where a higher level of additive noise
improves the results.
"""

from nolitsa import data, dimension, utils
import matplotlib.pyplot as plt
import numpy as np

# Generate data.
x = data.roessler(length=5000)[1][:, 0]

# Convert to 8-bit.
x = np.int8(utils.rescale(x, (-127, 127)))

# Seed the RNG so that the example is reproducible, as is done in the
# other examples in this directory.
np.random.seed(23)

# Add uniform noise of two different noise levels.
y1 = x + (-0.001 + 0.002 * np.random.random(len(x)))
y2 = x + (-0.5 + 1.0 * np.random.random(len(x)))

# AFN algorithm.
dim = np.arange(1, 10 + 2)
F, Fs = dimension.afn(y1, tau=14, dim=dim, window=40)
F1, F2 = F[1:] / F[:-1], Fs[1:] / Fs[:-1]

E, Es = dimension.afn(y2, tau=14, dim=dim, window=40)
E1, E2 = E[1:] / E[:-1], Es[1:] / Es[:-1]

plt.figure(1)
plt.title(r'AFN after corrupting with uniform noise in $[-0.001, 0.001]$')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], F1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], F2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.figure(2)
plt.title(r'AFN after corrupting with uniform noise in $[-0.5, 0.5]$')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], E1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], E2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.show()
--------------------------------------------------------------------------------
/examples/afn/roessler.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""AFN applied to a Rössler-oscillator time series.

E1 levels off near an embedding dimension of 4, and E2 differs from 1
at many values of d, so the series is clearly deterministic.
"""

from nolitsa import data, dimension
import matplotlib.pyplot as plt
import numpy as np

# x coordinate of the Rössler oscillator.
x = data.roessler()[1][:, 0]

# Run AFN using the cityblock metric.
dim = np.arange(1, 10 + 2)
E, Es = dimension.afn(x, tau=14, dim=dim, window=45, metric='cityblock')
E1 = E[1:] / E[:-1]
E2 = Es[1:] / Es[:-1]

plt.title(r'AFN for time series from the Rössler oscillator')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], E1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], E2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.show()
--------------------------------------------------------------------------------
/examples/d2/ar1.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Why the Theiler window matters, shown with an AR(1) series.

An AR(1) series is temporally correlated.  Without a judicious
(nonzero) Theiler window, the estimated dimension therefore converges
to the fractal dimension of the trajectory that the series traces out
in phase space -- which says nothing about any low-dimensional
structure in the underlying process.
"""

import matplotlib.pyplot as plt
import numpy as np
from nolitsa import d2

# Build the AR(1) series x[k] = a * x[k - 1] + noise.
N = 5000
x = np.empty(N)
np.random.seed(882)
eps = np.random.normal(size=(N), loc=0, scale=1.0)
a = 0.998

x[0] = eps[0]
for k in range(1, N):
    x[k] = a * x[k - 1] + eps[k]

# Use the autocorrelation time as the delay.
tau = 400

dim = np.arange(1, 10 + 1)

plt.figure(1)
plt.title(r'Local $D_2$ vs $r$ for AR(1) time series with $W = 0$')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

# No Theiler window: a spurious scaling region appears.
for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=0):
    plt.semilogx(r[3:-3], d2.d2(r, c))

plt.figure(2)
plt.title(r'Local $D_2$ vs $r$ for AR(1) time series with $W = 400$')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

# With a Theiler window of one autocorrelation time, it vanishes.
for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=400):
    plt.semilogx(r[3:-3], d2.d2(r, c))

plt.show()
--------------------------------------------------------------------------------
/examples/d2/brown.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""D2 for Brown noise.

Expected: D2 = 2 / (alpha - 1) = 2.0

Of course, this value is not due to the existence of any invariant
measure.  What is being measured here is the fractal dimension of the
Brownian trail.  The scaling region would vanish if we impose a nonzero
Theiler window, telling us that the underlying system is not
low dimensional.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import d2, data, utils

np.random.seed(101)
# Brown noise = power-law noise with alpha = 2.0; keep the first 10000
# points of a 2**14-point realization.
x = utils.rescale(data.falpha(alpha=2.0, length=(2 ** 14))[:10 * 1000])

tau = 500
dim = np.arange(1, 10 + 1)

plt.title('Local $D_2$ vs $r$ for Brown noise')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

rvals = utils.gprange(0.001, 1.0, 100)
for dists, csums in d2.c2_embed(x, tau=tau, dim=dim, window=0, r=rvals):
    plt.semilogx(dists[2:-2], d2.d2(dists, csums, hwin=2), color='#4682B4')

# Reference line at the expected dimension of the Brownian trail.
plt.semilogx(rvals, 2.0 * np.ones(100), color='#000000')
plt.show()
36 |
--------------------------------------------------------------------------------
/examples/d2/curve.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""D2 for a closed noisy curve.

Although there is a proper scaling region with D2 between 1.2 and 1.5,
it is higher than the expected value of 1.0, perhaps due to the additive
noise.
"""

import numpy as np
import matplotlib.pyplot as plt
from nolitsa import d2, utils

# Closed curve built from four harmonics, sampled over 50 full cycles.
t = np.linspace(0, 100 * np.pi, 5000)
x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.sin(5 * t)
# Add weak additive noise (signal-to-noise ratio 1000).
x = utils.corrupt(x, np.random.normal(size=5000), snr=1000)

# Time delay.
tau = 25

# Theiler window used to exclude temporally correlated pairs.
window = 100

# Embedding dimension.
dim = np.arange(1, 10)

plt.title('Local $D_2$ vs $r$ for a noisy closed curve')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=window):
    plt.semilogx(r[3:-3], d2.d2(r, c), color='#4682B4')

# NOTE(review): `r` below is the last array yielded by the loop above;
# the reference line at D2 = 1 reuses its trimmed length (len(r) - 6
# matches the r[3:-3] slice).
plt.plot(r[3:-3], np.ones(len(r) - 6), color='#000000')
plt.show()
36 |
--------------------------------------------------------------------------------
/examples/d2/henon.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""D2 of the Henon map.

The estimates here match the "accepted" value of 1.220 quite closely.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import d2, data, utils

x = utils.rescale(data.henon(length=5000)[:, 0])

tau = 1
dim = np.arange(1, 10 + 1)

plt.title('Local $D_2$ vs $r$ for Henon map')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

rvals = utils.gprange(0.001, 1.0, 100)
for dists, csums in d2.c2_embed(x, tau=tau, dim=dim, window=2, r=rvals):
    plt.semilogx(dists[3:-3], d2.d2(dists, csums), color='#4682B4')

# Reference line at the accepted value of D2 for the Henon map.
plt.semilogx(rvals, 1.220 * np.ones(100), color='#000000')
plt.show()
29 |
--------------------------------------------------------------------------------
/examples/d2/ikeda.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""D2 of the Ikeda map.

The estimates here match the "accepted" value of 1.690 quite closely.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import d2, data, utils

x = utils.rescale(data.ikeda(length=5000)[:, 0])

tau = 1
dim = np.arange(1, 10 + 1)

plt.title('Local $D_2$ vs $r$ for Ikeda map')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

rvals = utils.gprange(0.001, 1.0, 100)
for dists, csums in d2.c2_embed(x, tau=tau, dim=dim, window=2, r=rvals):
    plt.semilogx(dists[3:-3], d2.d2(dists, csums), color='#4682B4')

# Reference line at the accepted value of D2 for the Ikeda map.
plt.semilogx(rvals, 1.690 * np.ones(100), color='#000000')
plt.show()
29 |
--------------------------------------------------------------------------------
/examples/d2/lorenz.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""D2 of the Lorenz system.

The estimates here match the "accepted" value of 2.068 quite closely.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import d2, data, utils

x = utils.rescale(data.lorenz(length=5000)[1][:, 0])

tau = 5
dim = np.arange(1, 10 + 1)

plt.title('Local $D_2$ vs $r$ for Lorenz attractor')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

for dists, csums in d2.c2_embed(x, tau=tau, dim=dim, window=50):
    plt.semilogx(dists[3:-3], d2.d2(dists, csums), color='#4682B4')

# Reference line at the accepted value of D2 for the Lorenz attractor.
plt.semilogx(utils.gprange(0.001, 1.0, 100), 2.068 * np.ones(100),
             color='#000000')
plt.show()
28 |
--------------------------------------------------------------------------------
/examples/d2/mackey-glass.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""D2 for the Mackey-Glass system.

The estimates here are, depending on the initial condition, sometimes
lower than the value (D2 ~ 2.4) calculated by Grassberger & Procaccia
(1983).  One should use average over an ensemble of initial conditions
in such a case.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import d2, data, utils

x = utils.rescale(data.mackey_glass(tau=23.0, sample=0.46, n=1000))

# Since we're resampling the time series using a sampling step of
# 0.46, the time delay required is 23.0/0.46 = 50.
tau = 50
dim = np.arange(1, 10 + 1)

plt.title('Local $D_2$ vs $r$ for Mackey-Glass system')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

rvals = utils.gprange(0.001, 1.0, 100)
for dists, csums in d2.c2_embed(x, tau=tau, dim=dim, window=100, r=rvals):
    plt.semilogx(dists[3:-3], d2.d2(dists, csums), color='#4682B4')

# Reference line at Grassberger & Procaccia's estimate.
plt.semilogx(rvals, 2.4 * np.ones(100), color='#000000')
plt.show()
34 |
--------------------------------------------------------------------------------
/examples/d2/roessler.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""D2 of the Rössler oscillator.

The estimates here match the "accepted" value of 1.991 quite closely.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import d2, data, utils

# Fixed initial condition so that the run is reproducible.
x0 = [-3.2916983, -1.42162302, 0.02197593]
x = utils.rescale(data.roessler(length=5000, x0=x0)[1][:, 0])

tau = 14
dim = np.arange(1, 10 + 1)

plt.title(u'Local $D_2$ vs $r$ for Rössler oscillator')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

rvals = utils.gprange(0.001, 1.0, 100)
for dists, csums in d2.c2_embed(x, tau=tau, dim=dim, window=50, r=rvals):
    plt.semilogx(dists[3:-3], d2.d2(dists, csums), color='#4682B4')

# Reference line at the accepted value of D2.
plt.semilogx(rvals, 1.991 * np.ones(100), color='#000000')
plt.show()
30 |
--------------------------------------------------------------------------------
/examples/d2/spiral.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Correlation sum/D2 for a spiral.

A spiral, though a one-dimensional curve, is a nonstationary object.
Thus, the estimated correlation dimension would heavily depend on the
Theiler window used.  However, the values of C(r) at large r's would
roughly be the same.
"""

import numpy as np
import matplotlib.pyplot as plt
from nolitsa import d2, utils

# Archimedean spiral traced over 25 turns (phi spans 50*pi).
phi = np.linspace(2 * np.pi, 52 * np.pi, 1000)
x = phi * np.cos(phi)
x = utils.rescale(x)

dim = np.arange(1, 10 + 1)
tau = 10
r = utils.gprange(0.01, 1.0, 100)

plt.figure(1)
plt.title('Correlation sum $C(r)$ without any Theiler window')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Correlation sum $C(r)$')

# NOTE(review): the loop target shadows the `r` defined above.  The
# iterator is created before the first rebinding, so this call still
# receives the gprange grid; the second call below receives whatever
# `r` the first loop yielded last — presumably the same grid, confirm
# against d2.c2_embed.
for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=0, r=r):
    plt.loglog(r, c)

plt.figure(2)
plt.title('Correlation sum $C(r)$ with a Theiler window of 100')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Correlation sum $C(r)$')

for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=100, r=r):
    plt.loglog(r, c)

plt.show()
41 |
--------------------------------------------------------------------------------
/examples/d2/white.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""D2 for white noise.

D2 is (theoretically) equal to the embedding dimension for white noise.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import d2, utils

# Uncorrelated noise, uniform in [0, 1).
x = np.random.random(5 * 1000)

tau = 1
dim = np.arange(1, 10 + 1)

plt.title('Local $D_2$ vs $r$ for white noise')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

rvals = utils.gprange(0.001, 1.0, 100)
for dists, csums in d2.c2_embed(x, tau=tau, dim=dim, window=2, r=rvals):
    plt.semilogx(dists[1:-1], d2.d2(dists, csums, hwin=1), color='#4682B4')

plt.show()
28 |
--------------------------------------------------------------------------------
/examples/data/mackey-glass.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Generate time series using the Mackey-Glass equation.

Generates time series using the discrete approximation of the
Mackey-Glass delay differential equation described by Grassberger &
Procaccia (1983).

Typical values of the parameters in the Mackey-Glass delay differential
equation are: a = 0.2, b = 0.1, c = 10.0, and tau = 23.0 with the grid
size n usually taken larger than 1000.
"""

import matplotlib.pyplot as plt

from nolitsa import data

x = data.mackey_glass(tau=23.0, sample=0.46, n=1000)

# Since we're resampling the time series using a sampling step of
# 0.46, the time delay of the resampled series is 23.0/0.46 = 50.
lag = 50
plt.title('Mackey-Glass delay differential equation')
plt.plot(x[lag:], x[:-lag])
plt.xlabel(r'$x(t - \tau)$')
plt.ylabel(r'$x(t)$')
plt.show()
27 |
--------------------------------------------------------------------------------
/examples/delay/adfd_mackey-glass.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""ADFD algorithm using time series from the Mackey-Glass system.

The time delay is taken to be the delay at which the derivative of the
ADFD falls to 40% of its initial value.  The actual time delay used for
generating the time series is 17, and the estimated time delay is 15.
Compare with Fig. 8 of Rosenstein et al. (1994).
"""

import numpy as np
import matplotlib.pyplot as plt
from nolitsa import data, delay

sample = 0.25
x = data.mackey_glass(length=2500, a=0.2, b=0.1, c=10.0, tau=17.0,
                      discard=500, sample=sample)

# Embedding dimension and maximum delay for the ADFD computation.
dim = 7
maxtau = 50
tau = np.arange(maxtau)

# ADFD curve and its discrete derivative.
disp = delay.adfd(x, dim=dim, maxtau=maxtau)
ddisp = np.diff(disp)

# Index of the first delay where the ADFD derivative drops below 40%
# of its initial value.
forty = np.argmax(ddisp < 0.4 * ddisp[1])

# Message format fixed to match adfd_roessler.py ('Time delay = %d').
print('Time delay = %d' % forty)

fig, ax1 = plt.subplots()

ax1.set_xlabel(r'Time ($\tau\Delta t$)')
ax1.set_ylabel(r'$\mathrm{ADFD}$')
ax1.plot(tau[1:] * sample, disp[1:])

# Derivative of the ADFD on a twin y-axis, with the 40% point marked.
ax2 = ax1.twinx()
ax2.plot(tau[1:] * sample, ddisp, 'g--')
ax2.plot(tau[forty + 1] * sample, ddisp[forty], 'o')
ax2.set_ylabel(r'$\frac{d}{d\tau}(\mathrm{ADFD}$)')

plt.show()
42 |
--------------------------------------------------------------------------------
/examples/delay/adfd_roessler.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""ADFD algorithm using time series from the Rössler oscillator.

The time delay is taken to be the delay at which the derivative of the
ADFD falls to 40% of its initial value.  The estimated time delay is 5.
Compare with Fig. 6 of Rosenstein et al. (1994).
"""

import numpy as np
import matplotlib.pyplot as plt
from nolitsa import data, delay

dt = 0.10
x = data.roessler(a=0.20, b=0.40, c=5.7, sample=dt, length=2500,
                  discard=5000)[1][:, 0]

dim = 7
maxtau = 50
tau = np.arange(maxtau)

# ADFD curve and its discrete derivative.
disp = delay.adfd(x, dim=dim, maxtau=maxtau)
ddisp = np.diff(disp)

# First delay at which the derivative drops below 40% of its initial
# value.
forty = np.argmax(ddisp < 0.4 * ddisp[1])
print('Time delay = %d' % forty)

fig, ax1 = plt.subplots()

ax1.set_xlabel(r'Time ($\tau\Delta t$)')
ax1.set_ylabel(r'$\mathrm{ADFD}$')
ax1.plot(tau[1:] * dt, disp[1:])

# Derivative of the ADFD on a twin y-axis, with the 40% point marked.
ax2 = ax1.twinx()
ax2.plot(tau[1:] * dt, ddisp, 'g--')
ax2.plot(tau[forty + 1] * dt, ddisp[forty], 'o')
ax2.set_ylabel(r'$\frac{d}{d\tau}(\mathrm{ADFD}$)')

plt.show()
41 |
--------------------------------------------------------------------------------
/examples/delay/dmibins.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Delayed mutual information calculation and number of bins.

The time delay is often picked to be the location of the first minimum
of the delayed mutual information (DMI) of the series.  The
probabilities required for its computation are estimated by binning the
time series.

For many examples, the DMI at a lag of zero computed with 2^m bins is
approximately m bits.  This is because the distribution is nearly flat
when the number of bins is small, making the probability of being in a
bin ~ 2^-m.

Surprisingly, using a small number of bins doesn't seem to affect the
estimation of the delay.  Even with two bins, the extremas of the DMI
are clearly visible.  (Why?)
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import data, delay

x = data.roessler()[1][:, 0]

plt.title(r'Delayed mutual information for the Rössler oscillator')
plt.xlabel(r'$\tau$')
plt.ylabel(r'$I(\tau)$')

# One DMI curve per bin count: 2, 4, ..., 256.
for nbins in 2 ** np.arange(1, 8 + 1):
    dmi_curve = delay.dmi(x, maxtau=250, bins=nbins)
    plt.plot(dmi_curve, label=(r'Bins = $%d$' % nbins))

plt.legend()
plt.show()
37 |
--------------------------------------------------------------------------------
/examples/delay/dmibins2.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Delayed mutual information calculation and number of bins.

The time delay is often picked to be the location of the first minimum
of the delayed mutual information (DMI) of the series.  The
probabilities required for its computation are estimated by binning the
time series.

For many examples, the DMI at a lag of zero computed with 2^m bins is
approximately m bits.  This is because the distribution is nearly flat
when the number of bins is small, making the probability of being in a
bin ~ 2^-m.

Surprisingly, using a small number of bins doesn't seem to affect the
estimation of the delay.  Even with two bins, the extremas of the DMI
are clearly visible.  (Why?)
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import data, delay

x = data.mackey_glass()

plt.title(r'Delayed mutual information for the Mackey-Glass system')
plt.xlabel(r'$\tau$')
plt.ylabel(r'$I(\tau)$')

# One DMI curve per bin count: 2, 4, ..., 256.
for nbins in 2 ** np.arange(1, 8 + 1):
    dmi_curve = delay.dmi(x, maxtau=500, bins=nbins)
    plt.plot(dmi_curve, label=(r'Bins = $%d$' % nbins))

plt.legend()
plt.show()
37 |
--------------------------------------------------------------------------------
/examples/delay/henon.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Time delay estimation for time series from the Henon map.

For map like data, the redundancy between components of the time delayed
vectors decrease drastically (or equivalently, the irrelevance increases
rapidly).  Best results are often obtained with a time delay of 1.

Here, we see that for data coming out of the Henon map, the delayed
mutual information curve (which does not have any local minima) gives
us a very bad estimate of the time delay.
"""

import numpy as np
import matplotlib.pyplot as plt
from nolitsa import data, delay

x = data.henon()[:, 0]

# Autocorrelation and delayed mutual information up to a lag of 50.
maxtau = 50
lag = np.arange(maxtau)
acf = delay.acorr(x, maxtau=maxtau)
dmi = delay.dmi(x, maxtau=maxtau)

# Autocorrelation time: first lag where the ACF drops below 1/e.
acf_delay = np.argmax(acf < 1.0 / np.e)
print(r'Autocorrelation time = %d' % acf_delay)

plt.figure(1)

plt.subplot(211)
plt.title(r'Delay estimation for Henon map')
plt.ylabel(r'Delayed mutual information')
plt.plot(lag, dmi)

plt.subplot(212)
plt.xlabel(r'Time delay $\tau$')
plt.ylabel(r'Autocorrelation')
plt.plot(lag, acf, acf_delay, acf[acf_delay], 'o')

plt.figure(2)
plt.subplot(121)
plt.title(r'Time delay = 10')
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-10], x[10:], '.')

plt.subplot(122)
plt.title(r'Time delay = %d' % acf_delay)
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-acf_delay], x[acf_delay:], '.')

plt.show()
55 |
--------------------------------------------------------------------------------
/examples/delay/ikeda.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Time delay estimation for time series from the Ikeda map.

For map like data, the redundancy between components of the time delayed
vectors decrease drastically (or equivalently, the irrelevance increases
rapidly).  Best results are often obtained with a time delay of 1.

Here, we see that for data coming out of the Ikeda map, the delayed
mutual information curve (which does not have any local minima) gives
us a very bad estimate of the time delay.
"""

import numpy as np
import matplotlib.pyplot as plt
from nolitsa import data, delay

x = data.ikeda()[:, 0]

# Autocorrelation and delayed mutual information up to a lag of 50.
maxtau = 50
lag = np.arange(maxtau)
acf = delay.acorr(x, maxtau=maxtau)
dmi = delay.dmi(x, maxtau=maxtau)

# Autocorrelation time: first lag where the ACF drops below 1/e.
acf_delay = np.argmax(acf < 1.0 / np.e)
print(r'Autocorrelation time = %d' % acf_delay)

plt.figure(1)

plt.subplot(211)
plt.title(r'Delay estimation for Ikeda map')
plt.ylabel(r'Delayed mutual information')
plt.plot(lag, dmi)

plt.subplot(212)
plt.xlabel(r'Time delay $\tau$')
plt.ylabel(r'Autocorrelation')
plt.plot(lag, acf, acf_delay, acf[acf_delay], 'o')

plt.figure(2)
plt.subplot(121)
plt.title(r'Time delay = 10')
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-10], x[10:], '.')

plt.subplot(122)
plt.title(r'Time delay = %d' % acf_delay)
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-acf_delay], x[acf_delay:], '.')

plt.show()
55 |
--------------------------------------------------------------------------------
/examples/delay/lorenz.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """Time delay estimation for time series from the Lorenz attractor.
5 |
6 | The first minimum of the delayed mutual information occurs at 5 and the
7 | autocorrelation time is 11. A better choice is provided by the mutual
8 | information curve. If one plots the 2D phase portrait of the attractor
9 | with a delay of 11, one can see that it's overfolded. Note that both
10 | these values depend on the sampling time used and should not be taken as
11 | "universal" time delays for reconstructing the Lorenz attractor.
12 | """
13 |
14 | import numpy as np
15 | import matplotlib.pyplot as plt
16 | from nolitsa import data, delay, noise
17 |
18 |
def localmin(x):
    """Return all local minima from the given data set.

    Returns all local minima from the given data set.  Note that even
    "kinky" minima (which are probably not real minima) will be
    returned.

    Parameters
    ----------
    x : array
        1D scalar data set.

    Returns
    -------
    i : array
        Array containing location of all local minima.
    """
    # A local minimum is where the slope sign turns from - to +.
    slope_sign = np.sign(np.diff(x))
    turns_up = np.diff(slope_sign) > 0
    return turns_up.nonzero()[0] + 1
37 |
38 |
x = data.lorenz()[1][:, 0]

# Autocorrelation and delayed mutual information up to a lag of 100.
maxtau = 100
lag = np.arange(maxtau)
acf = delay.acorr(x, maxtau=maxtau)
dmi = delay.dmi(x, maxtau=maxtau)

# Smooth the DMI curve with an SMA before looking for local minima so
# that "kinky" minima are removed.
dmi_delay = localmin(noise.sma(dmi, hwin=1)) + 1
acf_delay = np.argmax(acf < 1.0 / np.e)

print(r'Minima of delayed mutual information = %s' % dmi_delay)
print(r'Autocorrelation time = %d' % acf_delay)

plt.figure(1)

plt.subplot(211)
plt.title(r'Delay estimation for Lorenz attractor')
plt.ylabel(r'Delayed mutual information')
plt.plot(lag, dmi, dmi_delay, dmi[dmi_delay], 'o')

plt.subplot(212)
plt.xlabel(r'Time delay $\tau$')
plt.ylabel(r'Autocorrelation')
plt.plot(lag, acf, acf_delay, acf[acf_delay], 'o')

plt.figure(2)
plt.subplot(121)
plt.title(r'Time delay = %d' % dmi_delay[0])
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-dmi_delay[0]], x[dmi_delay[0]:])

plt.subplot(122)
plt.title(r'Time delay = %d' % acf_delay)
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-acf_delay], x[acf_delay:])

plt.show()
80 |
--------------------------------------------------------------------------------
/examples/delay/roessler.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """Time delay estimation for time series from the Rössler oscillator.
5 |
6 | The first minimum of the delayed mutual information occurs at 14 and the
7 | autocorrelation time is 11. Note that both these values depend on the
8 | sampling time used and should not be taken as "universal" time delays
9 | for reconstructing the Rössler oscillator.
10 | """
11 |
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | from nolitsa import data, delay, noise
15 |
16 |
def localmin(x):
    """Return all local minima from the given data set.

    Returns all local minima from the given data set.  Note that even
    "kinky" minima (which are probably not real minima) will be
    returned.

    Parameters
    ----------
    x : array
        1D scalar data set.

    Returns
    -------
    i : array
        Array containing location of all local minima.
    """
    # Minima sit where the sign of the first difference flips upward.
    first_diff = np.diff(x)
    curvature = np.diff(np.sign(first_diff))
    return np.nonzero(curvature > 0)[0] + 1
35 |
36 |
x = data.roessler()[1][:, 0]

# Autocorrelation and delayed mutual information up to a lag of 250.
maxtau = 250
lag = np.arange(maxtau)
acf = delay.acorr(x, maxtau=maxtau)
dmi = delay.dmi(x, maxtau=maxtau)

# Smooth the DMI curve with an SMA before looking for local minima so
# that "kinky" minima are removed.
dmi_delay = localmin(noise.sma(dmi, hwin=1)) + 1
acf_delay = np.argmax(acf < 1.0 / np.e)

print(r'Minima of delayed mutual information = %s' % dmi_delay)
print(r'Autocorrelation time = %d' % acf_delay)

plt.figure(1)

plt.subplot(211)
plt.title(r'Delay estimation for Rössler oscillator')
plt.ylabel(r'Delayed mutual information')
plt.plot(lag, dmi, dmi_delay, dmi[dmi_delay], 'o')

plt.subplot(212)
plt.xlabel(r'Time delay $\tau$')
plt.ylabel(r'Autocorrelation')
plt.plot(lag, acf, acf_delay, acf[acf_delay], 'o')

plt.figure(2)
plt.subplot(121)
plt.title(r'Time delay = %d' % dmi_delay[0])
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-dmi_delay[0]], x[dmi_delay[0]:])

plt.subplot(122)
plt.title(r'Time delay = %d' % acf_delay)
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-acf_delay], x[acf_delay:])

plt.show()
78 |
--------------------------------------------------------------------------------
/examples/delay/sine.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Autocorrelation function of a *finite* sine wave.

Autocorrelation function of a finite sine wave over n cycles is:

    r(tau) = [(2*n*pi - tau)*cos(tau) + sin(tau)] / 2*n*pi

As n -> infinity, r(tau) = cos(tau) as expected.
"""

import numpy as np
import matplotlib.pyplot as plt
from nolitsa import delay

# Sample n = 8 full cycles at 2**10 points per cycle.
n = 2 ** 3
t = np.linspace(0, n * 2 * np.pi, n * 2 ** 10)
x = np.sin(t)

# Numerical autocorrelation vs the closed-form expression above.
r = delay.acorr(x)
theory = ((2 * n * np.pi - t) * np.cos(t) + np.sin(t)) / (2 * n * np.pi)

plt.title(r'Autocorrelation of a finite sine wave')
plt.xlabel(r'$t$')
plt.ylabel(r'$r(t)$')
plt.plot(t[::25], r[::25], 'o', label='Numerical')
plt.plot(t, theory, label='Theoretical')

plt.legend()
plt.show()
32 |
--------------------------------------------------------------------------------
/examples/fnn/corrnum.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""FNN for correlated random numbers.

Correlated random numbers are created by running a simple moving average
(with 61 bins) over uncorrelated random numbers in [0, 1].  Without
a Theiler window, the FNN fraction drops to zero soon after
d ~ log(3000) / log(10) ~ 4.0.  Ordinarily the second test would have
helped here and an increase in FNN should occur.  But here, the strong
temporal correlations between the points in the series prevent it from
working.

Of course, once we impose a Theiler window equal to the autocorrelation
time of the series, the second test reports large amounts of FNNs.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import dimension, noise, delay

# Correlated random numbers: SMA (61-point window) over uniform noise.
np.random.seed(17)
x = noise.sma(np.random.random(3000), hwin=30)

# Autocorrelation time of the series.
tau = np.argmax(delay.acorr(x) < 1 / np.e)

dim = np.arange(1, 10 + 1)

# FNN without Theiler window.
f1, f2, f3 = dimension.fnn(x, tau=tau, dim=dim, window=0, metric='cityblock')

plt.figure(1)
plt.title(r'FNN for correlated random numbers (no Theiler window)')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'FNN (%)')
plt.plot(dim, 100 * f1, 'bo--', label=r'Test I')
plt.plot(dim, 100 * f2, 'g^--', label=r'Test II')
plt.plot(dim, 100 * f3, 'rs-', label=r'Test I + II')
plt.legend()

# FNN with Theiler window equal to autocorrelation time.
f1, f2, f3 = dimension.fnn(x, tau=tau, dim=dim, window=tau, metric='cityblock')

plt.figure(2)
plt.title(r'FNN for correlated random numbers (Theiler window = %d)' % tau)
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'FNN (%)')
plt.plot(dim, 100 * f1, 'bo--', label=r'Test I')
plt.plot(dim, 100 * f2, 'g^--', label=r'Test II')
plt.plot(dim, 100 * f3, 'rs-', label=r'Test I or II')
plt.legend()

plt.show()
55 |
--------------------------------------------------------------------------------
/examples/fnn/henon.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""FNN for time series from the Henon map.

As expected, the FNN fraction goes to zero at an embedding dimension
equal to 2.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import data, dimension

# Generate data.
x = data.henon(length=5000)[:, 0]

dim = np.arange(1, 10 + 1)
fracs = dimension.fnn(x, tau=1, dim=dim, window=10, metric='cityblock')
f1, f2, f3 = fracs

plt.title(r'FNN for Henon map')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'FNN (%)')
plt.plot(dim, 100 * f1, 'bo--', label=r'Test I')
plt.plot(dim, 100 * f2, 'g^--', label=r'Test II')
plt.plot(dim, 100 * f3, 'rs-', label=r'Test I + II')
plt.legend()

plt.show()
29 |
--------------------------------------------------------------------------------
/examples/fnn/ikeda.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""FNN for time series from the Ikeda map.

This is a purely deterministic time series, yet we see the second test
reporting FNNs at large embedding dimensions.  The whole problem is that
the fraction of FNN strongly depends on the threshold parameters
used (apart from the metric).
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import data, dimension

# Generate data.
x = data.ikeda(length=5000)[:, 0]
dim = np.arange(1, 15 + 1)

# Combined (Test I + II) FNN fractions for three different metrics.
fracs = [dimension.fnn(x, tau=1, dim=dim, window=0, metric=m)[2]
         for m in ('chebyshev', 'euclidean', 'cityblock')]
f1, f2, f3 = fracs

plt.title(r'FNN for the Ikeda map')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'FNN (%)')
plt.plot(dim, 100 * f1, 'bo-', label=r'Chebyshev')
plt.plot(dim, 100 * f2, 'g^-', label=r'Euclidean')
plt.plot(dim, 100 * f3, 'rs-', label=r'Cityblock')
plt.legend()

plt.show()
33 |
--------------------------------------------------------------------------------
/examples/fnn/mackey-glass.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""FNN for time series from the Mackey-Glass equation.

The embedding dimension reported is around 4-5. But the second test
reports FNNs at larger d's. Should we trust our results in such a case?
"""

from nolitsa import data, dimension
import matplotlib.pyplot as plt
import numpy as np

# Generate data.
x = data.mackey_glass(tau=23.0, sample=0.46, n=1000)

# Since we're resampling the time series using a sampling step of
# 0.46, the time delay required is 23.0/0.46 = 50.
tau = 50
dim = np.arange(1, 15 + 1)

# Use the `tau` variable computed above (the original passed a
# hard-coded literal 50, leaving the variable unused).
f1, f2, f3 = dimension.fnn(x, tau=tau, dim=dim, window=100,
                           metric='euclidean')

plt.title(r'FNN for the Mackey-Glass delay differential equation')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'FNN (%)')
plt.plot(dim, 100 * f1, 'bo--', label=r'Test I')
plt.plot(dim, 100 * f2, 'g^--', label=r'Test II')
plt.plot(dim, 100 * f3, 'rs-', label=r'Test I or II')
plt.legend()

plt.show()
--------------------------------------------------------------------------------
/examples/fnn/metric.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""FNN is metric dependent for noisy data.

The FNN fraction is metric dependent for noisy time series. In
particular, the second FNN test, which measures the "boundedness" of the
reconstructed attractor depends heavily on the metric used. E.g., if
the Chebyshev metric is used, the near-neighbor distances in the
reconstructed attractor are always bounded and therefore the reported
FNN fraction becomes a constant (approximately) instead of increasing
with the embedding dimension.
"""

from nolitsa import dimension
import matplotlib.pyplot as plt
import numpy as np


def _plot_fnn(fignum, x, dim, metric, title):
    """Compute the FNN fractions of x with the given metric and plot
    all three tests in figure `fignum`."""
    plt.figure(fignum)
    f1, f2, f3 = dimension.fnn(x, tau=1, dim=dim, window=0, metric=metric)
    plt.title(title)
    plt.xlabel(r'Embedding dimension $d$')
    plt.ylabel(r'FNN (%)')
    plt.plot(dim, 100 * f1, 'bo--', label=r'Test I')
    plt.plot(dim, 100 * f2, 'g^--', label=r'Test II')
    plt.plot(dim, 100 * f3, 'rs-', label=r'Test I + II')
    plt.legend()


# Generate data.
x = np.random.normal(size=5000)
dim = np.arange(1, 10 + 1)

# Same series and dimensions, three different metrics.
_plot_fnn(1, x, dim, 'chebyshev', r'FNN with Chebyshev metric')
_plot_fnn(2, x, dim, 'euclidean', r'FNN with Euclidean metric')
_plot_fnn(3, x, dim, 'cityblock', r'FNN with cityblock (Manhattan) metric')

plt.show()
--------------------------------------------------------------------------------
/examples/fnn/noise.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""FNN for uncorrelated random numbers.

Note how the fraction of FNN reported by Test I goes to zero soon after
d = log(5000) / log(10) ~ 3.7 (the time series generated below has
5000 points, not 10^4 as an earlier version of this note claimed).
Of course, Test II shows that something is awry.
"""

from nolitsa import dimension
import matplotlib.pyplot as plt
import numpy as np

# Generate data: 5000 uncorrelated uniform deviates in [0, 1).
x = np.random.random(5000)

dim = np.arange(1, 10 + 1)
f1, f2, f3 = dimension.fnn(x, tau=1, dim=dim, window=0, metric='euclidean')

plt.title(r'FNN for uncorrelated random numbers in $[0, 1]$')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'FNN (%)')
plt.plot(dim, 100 * f1, 'bo--', label=r'Test I')
plt.plot(dim, 100 * f2, 'g^--', label=r'Test II')
plt.plot(dim, 100 * f3, 'rs-', label=r'Test I + II')
plt.legend()

plt.show()
--------------------------------------------------------------------------------
/examples/lyapunov/curve.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Maximum Lyapunov exponent of a closed noisy curve.

A trajectory in the form of a closed curve should have a Lyapunov
exponent equal to zero (or the average divergence should not vary with
time). But our curves for the average divergence appear to be
oscillatory and don't look very flat. What's wrong?
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import lyapunov, utils

# Closed curve built from a few harmonics, corrupted with weak
# Gaussian measurement noise (SNR = 1000).
t = np.linspace(0, 100 * np.pi, 5000)
x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.sin(5 * t)
x = utils.corrupt(x, np.random.normal(size=5000), snr=1000)

tau = 25        # time delay
window = 100    # Theiler window
dim = [10]      # embedding dimension

# Average divergence of initially close trajectories.
d = lyapunov.mle_embed(x, dim=dim, tau=tau, maxt=300, window=window)[0]

plt.title('Maximum Lyapunov exponent for a closed curve')
plt.xlabel(r'Time $t$')
plt.ylabel(r'Average divergence $\langle d_i(t) \rangle$')
plt.plot(t[:300], d)

plt.show()
--------------------------------------------------------------------------------
/examples/lyapunov/henon.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Maximum Lyapunov exponent of the Henon map.

The "accepted" value is ~ 0.419, which is quite close to the estimates
we get here. See Fig. 3(b) of Rosenstein et al. (1993).
"""

from nolitsa import data, lyapunov
import numpy as np
import matplotlib.pyplot as plt

x = data.henon(length=5000)[:, 0]

# Time delay.
tau = 1

# Embedding dimension.
dim = [2]

# Average divergence over the first 25 iterations.
d = lyapunov.mle_embed(x, dim=dim, tau=tau, maxt=25)[0]
t = np.arange(25)

plt.title('Maximum Lyapunov exponent for the Henon system')
plt.xlabel(r'Time $t$')
plt.ylabel(r'Average divergence $\langle d_i(t) \rangle$')
plt.plot(t, d)

# Reference line with the accepted slope of 0.419.
plt.plot(t, t * 0.419 + d[0], '--')

plt.show()
--------------------------------------------------------------------------------
/examples/lyapunov/lorenz.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Maximum Lyapunov exponent for the Lorenz system.

Our estimate is quite close to the "accepted" value of 1.50.
Cf. Fig. 2 of Rosenstein et al. (1993).
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import data, lyapunov

# Sampling time and initial condition.
sample = 0.01
x0 = [0.62225717, -0.08232857, 30.60845379]

# First coordinate of the Lorenz flow.
x = data.lorenz(length=5000, sample=sample, x0=x0,
                sigma=16.0, beta=4.0, rho=45.92)[1][:, 0]

window = 60   # Theiler window
tau = 13      # time delay
dim = [5]     # embedding dimension

# Average divergence of initially close trajectories.
d = lyapunov.mle_embed(x, dim=dim, tau=tau, maxt=300, window=window)[0]
t = np.arange(300)

plt.title('Maximum Lyapunov exponent for the Lorenz system')
plt.xlabel(r'Time $t$')
plt.ylabel(r'Average divergence $\langle d_i(t) \rangle$')
plt.plot(sample * t, d)

# Reference line with the accepted slope of 1.50.
plt.plot(sample * t, sample * t * 1.50, '--')

plt.show()
--------------------------------------------------------------------------------
/examples/lyapunov/roessler.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Maximum Lyapunov exponent of the Rössler oscillator.

The "accepted" value is 0.0714, which is quite close to what we get.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import data, lyapunov, utils

sample = 0.2
x0 = [-3.2916983, -1.42162302, 0.02197593]

# First coordinate of the Rössler flow.
x = data.roessler(length=3000, x0=x0, sample=sample)[1][:, 0]

# Choose appropriate Theiler window.
# Since Rössler is an aperiodic oscillator, the average time period is
# a good choice.
f, p = utils.spectrum(x)
window = int(1 / f[np.argmax(p)])

tau = 7     # time delay
dim = [3]   # embedding dimension

# Average divergence of initially close trajectories.
d = lyapunov.mle_embed(x, dim=dim, tau=tau, maxt=200, window=window)[0]
t = np.arange(200)

plt.title(u'Maximum Lyapunov exponent for the Rössler oscillator')
plt.xlabel(r'Time $t$')
plt.ylabel(r'Average divergence $\langle d_i(t) \rangle$')
plt.plot(sample * t, d)

# Reference line with the accepted slope of 0.0714.
plt.plot(sample * t, d[0] + sample * t * 0.0714, '--')

plt.show()
--------------------------------------------------------------------------------
/examples/noise/breath.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Filtering human breath data.

The data was obtained from 2048 continuous samples of dataset B1
(starting from the 12750th) of the Santa Fe time series contest [1].
This data is low dimensional, and is thought to be a limit cycle [See
Example 10.7 of Kantz & Schreiber (2004).] As can be seen, the
structure of the limit cycle is much more prominent when the filtered
time series is used.

[1]: http://www.physionet.org/physiobank/database/santa-fe/
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import noise, utils

# Second column of the data file holds the chest volume.
x = utils.rescale(np.loadtxt('../series/br2.dat')[:, 1])

# Simple nonlinear noise reduction.
y = noise.nored(x, dim=7, r=0.23, repeat=5, tau=1)

# Delay plots of the raw and the filtered series.
for fignum, series, title in ((1, x, 'Noisy human breath data'),
                              (2, y, 'Filtered human breath data')):
    plt.figure(fignum)
    plt.title(title)
    plt.xlabel(r'$x(t)$')
    plt.ylabel(r'$x(t + \tau)$')
    plt.plot(series[:-1], series[1:], '.')

plt.show()
--------------------------------------------------------------------------------
/examples/noise/goat.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Cleaning the GOAT vowel.

To generate this data set, I recorded a friend saying the "GOAT vowel"
phoneme /əʊ/ (the vowel sound in "goat", "boat", etc. [1]) and took a
2 second section of the recording that looked fairly periodic.
Although the audio was recorded at 44.1 kHz, I downsampled it to
11.025 kHz to create a more manageable data set. The original log
that Audacity produced during the recording is given below:

Sample Rate: 44100 Hz. Sample values on linear scale. 1 channel (mono).
Length processed: 88200 samples, 2.00000 seconds.
Peak amplitude: 0.89125 (lin) -1.00000 dB. Unweighted rms: -6.63167 dB.
DC offset: -0.00015 linear, -76.75034 dB.

A fairly stationary segment of the time series can be found between
samples 9604 and 14572. This data comes from a limit cycle whose
structure becomes more prominent after filtering. The time delay of 14
used for embedding is the quarter of the average time period of the
oscillations.

NOTE: An audio recording of this data can be heard in the file
"goat.mp3" in the "series" directory.

[1]: http://teflpedia.com/IPA_phoneme_/%C9%99%CA%8A/
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import noise

# Stationary segment of the recording (amplitude is the second column).
x = np.loadtxt('../series/goat.dat', usecols=[1])[9604:14572]

plt.figure(1)
plt.title('Noisy goat vowel')
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + 14)$')
plt.plot(x[:-14], x[14:], '.')

# Nonlinear noise reduction; trim the first and last 70 samples of
# the filtered output.
y = noise.nored(x, dim=10, tau=14, r=0.2, repeat=5)[70:-70]

plt.figure(2)
plt.title('Cleaned goat vowel')
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + 14)$')
plt.plot(y[:-14], y[14:], '.')

plt.show()
--------------------------------------------------------------------------------
/examples/noise/henon_sma.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Simple moving average vs. nonlinear noise reduction.

We will compare the effectiveness of a linear filter like the simple
moving average (SMA) and nonlinear noise reduction in filtering a noisy
deterministic time series (from the Henon map).

As we can see, SMA performs quite badly and distorts the structure in
the time series considerably (even with a very small averaging window).
However, nonlinear reduction works well (within limits).
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import data, noise, utils

# Henon series corrupted with Gaussian noise at an SNR of 500.
x = data.henon()[:, 0]
x = utils.corrupt(x, np.random.normal(size=10000), snr=500)

# Nonlinear filter vs. a three-bin SMA (hwin=1 means one bin on
# either side of the center).
y_nonlinear = noise.nored(x, dim=7, tau=1, r=0.10, repeat=5)
y_sma = noise.sma(x, hwin=1)

plt.figure(1)
plt.title('Time series from the Henon map with an SNR of 500')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.plot(x[:-1], x[1:], '.')

plt.figure(2)
plt.title('After doing an SMA over 3 bins')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.plot(y_sma[:-1], y_sma[1:], '.')

plt.figure(3)
plt.title('After using the simple nonlinear filter')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.plot(y_nonlinear[:-1], y_nonlinear[1:], '.')

plt.show()
--------------------------------------------------------------------------------
/examples/noise/laser.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Filtering data from a far-infrared laser.

The data is from Data Set A [1] of the Santa Fe Time Series competition.
This is a map-like data [see Example 4.5 of Kantz & Schreiber (2004)].

The "structure" in the data (arguably) becomes more prominent after
filtering. Also note that the discreteness of the data disappears after
filtering. Thus, nonlinear filtering can be used as an alternative to
the method of adding a small amount of noise to "undiscretize" such data
sets.

[1]: http://www-psych.stanford.edu/~andreas/Time-Series/SantaFe.html
"""

from nolitsa import noise
import matplotlib.pyplot as plt
import numpy as np

x = np.loadtxt('../series/laser.dat')

# Delay plot of the raw (discretized) measurements.
plt.figure(1)
plt.title('Noisy series from a far-infrared laser')
plt.xlim(20, 140)
plt.ylim(20, 140)
plt.xlabel('$x(t)$')
plt.ylabel('$x(t + 1)$')
plt.plot(x[:-1], x[1:], '.')

# Simple nonlinear noise reduction.
y = noise.nored(x, dim=7, tau=1, r=2.0, repeat=5)

# Delay plot of the filtered series.
plt.figure(2)
plt.title('Cleaned series from a far-infrared laser')
plt.xlim(20, 140)
plt.ylim(20, 140)
plt.xlabel('$x(t)$')
plt.ylabel('$x(t + 1)$')
plt.plot(y[:-1], y[1:], '.')

plt.show()
--------------------------------------------------------------------------------
/examples/series/br2.dat:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=conf fdm=marker et sts=2 sw=2
3 | #
4 | # Human breath rate data set II
5 | #
6 | # Data was obtained from 2048 continuous samples of dataset B1 (starting
7 | # from the 12750th) of the Santa Fe time series contest [1].
8 | #
9 | # The first column contains time (in secs) and the second is the
10 | # chest volume (respiration force).
11 | #
12 | # [1]: http://www.physionet.org/physiobank/database/santa-fe/
13 |
14 | 0.0 6668
15 | 0.5 684
16 | 1.0 859
17 | 1.5 2239
18 | 2.0 4606
19 | 2.5 9788
20 | 3.0 8970
21 | 3.5 7501
22 | 4.0 6273
23 | 4.5 5470
24 | 5.0 1392
25 | 5.5 376
26 | 6.0 2047
27 | 6.5 3081
28 | 7.0 9941
29 | 7.5 9576
30 | 8.0 7881
31 | 8.5 6541
32 | 9.0 5547
33 | 9.5 1777
34 | 10.0 402
35 | 10.5 1973
36 | 11.0 3336
37 | 11.5 8847
38 | 12.0 10841
39 | 12.5 8743
40 | 13.0 7001
41 | 13.5 5688
42 | 14.0 5474
43 | 14.5 5709
44 | 15.0 190
45 | 15.5 -804
46 | 16.0 1214
47 | 16.5 2993
48 | 17.0 10108
49 | 17.5 10658
50 | 18.0 8508
51 | 18.5 7299
52 | 19.0 6405
53 | 19.5 3805
54 | 20.0 -567
55 | 20.5 481
56 | 21.0 2185
57 | 21.5 8172
58 | 22.0 11388
59 | 22.5 8975
60 | 23.0 7522
61 | 23.5 6169
62 | 24.0 5273
63 | 24.5 -532
64 | 25.0 286
65 | 25.5 1895
66 | 26.0 3246
67 | 26.5 10417
68 | 27.0 10054
69 | 27.5 8049
70 | 28.0 7236
71 | 28.5 2657
72 | 29.0 1244
73 | 29.5 2053
74 | 30.0 2854
75 | 30.5 9542
76 | 31.0 9992
77 | 31.5 8413
78 | 32.0 7289
79 | 32.5 6284
80 | 33.0 1973
81 | 33.5 1392
82 | 34.0 1990
83 | 34.5 3116
84 | 35.0 9001
85 | 35.5 9893
86 | 36.0 8309
87 | 36.5 7461
88 | 37.0 5768
89 | 37.5 5316
90 | 38.0 1972
91 | 38.5 -426
92 | 39.0 1633
93 | 39.5 3005
94 | 40.0 10589
95 | 40.5 9681
96 | 41.0 8110
97 | 41.5 6723
98 | 42.0 5630
99 | 42.5 5055
100 | 43.0 -288
101 | 43.5 238
102 | 44.0 2166
103 | 44.5 7523
104 | 45.0 10868
105 | 45.5 8610
106 | 46.0 7295
107 | 46.5 6384
108 | 47.0 5530
109 | 47.5 748
110 | 48.0 411
111 | 48.5 2009
112 | 49.0 6365
113 | 49.5 10623
114 | 50.0 8787
115 | 50.5 7095
116 | 51.0 6414
117 | 51.5 5561
118 | 52.0 1638
119 | 52.5 231
120 | 53.0 1736
121 | 53.5 3524
122 | 54.0 11215
123 | 54.5 10121
124 | 55.0 7899
125 | 55.5 6389
126 | 56.0 5760
127 | 56.5 3385
128 | 57.0 -952
129 | 57.5 481
130 | 58.0 3298
131 | 58.5 8517
132 | 59.0 11160
133 | 59.5 9007
134 | 60.0 7415
135 | 60.5 6128
136 | 61.0 5087
137 | 61.5 -293
138 | 62.0 -687
139 | 62.5 1012
140 | 63.0 8203
141 | 63.5 11042
142 | 64.0 8758
143 | 64.5 7231
144 | 65.0 6485
145 | 65.5 5519
146 | 66.0 1907
147 | 66.5 123
148 | 67.0 1764
149 | 67.5 5249
150 | 68.0 11946
151 | 68.5 9803
152 | 69.0 7864
153 | 69.5 6538
154 | 70.0 5630
155 | 70.5 1498
156 | 71.0 598
157 | 71.5 1460
158 | 72.0 9436
159 | 72.5 9079
160 | 73.0 8136
161 | 73.5 7060
162 | 74.0 6096
163 | 74.5 1242
164 | 75.0 -682
165 | 75.5 1320
166 | 76.0 4054
167 | 76.5 12121
168 | 77.0 10088
169 | 77.5 7541
170 | 78.0 5910
171 | 78.5 5421
172 | 79.0 -277
173 | 79.5 -189
174 | 80.0 1065
175 | 80.5 9740
176 | 81.0 9777
177 | 81.5 7513
178 | 82.0 6735
179 | 82.5 5335
180 | 83.0 1733
181 | 83.5 -1036
182 | 84.0 1045
183 | 84.5 8977
184 | 85.0 10381
185 | 85.5 8508
186 | 86.0 7359
187 | 86.5 6204
188 | 87.0 5527
189 | 87.5 80
190 | 88.0 -131
191 | 88.5 3649
192 | 89.0 10529
193 | 89.5 9160
194 | 90.0 7456
195 | 90.5 6396
196 | 91.0 5295
197 | 91.5 4701
198 | 92.0 4908
199 | 92.5 3556
200 | 93.0 -1
201 | 93.5 -203
202 | 94.0 1198
203 | 94.5 5680
204 | 95.0 13093
205 | 95.5 10537
206 | 96.0 7956
207 | 96.5 6222
208 | 97.0 5516
209 | 97.5 3871
210 | 98.0 -376
211 | 98.5 953
212 | 99.0 3859
213 | 99.5 11766
214 | 100.0 9792
215 | 100.5 7484
216 | 101.0 7322
217 | 101.5 5403
218 | 102.0 2479
219 | 102.5 -1031
220 | 103.0 -29
221 | 103.5 2906
222 | 104.0 12153
223 | 104.5 9929
224 | 105.0 7770
225 | 105.5 6236
226 | 106.0 5202
227 | 106.5 1665
228 | 107.0 -1046
229 | 107.5 801
230 | 108.0 6102
231 | 108.5 11937
232 | 109.0 9602
233 | 109.5 7247
234 | 110.0 6377
235 | 110.5 5595
236 | 111.0 1599
237 | 111.5 -675
238 | 112.0 615
239 | 112.5 8382
240 | 113.0 11602
241 | 113.5 9273
242 | 114.0 7532
243 | 114.5 6150
244 | 115.0 5961
245 | 115.5 65
246 | 116.0 -1401
247 | 116.5 461
248 | 117.0 8457
249 | 117.5 11768
250 | 118.0 8958
251 | 118.5 7219
252 | 119.0 6586
253 | 119.5 5551
254 | 120.0 3872
255 | 120.5 462
256 | 121.0 16
257 | 121.5 1789
258 | 122.0 10903
259 | 122.5 10381
260 | 123.0 8019
261 | 123.5 6196
262 | 124.0 4913
263 | 124.5 312
264 | 125.0 192
265 | 125.5 2601
266 | 126.0 11545
267 | 126.5 10052
268 | 127.0 7799
269 | 127.5 6291
270 | 128.0 5329
271 | 128.5 1281
272 | 129.0 -103
273 | 129.5 1344
274 | 130.0 10001
275 | 130.5 9828
276 | 131.0 7787
277 | 131.5 6366
278 | 132.0 5344
279 | 132.5 653
280 | 133.0 -295
281 | 133.5 1187
282 | 134.0 10106
283 | 134.5 10732
284 | 135.0 8383
285 | 135.5 6306
286 | 136.0 5412
287 | 136.5 2715
288 | 137.0 -396
289 | 137.5 610
290 | 138.0 5841
291 | 138.5 11709
292 | 139.0 8913
293 | 139.5 6830
294 | 140.0 5875
295 | 140.5 4918
296 | 141.0 -800
297 | 141.5 -82
298 | 142.0 2597
299 | 142.5 11598
300 | 143.0 10294
301 | 143.5 7670
302 | 144.0 6278
303 | 144.5 4696
304 | 145.0 -625
305 | 145.5 39
306 | 146.0 7845
307 | 146.5 11026
308 | 147.0 8532
309 | 147.5 6722
310 | 148.0 5782
311 | 148.5 5239
312 | 149.0 -45
313 | 149.5 -813
314 | 150.0 1920
315 | 150.5 10783
316 | 151.0 10279
317 | 151.5 8075
318 | 152.0 6908
319 | 152.5 5795
320 | 153.0 5376
321 | 153.5 134
322 | 154.0 -239
323 | 154.5 2073
324 | 155.0 10300
325 | 155.5 10463
326 | 156.0 7715
327 | 156.5 6310
328 | 157.0 5503
329 | 157.5 1235
330 | 158.0 -558
331 | 158.5 220
332 | 159.0 7761
333 | 159.5 10924
334 | 160.0 8681
335 | 160.5 6847
336 | 161.0 5543
337 | 161.5 5343
338 | 162.0 3373
339 | 162.5 -66
340 | 163.0 204
341 | 163.5 4443
342 | 164.0 13095
343 | 164.5 10014
344 | 165.0 7269
345 | 165.5 5980
346 | 166.0 4884
347 | 166.5 4532
348 | 167.0 2162
349 | 167.5 -1380
350 | 168.0 564
351 | 168.5 6240
352 | 169.0 13002
353 | 169.5 10154
354 | 170.0 7396
355 | 170.5 6242
356 | 171.0 5918
357 | 171.5 5422
358 | 172.0 5077
359 | 172.5 4965
360 | 173.0 3994
361 | 173.5 655
362 | 174.0 1042
363 | 174.5 2459
364 | 175.0 10458
365 | 175.5 9640
366 | 176.0 7333
367 | 176.5 6033
368 | 177.0 1189
369 | 177.5 23
370 | 178.0 4821
371 | 178.5 9576
372 | 179.0 9215
373 | 179.5 7151
374 | 180.0 5732
375 | 180.5 4941
376 | 181.0 466
377 | 181.5 103
378 | 182.0 2059
379 | 182.5 11351
380 | 183.0 10592
381 | 183.5 7957
382 | 184.0 6417
383 | 184.5 5792
384 | 185.0 4910
385 | 185.5 2742
386 | 186.0 -468
387 | 186.5 444
388 | 187.0 3577
389 | 187.5 12748
390 | 188.0 10015
391 | 188.5 7521
392 | 189.0 6756
393 | 189.5 4013
394 | 190.0 -963
395 | 190.5 -661
396 | 191.0 2405
397 | 191.5 13384
398 | 192.0 11085
399 | 192.5 8395
400 | 193.0 5980
401 | 193.5 4925
402 | 194.0 1320
403 | 194.5 -1423
404 | 195.0 -189
405 | 195.5 6524
406 | 196.0 12959
407 | 196.5 8681
408 | 197.0 6203
409 | 197.5 4834
410 | 198.0 4124
411 | 198.5 352
412 | 199.0 -493
413 | 199.5 626
414 | 200.0 10836
415 | 200.5 10847
416 | 201.0 9002
417 | 201.5 6703
418 | 202.0 5802
419 | 202.5 2997
420 | 203.0 161
421 | 203.5 1715
422 | 204.0 9775
423 | 204.5 9314
424 | 205.0 7668
425 | 205.5 6219
426 | 206.0 774
427 | 206.5 573
428 | 207.0 1917
429 | 207.5 10695
430 | 208.0 11922
431 | 208.5 9116
432 | 209.0 6921
433 | 209.5 5855
434 | 210.0 3944
435 | 210.5 -421
436 | 211.0 -272
437 | 211.5 3315
438 | 212.0 12328
439 | 212.5 10049
440 | 213.0 6957
441 | 213.5 5839
442 | 214.0 5335
443 | 214.5 -723
444 | 215.0 -789
445 | 215.5 1166
446 | 216.0 11227
447 | 216.5 10320
448 | 217.0 7299
449 | 217.5 5930
450 | 218.0 3392
451 | 218.5 -617
452 | 219.0 580
453 | 219.5 9432
454 | 220.0 10455
455 | 220.5 8051
456 | 221.0 6460
457 | 221.5 5228
458 | 222.0 1058
459 | 222.5 -186
460 | 223.0 1147
461 | 223.5 10754
462 | 224.0 10712
463 | 224.5 7819
464 | 225.0 5981
465 | 225.5 5384
466 | 226.0 2742
467 | 226.5 -465
468 | 227.0 292
469 | 227.5 5644
470 | 228.0 12304
471 | 228.5 9328
472 | 229.0 7094
473 | 229.5 5858
474 | 230.0 4984
475 | 230.5 1086
476 | 231.0 -1074
477 | 231.5 43
478 | 232.0 10018
479 | 232.5 10960
480 | 233.0 8418
481 | 233.5 6646
482 | 234.0 5445
483 | 234.5 4410
484 | 235.0 175
485 | 235.5 -288
486 | 236.0 1801
487 | 236.5 10461
488 | 237.0 11481
489 | 237.5 8455
490 | 238.0 6724
491 | 238.5 2805
492 | 239.0 406
493 | 239.5 1759
494 | 240.0 11967
495 | 240.5 10337
496 | 241.0 7970
497 | 241.5 6164
498 | 242.0 4767
499 | 242.5 2936
500 | 243.0 -285
501 | 243.5 1031
502 | 244.0 5271
503 | 244.5 11764
504 | 245.0 8971
505 | 245.5 6619
506 | 246.0 5565
507 | 246.5 4833
508 | 247.0 175
509 | 247.5 23
510 | 248.0 1558
511 | 248.5 10041
512 | 249.0 10958
513 | 249.5 8432
514 | 250.0 7020
515 | 250.5 5907
516 | 251.0 5011
517 | 251.5 1399
518 | 252.0 242
519 | 252.5 692
520 | 253.0 4618
521 | 253.5 12641
522 | 254.0 9821
523 | 254.5 7482
524 | 255.0 6174
525 | 255.5 5446
526 | 256.0 5340
527 | 256.5 2610
528 | 257.0 -133
529 | 257.5 1240
530 | 258.0 2368
531 | 258.5 12468
532 | 259.0 10505
533 | 259.5 7740
534 | 260.0 6500
535 | 260.5 5040
536 | 261.0 2817
537 | 261.5 -1095
538 | 262.0 -954
539 | 262.5 1406
540 | 263.0 13151
541 | 263.5 11646
542 | 264.0 8427
543 | 264.5 6688
544 | 265.0 5805
545 | 265.5 4714
546 | 266.0 -196
547 | 266.5 -216
548 | 267.0 2722
549 | 267.5 11638
550 | 268.0 9396
551 | 268.5 6683
552 | 269.0 5677
553 | 269.5 4009
554 | 270.0 192
555 | 270.5 -38
556 | 271.0 2750
557 | 271.5 11050
558 | 272.0 8897
559 | 272.5 6872
560 | 273.0 5789
561 | 273.5 3944
562 | 274.0 -172
563 | 274.5 642
564 | 275.0 2507
565 | 275.5 12675
566 | 276.0 10343
567 | 276.5 8436
568 | 277.0 6389
569 | 277.5 4949
570 | 278.0 -255
571 | 278.5 -139
572 | 279.0 2271
573 | 279.5 11873
574 | 280.0 11171
575 | 280.5 8139
576 | 281.0 6398
577 | 281.5 2506
578 | 282.0 -886
579 | 282.5 3313
580 | 283.0 11132
581 | 283.5 9717
582 | 284.0 6878
583 | 284.5 4838
584 | 285.0 -581
585 | 285.5 389
586 | 286.0 3244
587 | 286.5 11465
588 | 287.0 9326
589 | 287.5 6910
590 | 288.0 5736
591 | 288.5 3797
592 | 289.0 -196
593 | 289.5 1404
594 | 290.0 2678
595 | 290.5 9706
596 | 291.0 9778
597 | 291.5 7556
598 | 292.0 5787
599 | 292.5 1762
600 | 293.0 443
601 | 293.5 1850
602 | 294.0 7295
603 | 294.5 11850
604 | 295.0 9138
605 | 295.5 6978
606 | 296.0 6016
607 | 296.5 80
608 | 297.0 307
609 | 297.5 5182
610 | 298.0 11051
611 | 298.5 8414
612 | 299.0 6100
613 | 299.5 5201
614 | 300.0 -595
615 | 300.5 -518
616 | 301.0 2368
617 | 301.5 12040
618 | 302.0 9874
619 | 302.5 7752
620 | 303.0 5198
621 | 303.5 -165
622 | 304.0 536
623 | 304.5 2392
624 | 305.0 11794
625 | 305.5 9628
626 | 306.0 7275
627 | 306.5 5444
628 | 307.0 4784
629 | 307.5 -281
630 | 308.0 -570
631 | 308.5 1454
632 | 309.0 8970
633 | 309.5 12443
634 | 310.0 8994
635 | 310.5 7485
636 | 311.0 6055
637 | 311.5 1820
638 | 312.0 -572
639 | 312.5 317
640 | 313.0 5781
641 | 313.5 12143
642 | 314.0 9669
643 | 314.5 7089
644 | 315.0 5858
645 | 315.5 4906
646 | 316.0 5180
647 | 316.5 3257
648 | 317.0 -161
649 | 317.5 748
650 | 318.0 2425
651 | 318.5 11829
652 | 319.0 9473
653 | 319.5 8094
654 | 320.0 5962
655 | 320.5 67
656 | 321.0 1052
657 | 321.5 6217
658 | 322.0 10586
659 | 322.5 8511
660 | 323.0 7301
661 | 323.5 6172
662 | 324.0 5089
663 | 324.5 4378
664 | 325.0 1970
665 | 325.5 -757
666 | 326.0 2626
667 | 326.5 6770
668 | 327.0 11514
669 | 327.5 9743
670 | 328.0 7408
671 | 328.5 1977
672 | 329.0 -643
673 | 329.5 1114
674 | 330.0 3027
675 | 330.5 11605
676 | 331.0 11780
677 | 331.5 8937
678 | 332.0 6617
679 | 332.5 5389
680 | 333.0 4508
681 | 333.5 102
682 | 334.0 -2036
683 | 334.5 -632
684 | 335.0 3908
685 | 335.5 10886
686 | 336.0 10495
687 | 336.5 8216
688 | 337.0 6741
689 | 337.5 5852
690 | 338.0 5432
691 | 338.5 4004
692 | 339.0 -979
693 | 339.5 -1998
694 | 340.0 -6
695 | 340.5 9084
696 | 341.0 11386
697 | 341.5 9399
698 | 342.0 8130
699 | 342.5 7076
700 | 343.0 5572
701 | 343.5 5358
702 | 344.0 4576
703 | 344.5 1117
704 | 345.0 1084
705 | 345.5 2243
706 | 346.0 10616
707 | 346.5 9901
708 | 347.0 8217
709 | 347.5 7679
710 | 348.0 5783
711 | 348.5 3734
712 | 349.0 717
713 | 349.5 677
714 | 350.0 4762
715 | 350.5 10316
716 | 351.0 8486
717 | 351.5 6916
718 | 352.0 5662
719 | 352.5 4466
720 | 353.0 3705
721 | 353.5 78
722 | 354.0 1181
723 | 354.5 2659
724 | 355.0 11681
725 | 355.5 10359
726 | 356.0 7816
727 | 356.5 5690
728 | 357.0 4587
729 | 357.5 344
730 | 358.0 514
731 | 358.5 2214
732 | 359.0 9129
733 | 359.5 12965
734 | 360.0 9126
735 | 360.5 6552
736 | 361.0 5229
737 | 361.5 3364
738 | 362.0 -1448
739 | 362.5 -768
740 | 363.0 1259
741 | 363.5 9302
742 | 364.0 14282
743 | 364.5 9862
744 | 365.0 7106
745 | 365.5 5657
746 | 366.0 -1355
747 | 366.5 -2388
748 | 367.0 1735
749 | 367.5 14035
750 | 368.0 10670
751 | 368.5 7062
752 | 369.0 5728
753 | 369.5 1139
754 | 370.0 -1816
755 | 370.5 308
756 | 371.0 8122
757 | 371.5 13249
758 | 372.0 9811
759 | 372.5 7848
760 | 373.0 6216
761 | 373.5 296
762 | 374.0 -1702
763 | 374.5 377
764 | 375.0 11979
765 | 375.5 11080
766 | 376.0 7868
767 | 376.5 6276
768 | 377.0 5330
769 | 377.5 1170
770 | 378.0 -1029
771 | 378.5 666
772 | 379.0 11104
773 | 379.5 11651
774 | 380.0 8482
775 | 380.5 6501
776 | 381.0 5449
777 | 381.5 5264
778 | 382.0 2261
779 | 382.5 -1053
780 | 383.0 45
781 | 383.5 7033
782 | 384.0 11184
783 | 384.5 9295
784 | 385.0 7325
785 | 385.5 5713
786 | 386.0 -155
787 | 386.5 137
788 | 387.0 5663
789 | 387.5 10822
790 | 388.0 8849
791 | 388.5 6810
792 | 389.0 5417
793 | 389.5 5461
794 | 390.0 414
795 | 390.5 85
796 | 391.0 1651
797 | 391.5 9937
798 | 392.0 11430
799 | 392.5 8806
800 | 393.0 6804
801 | 393.5 6461
802 | 394.0 4741
803 | 394.5 1340
804 | 395.0 423
805 | 395.5 1692
806 | 396.0 10373
807 | 396.5 10927
808 | 397.0 8305
809 | 397.5 6466
810 | 398.0 5085
811 | 398.5 402
812 | 399.0 -1114
813 | 399.5 722
814 | 400.0 9176
815 | 400.5 10567
816 | 401.0 7713
817 | 401.5 6259
818 | 402.0 4525
819 | 402.5 -1013
820 | 403.0 -147
821 | 403.5 4666
822 | 404.0 12236
823 | 404.5 9020
824 | 405.0 6786
825 | 405.5 5606
826 | 406.0 -412
827 | 406.5 -1008
828 | 407.0 1679
829 | 407.5 11317
830 | 408.0 9830
831 | 408.5 6994
832 | 409.0 5705
833 | 409.5 2812
834 | 410.0 -1522
835 | 410.5 -22
836 | 411.0 8336
837 | 411.5 11570
838 | 412.0 8911
839 | 412.5 7350
840 | 413.0 5194
841 | 413.5 280
842 | 414.0 768
843 | 414.5 7086
844 | 415.0 11719
845 | 415.5 9342
846 | 416.0 7303
847 | 416.5 6235
848 | 417.0 3762
849 | 417.5 -322
850 | 418.0 579
851 | 418.5 6477
852 | 419.0 10929
853 | 419.5 8557
854 | 420.0 6784
855 | 420.5 5755
856 | 421.0 328
857 | 421.5 242
858 | 422.0 3976
859 | 422.5 11233
860 | 423.0 9257
861 | 423.5 7027
862 | 424.0 5747
863 | 424.5 2570
864 | 425.0 -334
865 | 425.5 1233
866 | 426.0 2444
867 | 426.5 9616
868 | 427.0 12266
869 | 427.5 9135
870 | 428.0 6971
871 | 428.5 6535
872 | 429.0 4922
873 | 429.5 1384
874 | 430.0 -1502
875 | 430.5 45
876 | 431.0 8282
877 | 431.5 12348
878 | 432.0 9085
879 | 432.5 6032
880 | 433.0 5343
881 | 433.5 4667
882 | 434.0 1491
883 | 434.5 -1530
884 | 435.0 -264
885 | 435.5 3828
886 | 436.0 11825
887 | 436.5 10041
888 | 437.0 7820
889 | 437.5 6190
890 | 438.0 4045
891 | 438.5 -22
892 | 439.0 -93
893 | 439.5 4888
894 | 440.0 11528
895 | 440.5 9516
896 | 441.0 7115
897 | 441.5 6280
898 | 442.0 5247
899 | 442.5 4903
900 | 443.0 2895
901 | 443.5 447
902 | 444.0 940
903 | 444.5 8932
904 | 445.0 12174
905 | 445.5 9664
906 | 446.0 8185
907 | 446.5 6536
908 | 447.0 5559
909 | 447.5 3368
910 | 448.0 3186
911 | 448.5 2615
912 | 449.0 4202
913 | 449.5 3612
914 | 450.0 3290
915 | 450.5 3920
916 | 451.0 10050
917 | 451.5 8944
918 | 452.0 6414
919 | 452.5 4712
920 | 453.0 3435
921 | 453.5 2494
922 | 454.0 3023
923 | 454.5 7649
924 | 455.0 13520
925 | 455.5 9264
926 | 456.0 9088
927 | 456.5 5741
928 | 457.0 714
929 | 457.5 -1379
930 | 458.0 -182
931 | 458.5 5462
932 | 459.0 11166
933 | 459.5 9636
934 | 460.0 8890
935 | 460.5 8582
936 | 461.0 3476
937 | 461.5 -254
938 | 462.0 -1112
939 | 462.5 -1930
940 | 463.0 1421
941 | 463.5 14102
942 | 464.0 10358
943 | 464.5 6571
944 | 465.0 5415
945 | 465.5 3779
946 | 466.0 -848
947 | 466.5 -3404
948 | 467.0 830
949 | 467.5 12024
950 | 468.0 10256
951 | 468.5 8466
952 | 469.0 6630
953 | 469.5 2397
954 | 470.0 698
955 | 470.5 3515
956 | 471.0 13199
957 | 471.5 11307
958 | 472.0 9761
959 | 472.5 5916
960 | 473.0 6006
961 | 473.5 1656
962 | 474.0 -2641
963 | 474.5 -700
964 | 475.0 10823
965 | 475.5 13619
966 | 476.0 9115
967 | 476.5 7290
968 | 477.0 5609
969 | 477.5 5144
970 | 478.0 -819
971 | 478.5 -994
972 | 479.0 2320
973 | 479.5 10691
974 | 480.0 10931
975 | 480.5 8787
976 | 481.0 6219
977 | 481.5 5189
978 | 482.0 4505
979 | 482.5 4418
980 | 483.0 959
981 | 483.5 3162
982 | 484.0 9073
983 | 484.5 7557
984 | 485.0 6178
985 | 485.5 3408
986 | 486.0 2011
987 | 486.5 8008
988 | 487.0 9815
989 | 487.5 5939
990 | 488.0 3726
991 | 488.5 3365
992 | 489.0 2246
993 | 489.5 3441
994 | 490.0 9832
995 | 490.5 10713
996 | 491.0 6949
997 | 491.5 6691
998 | 492.0 6033
999 | 492.5 4197
1000 | 493.0 1377
1001 | 493.5 387
1002 | 494.0 1972
1003 | 494.5 2819
1004 | 495.0 1627
1005 | 495.5 11672
1006 | 496.0 11167
1007 | 496.5 13539
1008 | 497.0 8473
1009 | 497.5 7326
1010 | 498.0 4489
1011 | 498.5 4016
1012 | 499.0 3384
1013 | 499.5 365
1014 | 500.0 -3428
1015 | 500.5 -3655
1016 | 501.0 243
1017 | 501.5 10066
1018 | 502.0 9559
1019 | 502.5 8186
1020 | 503.0 6672
1021 | 503.5 6864
1022 | 504.0 5784
1023 | 504.5 4568
1024 | 505.0 2144
1025 | 505.5 2695
1026 | 506.0 6303
1027 | 506.5 10483
1028 | 507.0 8973
1029 | 507.5 7341
1030 | 508.0 5537
1031 | 508.5 4814
1032 | 509.0 3718
1033 | 509.5 4314
1034 | 510.0 3421
1035 | 510.5 3645
1036 | 511.0 4027
1037 | 511.5 7292
1038 | 512.0 11305
1039 | 512.5 9362
1040 | 513.0 7786
1041 | 513.5 5378
1042 | 514.0 4559
1043 | 514.5 1874
1044 | 515.0 2230
1045 | 515.5 7360
1046 | 516.0 10454
1047 | 516.5 8421
1048 | 517.0 6573
1049 | 517.5 4197
1050 | 518.0 2202
1051 | 518.5 95
1052 | 519.0 924
1053 | 519.5 3781
1054 | 520.0 11476
1055 | 520.5 9430
1056 | 521.0 7349
1057 | 521.5 5331
1058 | 522.0 4307
1059 | 522.5 2973
1060 | 523.0 1873
1061 | 523.5 679
1062 | 524.0 4659
1063 | 524.5 9749
1064 | 525.0 9638
1065 | 525.5 7352
1066 | 526.0 5742
1067 | 526.5 2615
1068 | 527.0 322
1069 | 527.5 2743
1070 | 528.0 8829
1071 | 528.5 10001
1072 | 529.0 7550
1073 | 529.5 5894
1074 | 530.0 4913
1075 | 530.5 -111
1076 | 531.0 1283
1077 | 531.5 551
1078 | 532.0 12207
1079 | 532.5 12185
1080 | 533.0 8963
1081 | 533.5 6280
1082 | 534.0 2266
1083 | 534.5 -1580
1084 | 535.0 -1122
1085 | 535.5 2980
1086 | 536.0 10021
1087 | 536.5 9619
1088 | 537.0 8000
1089 | 537.5 4765
1090 | 538.0 141
1091 | 538.5 -604
1092 | 539.0 7826
1093 | 539.5 10323
1094 | 540.0 8447
1095 | 540.5 7318
1096 | 541.0 5564
1097 | 541.5 2179
1098 | 542.0 1146
1099 | 542.5 4976
1100 | 543.0 8599
1101 | 543.5 7705
1102 | 544.0 4428
1103 | 544.5 1610
1104 | 545.0 1311
1105 | 545.5 9251
1106 | 546.0 10019
1107 | 546.5 8727
1108 | 547.0 7070
1109 | 547.5 4385
1110 | 548.0 1057
1111 | 548.5 440
1112 | 549.0 1442
1113 | 549.5 10099
1114 | 550.0 10489
1115 | 550.5 9508
1116 | 551.0 6477
1117 | 551.5 6580
1118 | 552.0 2442
1119 | 552.5 -681
1120 | 553.0 478
1121 | 553.5 5264
1122 | 554.0 11370
1123 | 554.5 10480
1124 | 555.0 7271
1125 | 555.5 6012
1126 | 556.0 975
1127 | 556.5 -2187
1128 | 557.0 -838
1129 | 557.5 5530
1130 | 558.0 13094
1131 | 558.5 10484
1132 | 559.0 7101
1133 | 559.5 5187
1134 | 560.0 3461
1135 | 560.5 -2166
1136 | 561.0 -1592
1137 | 561.5 6865
1138 | 562.0 12401
1139 | 562.5 9744
1140 | 563.0 7863
1141 | 563.5 7916
1142 | 564.0 5045
1143 | 564.5 -994
1144 | 565.0 -1768
1145 | 565.5 1341
1146 | 566.0 12182
1147 | 566.5 11418
1148 | 567.0 8741
1149 | 567.5 6526
1150 | 568.0 6914
1151 | 568.5 5993
1152 | 569.0 762
1153 | 569.5 -1741
1154 | 570.0 1226
1155 | 570.5 12313
1156 | 571.0 9915
1157 | 571.5 8174
1158 | 572.0 5704
1159 | 572.5 5033
1160 | 573.0 2882
1161 | 573.5 -525
1162 | 574.0 43
1163 | 574.5 2056
1164 | 575.0 11336
1165 | 575.5 9550
1166 | 576.0 7970
1167 | 576.5 5172
1168 | 577.0 5203
1169 | 577.5 4814
1170 | 578.0 2532
1171 | 578.5 5191
1172 | 579.0 3812
1173 | 579.5 2578
1174 | 580.0 3892
1175 | 580.5 8956
1176 | 581.0 11484
1177 | 581.5 9258
1178 | 582.0 5232
1179 | 582.5 1876
1180 | 583.0 3414
1181 | 583.5 1022
1182 | 584.0 7301
1183 | 584.5 9243
1184 | 585.0 6479
1185 | 585.5 5490
1186 | 586.0 3820
1187 | 586.5 1471
1188 | 587.0 1121
1189 | 587.5 1446
1190 | 588.0 8622
1191 | 588.5 11949
1192 | 589.0 9314
1193 | 589.5 7735
1194 | 590.0 6307
1195 | 590.5 -706
1196 | 591.0 -832
1197 | 591.5 4804
1198 | 592.0 11953
1199 | 592.5 9433
1200 | 593.0 8270
1201 | 593.5 5481
1202 | 594.0 -391
1203 | 594.5 -3004
1204 | 595.0 -887
1205 | 595.5 11298
1206 | 596.0 11269
1207 | 596.5 8846
1208 | 597.0 6915
1209 | 597.5 1481
1210 | 598.0 724
1211 | 598.5 1293
1212 | 599.0 10205
1213 | 599.5 8863
1214 | 600.0 8079
1215 | 600.5 2265
1216 | 601.0 103
1217 | 601.5 1951
1218 | 602.0 11149
1219 | 602.5 9737
1220 | 603.0 7305
1221 | 603.5 7281
1222 | 604.0 978
1223 | 604.5 -718
1224 | 605.0 2102
1225 | 605.5 12142
1226 | 606.0 10599
1227 | 606.5 7151
1228 | 607.0 7666
1229 | 607.5 3749
1230 | 608.0 -400
1231 | 608.5 -1910
1232 | 609.0 1729
1233 | 609.5 11835
1234 | 610.0 9099
1235 | 610.5 7650
1236 | 611.0 5524
1237 | 611.5 4369
1238 | 612.0 1031
1239 | 612.5 217
1240 | 613.0 515
1241 | 613.5 5855
1242 | 614.0 11301
1243 | 614.5 9521
1244 | 615.0 6980
1245 | 615.5 8702
1246 | 616.0 4848
1247 | 616.5 1455
1248 | 617.0 -1002
1249 | 617.5 -69
1250 | 618.0 2380
1251 | 618.5 12094
1252 | 619.0 12691
1253 | 619.5 8241
1254 | 620.0 7591
1255 | 620.5 4725
1256 | 621.0 5500
1257 | 621.5 888
1258 | 622.0 687
1259 | 622.5 1124
1260 | 623.0 8141
1261 | 623.5 13152
1262 | 624.0 11795
1263 | 624.5 9955
1264 | 625.0 5485
1265 | 625.5 2706
1266 | 626.0 877
1267 | 626.5 -1022
1268 | 627.0 -672
1269 | 627.5 1778
1270 | 628.0 11797
1271 | 628.5 10953
1272 | 629.0 8668
1273 | 629.5 7571
1274 | 630.0 6597
1275 | 630.5 5967
1276 | 631.0 -345
1277 | 631.5 -299
1278 | 632.0 5487
1279 | 632.5 10967
1280 | 633.0 11671
1281 | 633.5 7542
1282 | 634.0 2692
1283 | 634.5 -1850
1284 | 635.0 -1229
1285 | 635.5 6075
1286 | 636.0 12988
1287 | 636.5 10409
1288 | 637.0 7020
1289 | 637.5 3876
1290 | 638.0 3193
1291 | 638.5 -673
1292 | 639.0 -1745
1293 | 639.5 -330
1294 | 640.0 4958
1295 | 640.5 13286
1296 | 641.0 10412
1297 | 641.5 8027
1298 | 642.0 6090
1299 | 642.5 4012
1300 | 643.0 38
1301 | 643.5 638
1302 | 644.0 2692
1303 | 644.5 12192
1304 | 645.0 10568
1305 | 645.5 8062
1306 | 646.0 6589
1307 | 646.5 4887
1308 | 647.0 1371
1309 | 647.5 250
1310 | 648.0 1590
1311 | 648.5 6471
1312 | 649.0 12184
1313 | 649.5 9669
1314 | 650.0 7721
1315 | 650.5 5811
1316 | 651.0 4011
1317 | 651.5 3866
1318 | 652.0 1826
1319 | 652.5 28
1320 | 653.0 893
1321 | 653.5 3265
1322 | 654.0 11864
1323 | 654.5 9793
1324 | 655.0 7492
1325 | 655.5 6385
1326 | 656.0 3382
1327 | 656.5 671
1328 | 657.0 1674
1329 | 657.5 5802
1330 | 658.0 11123
1331 | 658.5 9112
1332 | 659.0 7160
1333 | 659.5 3942
1334 | 660.0 -186
1335 | 660.5 593
1336 | 661.0 7302
1337 | 661.5 11233
1338 | 662.0 8680
1339 | 662.5 6376
1340 | 663.0 4553
1341 | 663.5 -833
1342 | 664.0 -632
1343 | 664.5 1740
1344 | 665.0 12213
1345 | 665.5 10670
1346 | 666.0 7925
1347 | 666.5 5979
1348 | 667.0 4478
1349 | 667.5 -257
1350 | 668.0 -460
1351 | 668.5 1833
1352 | 669.0 11439
1353 | 669.5 11909
1354 | 670.0 8587
1355 | 670.5 6604
1356 | 671.0 5796
1357 | 671.5 4091
1358 | 672.0 212
1359 | 672.5 -550
1360 | 673.0 1256
1361 | 673.5 10763
1362 | 674.0 10547
1363 | 674.5 10247
1364 | 675.0 8830
1365 | 675.5 2512
1366 | 676.0 -1181
1367 | 676.5 920
1368 | 677.0 9510
1369 | 677.5 11059
1370 | 678.0 8795
1371 | 678.5 5703
1372 | 679.0 4430
1373 | 679.5 -103
1374 | 680.0 -2305
1375 | 680.5 -202
1376 | 681.0 2946
1377 | 681.5 11457
1378 | 682.0 10777
1379 | 682.5 8153
1380 | 683.0 5535
1381 | 683.5 965
1382 | 684.0 -455
1383 | 684.5 1954
1384 | 685.0 7179
1385 | 685.5 10730
1386 | 686.0 8889
1387 | 686.5 6270
1388 | 687.0 5398
1389 | 687.5 -6
1390 | 688.0 -398
1391 | 688.5 1617
1392 | 689.0 11115
1393 | 689.5 11270
1394 | 690.0 10011
1395 | 690.5 6465
1396 | 691.0 5251
1397 | 691.5 1719
1398 | 692.0 -2111
1399 | 692.5 -39
1400 | 693.0 7487
1401 | 693.5 11468
1402 | 694.0 9893
1403 | 694.5 7444
1404 | 695.0 6216
1405 | 695.5 1706
1406 | 696.0 -1549
1407 | 696.5 191
1408 | 697.0 8074
1409 | 697.5 12031
1410 | 698.0 10933
1411 | 698.5 9118
1412 | 699.0 6853
1413 | 699.5 5357
1414 | 700.0 476
1415 | 700.5 -1963
1416 | 701.0 764
1417 | 701.5 10314
1418 | 702.0 9944
1419 | 702.5 9052
1420 | 703.0 6447
1421 | 703.5 5287
1422 | 704.0 5062
1423 | 704.5 5013
1424 | 705.0 457
1425 | 705.5 -1647
1426 | 706.0 897
1427 | 706.5 10473
1428 | 707.0 10793
1429 | 707.5 8154
1430 | 708.0 6229
1431 | 708.5 6365
1432 | 709.0 5388
1433 | 709.5 4606
1434 | 710.0 4155
1435 | 710.5 3592
1436 | 711.0 -582
1437 | 711.5 910
1438 | 712.0 3925
1439 | 712.5 10232
1440 | 713.0 10025
1441 | 713.5 7787
1442 | 714.0 4415
1443 | 714.5 -188
1444 | 715.0 458
1445 | 715.5 6260
1446 | 716.0 11152
1447 | 716.5 8888
1448 | 717.0 7457
1449 | 717.5 7394
1450 | 718.0 3561
1451 | 718.5 -1612
1452 | 719.0 -888
1453 | 719.5 5875
1454 | 720.0 11827
1455 | 720.5 9579
1456 | 721.0 7194
1457 | 721.5 5998
1458 | 722.0 3252
1459 | 722.5 -1698
1460 | 723.0 851
1461 | 723.5 3207
1462 | 724.0 14567
1463 | 724.5 10605
1464 | 725.0 7505
1465 | 725.5 6178
1466 | 726.0 5843
1467 | 726.5 1532
1468 | 727.0 -526
1469 | 727.5 274
1470 | 728.0 5123
1471 | 728.5 12046
1472 | 729.0 9117
1473 | 729.5 6207
1474 | 730.0 5275
1475 | 730.5 -188
1476 | 731.0 -284
1477 | 731.5 2797
1478 | 732.0 3889
1479 | 732.5 13924
1480 | 733.0 10895
1481 | 733.5 8024
1482 | 734.0 6414
1483 | 734.5 5245
1484 | 735.0 1663
1485 | 735.5 5318
1486 | 736.0 605
1487 | 736.5 1276
1488 | 737.0 7896
1489 | 737.5 4808
1490 | 738.0 3504
1491 | 738.5 4002
1492 | 739.0 12413
1493 | 739.5 10434
1494 | 740.0 7869
1495 | 740.5 6355
1496 | 741.0 5405
1497 | 741.5 3366
1498 | 742.0 1
1499 | 742.5 499
1500 | 743.0 3277
1501 | 743.5 10486
1502 | 744.0 8930
1503 | 744.5 7216
1504 | 745.0 5794
1505 | 745.5 5634
1506 | 746.0 1636
1507 | 746.5 1327
1508 | 747.0 3505
1509 | 747.5 10643
1510 | 748.0 9872
1511 | 748.5 7263
1512 | 749.0 6268
1513 | 749.5 4128
1514 | 750.0 -343
1515 | 750.5 180
1516 | 751.0 2285
1517 | 751.5 11296
1518 | 752.0 10462
1519 | 752.5 7111
1520 | 753.0 6408
1521 | 753.5 5588
1522 | 754.0 119
1523 | 754.5 483
1524 | 755.0 1844
1525 | 755.5 9748
1526 | 756.0 10449
1527 | 756.5 8057
1528 | 757.0 6406
1529 | 757.5 229
1530 | 758.0 -1042
1531 | 758.5 2918
1532 | 759.0 11350
1533 | 759.5 9514
1534 | 760.0 7543
1535 | 760.5 6305
1536 | 761.0 3784
1537 | 761.5 -1156
1538 | 762.0 -391
1539 | 762.5 4235
1540 | 763.0 13548
1541 | 763.5 10085
1542 | 764.0 6921
1543 | 764.5 5341
1544 | 765.0 4862
1545 | 765.5 412
1546 | 766.0 -537
1547 | 766.5 1109
1548 | 767.0 11948
1549 | 767.5 12233
1550 | 768.0 9560
1551 | 768.5 6584
1552 | 769.0 4521
1553 | 769.5 1843
1554 | 770.0 -1865
1555 | 770.5 -857
1556 | 771.0 6477
1557 | 771.5 12259
1558 | 772.0 8974
1559 | 772.5 7319
1560 | 773.0 5609
1561 | 773.5 -1159
1562 | 774.0 -680
1563 | 774.5 1975
1564 | 775.0 13703
1565 | 775.5 11726
1566 | 776.0 8393
1567 | 776.5 7375
1568 | 777.0 5389
1569 | 777.5 4223
1570 | 778.0 1522
1571 | 778.5 -2309
1572 | 779.0 577
1573 | 779.5 4282
1574 | 780.0 13424
1575 | 780.5 10476
1576 | 781.0 7484
1577 | 781.5 5752
1578 | 782.0 4605
1579 | 782.5 3967
1580 | 783.0 -330
1581 | 783.5 -361
1582 | 784.0 1680
1583 | 784.5 10989
1584 | 785.0 9981
1585 | 785.5 8001
1586 | 786.0 6772
1587 | 786.5 862
1588 | 787.0 -656
1589 | 787.5 3380
1590 | 788.0 10605
1591 | 788.5 9017
1592 | 789.0 7325
1593 | 789.5 5218
1594 | 790.0 144
1595 | 790.5 -537
1596 | 791.0 2054
1597 | 791.5 11036
1598 | 792.0 9658
1599 | 792.5 7227
1600 | 793.0 5827
1601 | 793.5 4673
1602 | 794.0 -58
1603 | 794.5 537
1604 | 795.0 7731
1605 | 795.5 11341
1606 | 796.0 9630
1607 | 796.5 6960
1608 | 797.0 5723
1609 | 797.5 4082
1610 | 798.0 779
1611 | 798.5 118
1612 | 799.0 4061
1613 | 799.5 11784
1614 | 800.0 9728
1615 | 800.5 6796
1616 | 801.0 5015
1617 | 801.5 3710
1618 | 802.0 -494
1619 | 802.5 369
1620 | 803.0 2365
1621 | 803.5 12974
1622 | 804.0 10836
1623 | 804.5 7704
1624 | 805.0 5740
1625 | 805.5 4234
1626 | 806.0 3921
1627 | 806.5 3080
1628 | 807.0 -887
1629 | 807.5 -537
1630 | 808.0 1549
1631 | 808.5 5930
1632 | 809.0 13701
1633 | 809.5 11510
1634 | 810.0 8721
1635 | 810.5 7162
1636 | 811.0 5877
1637 | 811.5 1882
1638 | 812.0 -2121
1639 | 812.5 -946
1640 | 813.0 5948
1641 | 813.5 12469
1642 | 814.0 10641
1643 | 814.5 7392
1644 | 815.0 5984
1645 | 815.5 5163
1646 | 816.0 4538
1647 | 816.5 -212
1648 | 817.0 -1194
1649 | 817.5 816
1650 | 818.0 10866
1651 | 818.5 10778
1652 | 819.0 8765
1653 | 819.5 6808
1654 | 820.0 5870
1655 | 820.5 5307
1656 | 821.0 4286
1657 | 821.5 86
1658 | 822.0 179
1659 | 822.5 4115
1660 | 823.0 12057
1661 | 823.5 10334
1662 | 824.0 8129
1663 | 824.5 7118
1664 | 825.0 5572
1665 | 825.5 4698
1666 | 826.0 3863
1667 | 826.5 291
1668 | 827.0 319
1669 | 827.5 3321
1670 | 828.0 11850
1671 | 828.5 9517
1672 | 829.0 6942
1673 | 829.5 6308
1674 | 830.0 4881
1675 | 830.5 4429
1676 | 831.0 3684
1677 | 831.5 1749
1678 | 832.0 284
1679 | 832.5 434
1680 | 833.0 5038
1681 | 833.5 12877
1682 | 834.0 9812
1683 | 834.5 7243
1684 | 835.0 5995
1685 | 835.5 5328
1686 | 836.0 -12
1687 | 836.5 -890
1688 | 837.0 1055
1689 | 837.5 10850
1690 | 838.0 10770
1691 | 838.5 7587
1692 | 839.0 6294
1693 | 839.5 4818
1694 | 840.0 4083
1695 | 840.5 -165
1696 | 841.0 1265
1697 | 841.5 2532
1698 | 842.0 12478
1699 | 842.5 9969
1700 | 843.0 7520
1701 | 843.5 5692
1702 | 844.0 -418
1703 | 844.5 391
1704 | 845.0 2424
1705 | 845.5 12360
1706 | 846.0 9861
1707 | 846.5 6882
1708 | 847.0 5974
1709 | 847.5 2300
1710 | 848.0 -492
1711 | 848.5 1535
1712 | 849.0 10366
1713 | 849.5 8804
1714 | 850.0 7206
1715 | 850.5 2216
1716 | 851.0 53
1717 | 851.5 2137
1718 | 852.0 10676
1719 | 852.5 9952
1720 | 853.0 7592
1721 | 853.5 3663
1722 | 854.0 212
1723 | 854.5 1797
1724 | 855.0 10417
1725 | 855.5 10306
1726 | 856.0 7627
1727 | 856.5 5763
1728 | 857.0 142
1729 | 857.5 -459
1730 | 858.0 1009
1731 | 858.5 7560
1732 | 859.0 13181
1733 | 859.5 10227
1734 | 860.0 7496
1735 | 860.5 5688
1736 | 861.0 3345
1737 | 861.5 -915
1738 | 862.0 -463
1739 | 862.5 2249
1740 | 863.0 14326
1741 | 863.5 11720
1742 | 864.0 8488
1743 | 864.5 5322
1744 | 865.0 3752
1745 | 865.5 -2420
1746 | 866.0 -2366
1747 | 866.5 3829
1748 | 867.0 12288
1749 | 867.5 10027
1750 | 868.0 7070
1751 | 868.5 4610
1752 | 869.0 3324
1753 | 869.5 -3028
1754 | 870.0 -997
1755 | 870.5 4299
1756 | 871.0 13175
1757 | 871.5 10642
1758 | 872.0 7035
1759 | 872.5 5271
1760 | 873.0 1193
1761 | 873.5 -1517
1762 | 874.0 236
1763 | 874.5 9720
1764 | 875.0 13018
1765 | 875.5 9243
1766 | 876.0 6490
1767 | 876.5 4951
1768 | 877.0 5121
1769 | 877.5 2888
1770 | 878.0 -2022
1771 | 878.5 -865
1772 | 879.0 4304
1773 | 879.5 13159
1774 | 880.0 9928
1775 | 880.5 7476
1776 | 881.0 5821
1777 | 881.5 3414
1778 | 882.0 -1170
1779 | 882.5 28
1780 | 883.0 9145
1781 | 883.5 10713
1782 | 884.0 8730
1783 | 884.5 7499
1784 | 885.0 6665
1785 | 885.5 520
1786 | 886.0 1157
1787 | 886.5 8597
1788 | 887.0 8733
1789 | 887.5 6824
1790 | 888.0 1537
1791 | 888.5 -238
1792 | 889.0 1914
1793 | 889.5 11941
1794 | 890.0 10823
1795 | 890.5 7944
1796 | 891.0 6769
1797 | 891.5 5605
1798 | 892.0 4573
1799 | 892.5 2824
1800 | 893.0 166
1801 | 893.5 516
1802 | 894.0 4272
1803 | 894.5 10202
1804 | 895.0 9224
1805 | 895.5 7064
1806 | 896.0 5951
1807 | 896.5 4433
1808 | 897.0 1368
1809 | 897.5 244
1810 | 898.0 2238
1811 | 898.5 10288
1812 | 899.0 10098
1813 | 899.5 8093
1814 | 900.0 6329
1815 | 900.5 4788
1816 | 901.0 593
1817 | 901.5 637
1818 | 902.0 2844
1819 | 902.5 10110
1820 | 903.0 9433
1821 | 903.5 7638
1822 | 904.0 5545
1823 | 904.5 4900
1824 | 905.0 2166
1825 | 905.5 438
1826 | 906.0 1087
1827 | 906.5 3617
1828 | 907.0 11680
1829 | 907.5 10159
1830 | 908.0 7943
1831 | 908.5 6122
1832 | 909.0 5363
1833 | 909.5 3635
1834 | 910.0 3919
1835 | 910.5 3692
1836 | 911.0 739
1837 | 911.5 752
1838 | 912.0 2451
1839 | 912.5 9211
1840 | 913.0 13501
1841 | 913.5 10462
1842 | 914.0 8136
1843 | 914.5 5850
1844 | 915.0 4459
1845 | 915.5 2904
1846 | 916.0 -1189
1847 | 916.5 -281
1848 | 917.0 2563
1849 | 917.5 12418
1850 | 918.0 10568
1851 | 918.5 7805
1852 | 919.0 6259
1853 | 919.5 5383
1854 | 920.0 3658
1855 | 920.5 -1602
1856 | 921.0 -600
1857 | 921.5 4337
1858 | 922.0 13072
1859 | 922.5 9659
1860 | 923.0 7042
1861 | 923.5 5200
1862 | 924.0 4598
1863 | 924.5 21
1864 | 925.0 -653
1865 | 925.5 1266
1866 | 926.0 11357
1867 | 926.5 10945
1868 | 927.0 8622
1869 | 927.5 6526
1870 | 928.0 4750
1871 | 928.5 -578
1872 | 929.0 471
1873 | 929.5 8863
1874 | 930.0 10068
1875 | 930.5 7981
1876 | 931.0 6978
1877 | 931.5 5990
1878 | 932.0 800
1879 | 932.5 23
1880 | 933.0 2177
1881 | 933.5 10785
1882 | 934.0 10004
1883 | 934.5 8197
1884 | 935.0 6571
1885 | 935.5 3814
1886 | 936.0 -2
1887 | 936.5 685
1888 | 937.0 8625
1889 | 937.5 10559
1890 | 938.0 8010
1891 | 938.5 7054
1892 | 939.0 3246
1893 | 939.5 -383
1894 | 940.0 1156
1895 | 940.5 6192
1896 | 941.0 11159
1897 | 941.5 8709
1898 | 942.0 6722
1899 | 942.5 5607
1900 | 943.0 4465
1901 | 943.5 2190
1902 | 944.0 -274
1903 | 944.5 921
1904 | 945.0 4993
1905 | 945.5 11906
1906 | 946.0 9416
1907 | 946.5 7469
1908 | 947.0 6138
1909 | 947.5 5140
1910 | 948.0 738
1911 | 948.5 -5
1912 | 949.0 5255
1913 | 949.5 9715
1914 | 950.0 8679
1915 | 950.5 7652
1916 | 951.0 5697
1917 | 951.5 -439
1918 | 952.0 108
1919 | 952.5 2029
1920 | 953.0 7370
1921 | 953.5 12107
1922 | 954.0 10819
1923 | 954.5 8847
1924 | 955.0 7258
1925 | 955.5 5916
1926 | 956.0 4703
1927 | 956.5 -1777
1928 | 957.0 -2359
1929 | 957.5 126
1930 | 958.0 6845
1931 | 958.5 10315
1932 | 959.0 9340
1933 | 959.5 7942
1934 | 960.0 6902
1935 | 960.5 5813
1936 | 961.0 4947
1937 | 961.5 -1113
1938 | 962.0 -23
1939 | 962.5 2005
1940 | 963.0 8242
1941 | 963.5 9693
1942 | 964.0 9416
1943 | 964.5 7939
1944 | 965.0 7033
1945 | 965.5 6171
1946 | 966.0 3372
1947 | 966.5 -919
1948 | 967.0 946
1949 | 967.5 4298
1950 | 968.0 11753
1951 | 968.5 10208
1952 | 969.0 8352
1953 | 969.5 6801
1954 | 970.0 5655
1955 | 970.5 4799
1956 | 971.0 4351
1957 | 971.5 3066
1958 | 972.0 -1367
1959 | 972.5 -381
1960 | 973.0 1570
1961 | 973.5 10417
1962 | 974.0 10659
1963 | 974.5 8123
1964 | 975.0 7089
1965 | 975.5 5925
1966 | 976.0 5602
1967 | 976.5 5658
1968 | 977.0 4995
1969 | 977.5 3882
1970 | 978.0 4260
1971 | 978.5 877
1972 | 979.0 988
1973 | 979.5 2914
1974 | 980.0 10593
1975 | 980.5 10518
1976 | 981.0 8278
1977 | 981.5 7049
1978 | 982.0 6497
1979 | 982.5 5780
1980 | 983.0 3780
1981 | 983.5 -506
1982 | 984.0 1099
1983 | 984.5 5233
1984 | 985.0 11415
1985 | 985.5 10007
1986 | 986.0 7548
1987 | 986.5 6510
1988 | 987.0 5753
1989 | 987.5 4542
1990 | 988.0 492
1991 | 988.5 -800
1992 | 989.0 700
1993 | 989.5 9475
1994 | 990.0 10748
1995 | 990.5 8490
1996 | 991.0 6436
1997 | 991.5 5314
1998 | 992.0 5109
1999 | 992.5 -157
2000 | 993.0 -1262
2001 | 993.5 1304
2002 | 994.0 8012
2003 | 994.5 13858
2004 | 995.0 9727
2005 | 995.5 6617
2006 | 996.0 5153
2007 | 996.5 4507
2008 | 997.0 -809
2009 | 997.5 -1375
2010 | 998.0 1330
2011 | 998.5 7784
2012 | 999.0 13168
2013 | 999.5 10040
2014 | 1000.0 7330
2015 | 1000.5 5638
2016 | 1001.0 4865
2017 | 1001.5 -1176
2018 | 1002.0 -501
2019 | 1002.5 3132
2020 | 1003.0 12235
2021 | 1003.5 10147
2022 | 1004.0 7750
2023 | 1004.5 5867
2024 | 1005.0 5120
2025 | 1005.5 4705
2026 | 1006.0 1974
2027 | 1006.5 -211
2028 | 1007.0 734
2029 | 1007.5 8850
2030 | 1008.0 10541
2031 | 1008.5 8588
2032 | 1009.0 7002
2033 | 1009.5 5958
2034 | 1010.0 3350
2035 | 1010.5 -468
2036 | 1011.0 631
2037 | 1011.5 3757
2038 | 1012.0 6623
2039 | 1012.5 11602
2040 | 1013.0 10462
2041 | 1013.5 8322
2042 | 1014.0 6413
2043 | 1014.5 96
2044 | 1015.0 -120
2045 | 1015.5 2557
2046 | 1016.0 11723
2047 | 1016.5 10176
2048 | 1017.0 7389
2049 | 1017.5 5954
2050 | 1018.0 3087
2051 | 1018.5 -1405
2052 | 1019.0 525
2053 | 1019.5 3174
2054 | 1020.0 13374
2055 | 1020.5 9734
2056 | 1021.0 6780
2057 | 1021.5 5587
2058 | 1022.0 4728
2059 | 1022.5 307
2060 | 1023.0 -522
2061 | 1023.5 1304
2062 |
--------------------------------------------------------------------------------
/examples/series/goat.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/manu-mannattil/nolitsa/ccd9fab08a88fbc6ff63f0fae28730ccb74754de/examples/series/goat.mp3
--------------------------------------------------------------------------------
/examples/surrogates/aaft.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Illustration of AAFT surrogates.

AAFT surrogates of the human breath rate data are generated and their
power spectra are compared against the spectrum of the original series.
The plot corresponds to Fig. 1 of Schreiber & Schmitz (1996).  The
surrogate spectra deviate considerably from the true power spectrum;
IAAFT surrogates (cf. "iaaft.py") give much better agreement.
"""

import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import welch

from nolitsa import surrogates

x = np.loadtxt('../series/br1.dat', usecols=[1], unpack=True)

plt.title(r'Power spectrum of human breath rate')
plt.xlabel(r'Frequency $f$')
plt.ylabel(r'Power $P(f)$')

# Spectrum-estimation settings shared by surrogates and original.
welch_kwargs = dict(nperseg=128, detrend='constant',
                    window='boxcar', scaling='spectrum', fs=2.0)

# Power spectra of 19 AAFT surrogates.
for _ in range(19):
    f, p = welch(surrogates.aaft(x), **welch_kwargs)
    plt.semilogy(f, p, color='#CA5B7C')

# Power spectrum of the original series.
f0, p0 = welch(x, **welch_kwargs)
plt.semilogy(f0, p0, color='#000000')

plt.show()
39 |
--------------------------------------------------------------------------------
/examples/surrogates/corrnoise.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""IAAFT surrogates for correlated noise.

IAAFT surrogates capture the properties of linearly correlated noise
quite accurately.  Thus, a dimension estimator (here Takens's maximum
likelihood estimator of the correlation dimension) cannot easily be
fooled by such noise when surrogate analysis is performed in addition.
"""

import matplotlib.pyplot as plt
import numpy as np
from nolitsa import surrogates, d2, noise, delay

# Correlated noise: Gaussian white noise smoothed with a moving
# average, trimmed to the segment with the least end-point mismatch.
x = noise.sma(np.random.normal(size=(2 ** 12)), hwin=100)
start, stop = surrogates.mismatch(x)[0]
x = x[start:stop]

# Autocorrelation time: first lag where the autocorrelation dips
# below 1/e.
act = np.argmax(delay.acorr(x) < 1 / np.e)


def takens_mle(series):
    """Return correlation sum of a 7D embedding and its Takens MLE."""
    r, c = d2.c2_embed(series, dim=[7], tau=act, window=act)[0]
    r_mle, mle_curve = d2.ttmle(r, c)
    i = np.argmax(r_mle > 0.5 * np.std(series))
    return r, c, mle_curve[i]


mle = np.empty(19)

# Correlation sums and MLEs of 19 IAAFT surrogates.
for k in range(19):
    r, c, mle[k] = takens_mle(surrogates.iaaft(x)[0])
    plt.loglog(r, c, color='#BC8F8F')

# Correlation sum and MLE of the original series.
r, c, true_mle = takens_mle(x)

plt.title('IAAFT surrogates for correlated noise')
plt.xlabel('Distance $r$')
plt.ylabel('Correlation sum $C(r)$')
plt.loglog(r, c, color='#000000')

plt.figure(2)
plt.title('Takens\'s MLE for correlated noise')
plt.xlabel(r'$D_\mathrm{MLE}$')
plt.vlines(mle, 0.0, 0.5)
plt.vlines(true_mle, 0.0, 1.0)
plt.yticks([])
plt.ylim(0, 3.0)

plt.show()
57 |
--------------------------------------------------------------------------------
/examples/surrogates/iaaft.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Illustration of IAAFT surrogates.

IAAFT surrogates of the human breath rate data are generated and their
power spectra are compared against the spectrum of the original series.
Compared to AAFT surrogates, the surrogate spectra lie much closer to
the true power spectrum (cf. the plot produced by "aaft.py").
"""

import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import welch

from nolitsa import surrogates

x = np.loadtxt('../series/br1.dat', usecols=[1], unpack=True)

plt.title(r'Power spectrum of human breath rate (IAAFT surrogates)')
plt.xlabel(r'Frequency $f$')
plt.ylabel(r'Power $P(f)$')

# Spectrum-estimation settings shared by surrogates and original.
welch_kwargs = dict(nperseg=128, detrend='constant',
                    window='boxcar', scaling='spectrum', fs=2.0)

# Power spectra of 19 IAAFT surrogates.  iaaft() also returns the
# number of iterations and the residual error; only the series is used.
for _ in range(19):
    f, p = welch(surrogates.iaaft(x)[0], **welch_kwargs)
    plt.semilogy(f, p, color='#CA5B7C')

# Power spectrum of the original series.
f0, p0 = welch(x, **welch_kwargs)
plt.semilogy(f0, p0, color='#000000')

plt.show()
38 |
--------------------------------------------------------------------------------
/examples/surrogates/lorenz.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Surrogate analysis of time series from the Lorenz attractor.

Takens's maximum likelihood estimator (MLE) of the correlation
dimension is used as the discriminating statistic for surrogate
analysis of a time series from the Lorenz attractor.  As expected, the
null hypothesis of a linearly correlated stochastic process can be
rejected.
"""

import matplotlib.pyplot as plt
import numpy as np
from nolitsa import surrogates, d2, data

# Keep the segment with minimum end-point mismatch (cf. "mismatch.py").
x = data.lorenz(x0=[-13.5, -16.0, 31.0], length=(2 ** 12))[1][:, 0]
x = x[422:3547]


def takens_mle(series):
    """Return correlation sum of a 5D embedding and its Takens MLE."""
    r, c = d2.c2_embed(series, dim=[5], tau=5, window=100)[0]
    r_mle, mle_curve = d2.ttmle(r, c, zero=False)
    i = np.argmax(r_mle > 0.5 * np.std(series))
    return r, c, mle_curve[i]


mle = np.empty(19)

# Correlation sums and MLEs of 19 IAAFT surrogates.
for k in range(19):
    r, c, mle[k] = takens_mle(surrogates.iaaft(x)[0])
    plt.loglog(r, c, color='#BC8F8F')

# Correlation sum and MLE of the original series.
r, c, true_mle = takens_mle(x)

plt.title('IAAFT surrogates for Lorenz')
plt.xlabel('Distance $r$')
plt.ylabel('Correlation sum $C(r)$')
plt.loglog(r, c, color='#000000')

plt.figure(2)
plt.title('Takens\'s MLE for Lorenz')
plt.xlabel(r'$D_\mathrm{MLE}$')
plt.vlines(mle, 0.0, 0.5)
plt.vlines(true_mle, 0.0, 1.0)
plt.yticks([])
plt.ylim(0, 3.0)

plt.show()
54 |
--------------------------------------------------------------------------------
/examples/surrogates/mismatch.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Illustration of end-point mismatch.

As one can clearly see, there are spurious high-frequency oscillations
in the surrogate series generated from the segment whose end-points
don't match.  These high-frequency oscillations appear as a sort of
"crinkliness" spread throughout the time series, absent from the
surrogate generated from the end-point-matched segment.
"""

from nolitsa import data, surrogates

import matplotlib.pyplot as plt
import numpy as np

x = data.lorenz(x0=[-13.5, -16.0, 31.0], length=(2 ** 12))[1][:, 0]

# Precomputed with the (commented) call below:
# Maximum mismatch occurs for the segment (537, 3662).
# Minimum mismatch occurs for the segment (422, 3547).
# end, d = surrogates.mismatch(x, length=1024)

plt.subplot(211)
plt.title(r'Original time series')
plt.ylabel(r'Measurement $x(t)$')

# Segment with maximum end-point mismatch.
plt.plot(np.arange(3800), x[100:3900], '--')
plt.plot(np.arange(437, 3562), x[537:3662])

plt.subplot(212)
plt.xlabel(r'Time $t$')
plt.ylabel(r'Measurement $x(t)$')

# Segment with minimum end-point mismatch.
plt.plot(np.arange(3800), x[100:3900], '--')
plt.plot(np.arange(322, 3447), x[422:3547])

# BUG FIX: the mismatched segment is (537, 3662) as documented and
# plotted above; the original script used x[537:3663] here, an
# off-by-one inconsistency.
y1 = surrogates.iaaft(x[537:3662])[0]
y2 = surrogates.iaaft(x[422:3547])[0]

plt.figure(2)

plt.subplot(211)
plt.title(r'Surrogate time series')
plt.ylabel(r'Measurement $x(t)$')
plt.plot(y1[:500])

plt.subplot(212)
plt.xlabel(r'Time $t$')
plt.ylabel(r'Measurement $x(t)$')
plt.plot(y2[:500])

plt.show()
52 |
--------------------------------------------------------------------------------
/examples/surrogates/skewlorenz.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """Skew statistic fails for time series from the Lorenz attractor.
5 |
6 | Time series from the Lorenz attractor is quite symmetric in time.
7 | So the skew statistic fails to detect the very strong nonlinearity
8 | present (cf. "skewnoise.py").
9 | """
10 |
11 | import matplotlib.pyplot as plt
12 | import numpy as np
13 | from nolitsa import data, surrogates
14 |
15 |
def skew(x, t=1):
    """Skew statistic to measure asymmetry w.r.t. time reversal.

    Skew statistic measures the asymmetry in the time series w.r.t. time
    reversal by computing the coefficient of skewness of the
    distribution of its t-increments.  This asymmetry is often
    considered to be an indicator of nonlinearity (see Notes).

    Parameters
    ----------
    x : array_like
        1D real input array containing the time series.
    t : int, optional (default = 1)
        Skew statistic measures the skewness in the distribution of
        t-increments of the time series.  By default the skewness in
        the distribution of its first-increments is returned.

    Returns
    -------
    s : float
        Coefficient of skewness of the distribution of t-increments.

    Notes
    -----
    The skew statistic is often touted to have good distinguishing power
    between nonlinearity and linearity.  But it is known to fail
    miserably in both cases (i.e., it often judges nonlinear series as
    linear and vice-versa) and should be avoided for serious analysis.
    """
    # Accept any array-like (list, tuple, ndarray) input.
    x = np.asarray(x)
    dx = x[t:] - x[:-t]

    # Center the increments so that the moments below are central
    # moments of the increment distribution.
    dx = dx - np.mean(dx)
    return np.mean(dx ** 3) / np.mean(dx ** 2) ** 1.5
47 |
48 |
x = data.lorenz(length=(2 ** 12))[1][:, 0]

plt.figure(1)

# Delay plots of the series and of its time reversal: they look very
# similar, reflecting the near time symmetry of the Lorenz series.
plt.subplot(121)
plt.title('Actual')
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-5], x[5:])

plt.subplot(122)
plt.title('Reversed')
plt.xlabel(r'$\hat{x}(t)$')
plt.ylabel(r'$\hat{x}(t + \tau)$')
x_rev = x[::-1]
plt.plot(x_rev[:-5], x_rev[5:])

# Skew statistic of 39 AAFT surrogates vs. that of the original.
s0 = np.array([skew(surrogates.aaft(x)) for _ in range(39)])

plt.figure(2)
plt.title('Skew statistic for time series from the Lorenz attractor')
plt.vlines(s0, 0.0, 0.5)
plt.vlines(skew(x), 0.0, 1.0)
plt.yticks([])
plt.ylim(0, 3.0)

plt.show()
79 |
--------------------------------------------------------------------------------
/examples/surrogates/skewnoise.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """Skew statistic fails for linear stochastic data.
5 |
6 | The skew statistic quantifies asymmetry under time reversal by measuring
7 | the skewness in the distribution of the increments of a time series.
8 | This makes sense as this distribution is flipped left-to-right (along
9 | with a sign change) for all kinds of series under time reversal. So if
10 | this distribution is asymmetric, the time series must exhibit asymmetry
11 | under time reversal. But asymmetry in the distribution of increments
12 | isn't a very good measure of nonlinearity as we'll show here.
13 |
14 | Time series from the Henon map shows very strong asymmetry under time
15 | reversal. So if we start with time series from the Henon map, take the
16 | first difference, shuffle the increments, and calculate the cumulative
17 | sum of the shuffled increments, we end up with a time series which would
18 | come out as nonlinear according to the above rule of thumb. But since
19 | this new series is the cumulative sum of uncorrelated random numbers,
20 | it's a purely linear one. Obviously, this doesn't make any sense.
21 |
22 | Of course, the distribution of increments would slowly become symmetric
23 | as we take larger and larger increments, and pretty soon the series
24 | would fail to reject the null hypothesis of linearity. But how large is
25 | large? It should also be noted that many nonlinear series also fail to
26 | reject the null hypothesis of linearity with this statistic when larger
27 | increments are considered. Thus, this statistic is almost useless in
28 | practical situations.
29 | """
30 |
31 | import matplotlib.pyplot as plt
32 | import numpy as np
33 | from nolitsa import data, surrogates
34 |
35 |
def skew(x, t=1):
    """Skew statistic to measure asymmetry w.r.t. time reversal.

    Skew statistic measures the asymmetry in the time series w.r.t. time
    reversal. This asymmetry is often considered to be an indicator of
    nonlinearity (see Notes).

    Parameters
    ----------
    x : array_like
        1D real input array containing the time series.
    t : int, optional (default = 1)
        Skew statistic measures the skewness in the distribution of
        t-increments of the time series. By default the skewness in
        the distribution of its first-increments is returned.

    Returns
    -------
    s : float
        Coefficient of skewness of the distribution of t-increments.

    Notes
    -----
    The skew statistic is often touted to have good distinguishing power
    between nonlinearity and linearity. But it is known to fail
    miserably in both cases (i.e., it often judges nonlinear series as
    linear and vice-versa) and should be avoided for serious analysis.
    """
    # Accept plain sequences as well as NumPy arrays.
    x = np.asarray(x)
    dx = x[t:] - x[:-t]

    # Third standardized moment of the centered t-increments.
    dx = dx - np.mean(dx)
    return np.mean(dx ** 3) / np.mean(dx ** 2) ** 1.5
67 |
68 |
# Start with time series from the Henon map, take the first-difference,
# shuffle the increments, and calculate the cumulative sum of the
# shuffled increments.  The result is a linear stochastic series whose
# increment distribution is still asymmetric.
x = data.henon(length=(2 ** 12))[:, 0]
dx = x[1:] - x[:-1]
np.random.shuffle(dx)
x = np.cumsum(dx)

plt.figure(1)

# Delay plots (tau = 5) of the series and its time reversal.
plt.subplot(121)
plt.title('Actual')
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-5], x[5:])

plt.subplot(122)
plt.title('Reversed')
plt.xlabel(r'$\hat{x}(t)$')
plt.ylabel(r'$\hat{x}(t + \tau)$')
x_rev = x[::-1]
plt.plot(x_rev[:-5], x_rev[5:])

# Skew statistic for 39 AAFT surrogates (null: linear stochastic data).
s0 = np.empty(39)
for i in range(39):
    y = surrogates.aaft(x)
    s0[i] = skew(y)

# Short lines mark the surrogate values; the tall line marks the value
# for the (linear) series, which wrongly comes out as "nonlinear".
plt.figure(2)
plt.title('Skew statistic fails for stochastic data')
plt.vlines(s0, 0.0, 0.5)
plt.vlines(skew(x), 0.0, 1.0)
plt.yticks([])
plt.ylim(0, 3.0)
plt.show()
104 |
--------------------------------------------------------------------------------
/examples/surrogates/unidev.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """Transforming a time series into a uniform deviate is harmful.
5 |
6 | Uniform deviate transformation is a nonlinear transformation, and
7 | thus, it does not preserve the linear properties of a time series.
8 | In the example below, we see that the power spectra of the surrogates
9 | don't match the power spectrum of the original time series if they're
10 | both converted into a uniform deviate.
11 |
12 | Some authors (e.g., Harikrishnan et al. [Physica D 215 (2006) 137-145])
13 | perform a uniform deviate transformation just before surrogate analysis.
14 | This can lead to incorrect results.
15 | """
16 |
17 | from __future__ import division
18 |
19 | import numpy as np
20 | import matplotlib.pyplot as plt
21 |
22 | from nolitsa import surrogates
23 | from scipy.signal import welch
24 |
25 |
def uniform(x):
    """Convert series into a uniform deviate.

    Converts a time series into a uniform deviate using a probability
    integral transform.

    Parameters
    ----------
    x : array_like
        1D real input array containing the time series.

    Returns
    -------
    y : array
        Series of values in (0, 1], where y[i] is the fraction of
        points in x that are <= x[i].
    """
    x = np.asarray(x)

    # The rank of x[i] (number of values <= x[i]) equals its rightmost
    # insertion index in the sorted array; searchsorted computes all
    # ranks in O(N log N) instead of the naive O(N^2) pairwise count.
    ranks = np.searchsorted(np.sort(x), x, side='right')

    return ranks / len(x)
38 |
39 |
# Load the second column of the br1.dat example series.
x = np.loadtxt('../series/br1.dat')[:, 1]

# Overlay the spectra of 19 IAAFT surrogates (gray curves): figure 1
# uses the raw series, figure 2 the uniform-deviate-transformed series.
for i in range(19):
    y = surrogates.iaaft(x)[0]
    plt.figure(1)
    f, p = welch(y, nperseg=256, detrend='constant',
                 window='boxcar', scaling='spectrum', fs=2.0)
    plt.semilogy(f[1:], p[1:], color='#BBBBBB')

    plt.figure(2)
    f, p = welch(uniform(y), nperseg=256, detrend='constant',
                 window='boxcar', scaling='spectrum', fs=2.0)
    plt.semilogy(f[1:], p[1:], color='#BBBBBB')

# Spectrum of the original series (black) on top of the surrogates.
plt.figure(1)
plt.title('Normal PSD')
plt.xlabel(r'Frequency $f$')
plt.ylabel(r'Power $P(f)$')
f, p = welch(x, nperseg=256, detrend='constant',
             window='boxcar', scaling='spectrum', fs=2.0)
plt.semilogy(f[1:], p[1:], color='#000000')

# Spectrum of the uniform deviate of the original series (black); the
# mismatch with the gray curves illustrates the problem.
plt.figure(2)
plt.title('PSD of uniform deviate')
plt.xlabel(r'Frequency $f$')
plt.ylabel(r'Power $P(f)$')
f, p = welch(uniform(x), nperseg=256, detrend='constant',
             window='boxcar', scaling='spectrum', fs=2.0)
plt.semilogy(f[1:], p[1:], color='#000000')

plt.show()
71 |
--------------------------------------------------------------------------------
/nolitsa/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/manu-mannattil/nolitsa/ccd9fab08a88fbc6ff63f0fae28730ccb74754de/nolitsa/__init__.py
--------------------------------------------------------------------------------
/nolitsa/d2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Functions to estimate correlation sums and dimensions.
4 |
5 | This module provides functions to estimate the correlation sum and the
6 | correlation dimension from both scalar and vector time series.
7 |
8 | Correlation Sum
9 | ---------------
10 |
11 | * c2 -- estimates the correlation sum from a vector time series.
12 | * c2_embed -- estimates the correlation sum from a scalar time series
13 | after embedding.
14 |
15 | Correlation Dimension
16 | ---------------------
17 |
18 | * d2 -- estimates the "local" correlation dimension from correlation
19 | sums and distances using a local least squares fit.
20 | * ttmle -- estimates the correlation dimension from correlation sums
21 | and distances using a maximum likelihood estimator.
22 | """
23 |
24 | from __future__ import absolute_import, division, print_function
25 |
26 | import numpy as np
27 |
28 | from scipy.spatial import distance
29 | from . import utils
30 |
31 |
def c2(y, r=100, metric='chebyshev', window=10):
    """Compute the correlation sum for the given distances.

    Computes the correlation sum of the given time series for the
    specified distances (Grassberger & Procaccia 1983).

    Parameters
    ----------
    y : ndarray
        N-dimensional real input array containing points in the phase
        space.
    r : int or array, optional (default = 100)
        Distances for which the correlation sum should be calculated.
        If r is an int, then the distances are taken to be a geometric
        progression between a minimum and maximum length scale
        (estimated according to the metric and the input series).
    metric : string, optional (default = 'chebyshev')
        Metric to use for distance computation.  Must be one of
        "chebyshev" (aka the maximum norm metric), "cityblock" (aka the
        Manhattan metric), or "euclidean".
    window : int, optional (default = 10)
        Minimum temporal separation (Theiler window) that should exist
        between pairs.

    Returns
    -------
    r : array
        Distances for which correlation sums have been calculated.  Note
        that this might be different from the supplied r as only the
        distances with a nonzero C(r) are included.
    c : array
        Correlation sums for the given distances.

    Notes
    -----
    This function is meant to be used to calculate the correlation sum
    from an array of points in the phase space.  If you want to
    calculate it after embedding a time series, see c2_embed().
    """
    # Estimate the extent of the reconstructed phase space.
    if isinstance(r, int):
        if metric == 'chebyshev':
            extent = np.max(np.max(y, axis=0) - np.min(y, axis=0))
        elif metric == 'cityblock':
            extent = np.sum(np.max(y, axis=0) - np.min(y, axis=0))
        elif metric == 'euclidean':
            extent = np.sqrt(np.sum((np.max(y, axis=0) -
                                     np.min(y, axis=0)) ** 2))
        else:
            raise ValueError('Unknown metric. Should be one of "chebyshev", '
                             '"cityblock", or "euclidean".')

        r = utils.gprange(extent / 1000, extent, r)
    else:
        r = np.asarray(r)
        r = np.sort(r[r > 0])

    # Leading bin edge of -1 makes the first histogram bin collect all
    # pair distances smaller than r[0].
    bins = np.insert(r, 0, -1)
    c = np.zeros(len(r))
    n = len(y)

    # Bin the distances from each point to all temporally later points
    # that are more than `window` steps away (Theiler window).
    for i in range(n - window - 1):
        dists = distance.cdist([y[i]], y[i + window + 1:], metric=metric)[0]
        c += np.histogram(dists, bins=bins)[0]

    # Total number of admissible pairs; cumsum turns per-bin counts
    # into counts of pairs closer than each r.
    pairs = 0.5 * (n - window - 1) * (n - window)
    c = np.cumsum(c) / pairs

    # Report only distances with a nonzero correlation sum.
    return r[c > 0], c[c > 0]
101 |
102 |
def c2_embed(x, dim=(1,), tau=1, r=100, metric='chebyshev', window=10,
             parallel=True):
    """Compute the correlation sum using time-delayed vectors.

    Computes the correlation sum using time-delayed vectors constructed
    from a time series.

    Parameters
    ----------
    x : array
        1-D real input array containing the time series.
    dim : int array, optional (default = (1,))
        Embedding dimensions for which the correlation sums ought to be
        computed.
    tau : int, optional (default = 1)
        Time delay.
    r : int or array, optional (default = 100)
        Distances for which the correlation sum should be calculated.
        If r is an int, then the distances are taken to be a geometric
        progression between a minimum and maximum length scale
        (estimated according to the metric and the input series).
    metric : string, optional (default = 'chebyshev')
        Metric to use for distance computation.  Must be one of
        "chebyshev" (aka the maximum norm metric), "cityblock" (aka the
        Manhattan metric), or "euclidean".
    window : int, optional (default = 10)
        Minimum temporal separation (Theiler window) that should exist
        between pairs.
    parallel : bool, optional (default = True)
        Calculate the correlation sums for each embedding dimension in
        parallel.

    Returns
    -------
    rc : ndarray
        The output is an array with shape (len(dim), 2, len(r)) of
        (r, C(r)) pairs for each dimension.
    """
    # processes=None lets parallel_map pick a worker count; 1 forces a
    # serial computation.
    processes = None if parallel else 1

    # One delay reconstruction per requested embedding dimension.
    yy = [utils.reconstruct(x, dim=d, tau=tau) for d in dim]

    return utils.parallel_map(c2, yy, kwargs={
        'r': r,
        'metric': metric,
        'window': window
    }, processes=processes)
153 |
154 |
def d2(r, c, hwin=3):
    """Compute D2 using a local least squares fit.

    Computes the local correlation dimension D2 by fitting the power
    law C(r) ~ r^D2 in log-log space.  The estimate at each point is
    the slope of a least squares line fitted inside a window of size
    2*hwin + 1 centered on it (Galka 2000).

    Parameters
    ----------
    r : array
        Distances for which correlation sums have been calculated.
    c : array
        Correlation sums for the given distances.
    hwin : int, optional (default = 3)
        Half-window length.  Actual window size is 2*hwin + 1.

    Returns
    -------
    d : array
        Average D2 at each distance in r[hwin:-hwin]
    """
    width = 2 * hwin + 1
    npts = len(r) - 2 * hwin

    logr, logc = np.log(r), np.log(c)
    ones = np.ones(width)
    d = np.empty(npts)

    for k in range(npts):
        # Slope of the least squares line through the window's
        # (log r, log C) points.
        design = np.vstack([logr[k:k + width], ones]).T
        d[k] = np.linalg.lstsq(design, logc[k:k + width], rcond=None)[0][0]

    return d
188 |
189 |
def ttmle(r, c, zero=True):
    """Compute the Takens-Theiler maximum likelihood estimator.

    Computes the Takens-Theiler maximum likelihood estimator (MLE) for
    a given set of distances and the corresponding correlation sums
    (Theiler 1990).  The MLE is calculated by assuming that C(r) obeys
    a true power law between adjacent r's.

    Parameters
    ----------
    r : array
        Distances for which the correlation sums have been calculated.
    c : array
        Correlation sums for the given distances.
    zero : bool, optional (default = True)
        Integrate the MLE starting from zero (see Notes).

    Returns
    -------
    r : array
        Distances at which the Takens-Theiler MLE has been computed.
    d : array
        Takens-Theiler MLE for the given distances.

    Notes
    -----
    Integrating the expression for MLE from zero has the advantage that
    for a true power law of the from C(r) ~ r^D, the MLE gives D as the
    estimate for all values of r.  Some implementations (e.g., TISEAN,
    Hegger et al. 1999) starts the integration only from the minimum
    distance supplied.  In any case, this does not make much difference
    as the only real use of a "dimension" estimator is as a statistic
    for surrogate testing.
    """
    # Prune the arrays so that only unique correlation sums remain.
    # (np.unique also sorts c; r is reindexed to stay aligned.)
    c, i = np.unique(c, return_index=True)
    r = r[i]

    # Fit the local power law C(r) = exp(b) * r^a between each pair of
    # adjacent points in log-log space: a is the slope, b the
    # intercept of the connecting chord.
    x1, y1 = np.log(r[:-1]), np.log(c[:-1])
    x2, y2 = np.log(r[1:]), np.log(c[1:])

    a = (y2 - y1) / (x2 - x1)
    b = (y1 * x2 - y2 * x1) / (x2 - x1)

    # The MLE denominator is the integral of C(s)/s ds.  To integrate,
    # we use the discrete expression (Eq. 24) given in the TISEAN
    # paper (Hegger et al. 1999).
    denom = np.cumsum(np.exp(b) / a * (r[1:] ** a - r[:-1] ** a))

    if zero:
        # Assume that the power law between r[0] and r[1] holds
        # between 0 and r[0], prepend that segment's integral, and
        # fold it into every later cumulative term.
        denom = np.insert(denom, 0, np.exp(b[0]) / a[0] * r[0] ** a[0])
        denom[1:] = denom[1:] + denom[0]
        return r, c / denom
    else:
        # Integration starts at r[0], so no estimate exists there.
        return r[1:], c[1:] / denom
246 |
--------------------------------------------------------------------------------
/nolitsa/data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Functions to generate time series of some popular chaotic systems.
4 |
5 | This module provides some functions that can be used to generate time
6 | series of some common chaotic systems. Most of the parameters and
7 | initial conditions have been taken from Appendix A of Sprott (2003).
8 |
9 | Noise
10 | -----
11 |
12 | * falpha -- generates (1/f)^alpha noise.
13 |
14 | Deterministic Systems
15 | ---------------------
16 |
17 | * henon -- generates data using the Henon map.
18 | * ikeda -- generates data using the Ikeda map.
19 | * lorenz -- generates data using the Lorenz equations.
20 | * mackey_glass -- generates data using the Mackey-Glass delay
21 | differential equations.
22 | * roessler -- generates data using the Rössler equations.
23 | """
24 |
25 | from __future__ import absolute_import, division, print_function
26 |
27 | import numpy as np
28 |
29 | from numpy import fft
30 | from scipy.integrate import odeint
31 |
32 |
def falpha(length=8192, alpha=1.0, fl=None, fu=None, mean=0.0, var=1.0):
    """Generate (1/f)^alpha noise by inverting the power spectrum.

    Generates (1/f)^alpha noise by attaching random phases to the
    desired power-law spectrum and inverting it, following the
    algorithm described by Voss (1988) to generate fractional Brownian
    motion.

    Parameters
    ----------
    length : int, optional (default = 8192)
        Length of the time series to be generated.
    alpha : float, optional (default = 1.0)
        Exponent in (1/f)^alpha.  Pink noise will be generated by
        default.
    fl : float, optional (default = None)
        Lower cutoff frequency; power below it is zeroed.
    fu : float, optional (default = None)
        Upper cutoff frequency; power above it is zeroed.
    mean : float, optional (default = 0.0)
        Mean of the generated noise.
    var : float, optional (default = 1.0)
        Variance of the generated noise.

    Returns
    -------
    x : array
        Array containing the time series.

    Notes
    -----
    Since discrete Fourier transforms assume periodic input, the
    resulting series x_{i} (= x_{i + N}) is periodic as well.  To avoid
    this periodicity, it is recommended to always generate a longer
    series (two or three times longer) and trim it to the desired
    length.
    """
    freqs = fft.rfftfreq(length)

    # Power law for all nonzero frequencies; the DC bin carries no
    # power, i.e., P(0) = 0.
    power = np.empty(len(freqs))
    power[0] = 0.0
    power[1:] = freqs[1:] ** -alpha

    # Apply the optional band limits.
    if fl:
        power[freqs < fl] = 0
    if fu:
        power[freqs > fu] = 0

    # Attach uniformly random phases to the amplitude spectrum.
    phase = 2 * np.pi * np.random.random(len(freqs))
    spectrum = np.sqrt(power) * np.exp(1j * phase)

    # The last component (corresponding to the Nyquist frequency) of an
    # RFFT with an even number of points must be real.  (The DC bin is
    # already fine as P(0) = 0.)
    if length % 2 == 0:
        spectrum[-1] = np.abs(spectrum[-1] * np.sqrt(2))

    x = fft.irfft(spectrum, n=length)

    # Rescale to the requested variance and mean.
    x = np.sqrt(var) * x / np.std(x)
    return mean + x - np.mean(x)
94 |
95 |
def henon(length=10000, x0=None, a=1.4, b=0.3, discard=500):
    """Generate time series using the Henon map.

    Generates time series using the Henon map.

    Parameters
    ----------
    length : int, optional (default = 10000)
        Length of the time series to be generated.
    x0 : array, optional (default = random)
        Initial condition for the map (length 2).
    a : float, optional (default = 1.4)
        Constant a in the Henon map.
    b : float, optional (default = 0.3)
        Constant b in the Henon map.
    discard : int, optional (default = 500)
        Number of steps to discard in order to eliminate transients.

    Returns
    -------
    x : ndarray, shape (length, 2)
        Array containing points in phase space.
    """
    x = np.empty((length + discard, 2))

    # Compare against None explicitly: "not x0" raises ValueError for
    # NumPy arrays (ambiguous truth value) and would also discard a
    # legitimate all-zero initial condition.
    if x0 is None:
        # Small random perturbation around a point near the attractor.
        x[0] = (0.0, 0.9) + 0.01 * (-1 + 2 * np.random.random(2))
    else:
        x[0] = x0

    for i in range(1, length + discard):
        x[i] = (1 - a * x[i - 1][0] ** 2 + b * x[i - 1][1], x[i - 1][0])

    return x[discard:]
130 |
131 |
def ikeda(length=10000, x0=None, alpha=6.0, beta=0.4, gamma=1.0, mu=0.9,
          discard=500):
    """Generate time series from the Ikeda map.

    Generates time series from the Ikeda map.

    Parameters
    ----------
    length : int, optional (default = 10000)
        Length of the time series to be generated.
    x0 : array, optional (default = random)
        Initial condition for the map (length 2).
    alpha : float, optional (default = 6.0)
        Constant alpha in the Ikeda map.
    beta : float, optional (default = 0.4)
        Constant beta in the Ikeda map.
    gamma : float, optional (default = 1.0)
        Constant gamma in the Ikeda map.
    mu : float, optional (default = 0.9)
        Constant mu in the Ikeda map.
    discard : int, optional (default = 500)
        Number of steps to discard in order to eliminate transients.

    Returns
    -------
    x : ndarray, shape (length, 2)
        Array containing points in phase space.
    """
    x = np.empty((length + discard, 2))

    # Compare against None explicitly: "not x0" raises ValueError for
    # NumPy arrays (ambiguous truth value) and would also discard a
    # legitimate all-zero initial condition.
    if x0 is None:
        x[0] = 0.1 * (-1 + 2 * np.random.random(2))
    else:
        x[0] = x0

    for i in range(1, length + discard):
        # phi is the state-dependent rotation angle of the map.
        phi = beta - alpha / (1 + x[i - 1][0] ** 2 + x[i - 1][1] ** 2)
        x[i] = (gamma + mu * (x[i - 1][0] * np.cos(phi) - x[i - 1][1] *
                              np.sin(phi)),
                mu * (x[i - 1][0] * np.sin(phi) + x[i - 1][1] * np.cos(phi)))

    return x[discard:]
174 |
175 |
def lorenz(length=10000, x0=None, sigma=10.0, beta=8.0/3.0, rho=28.0,
           step=0.001, sample=0.03, discard=1000):
    """Generate time series using the Lorenz system.

    Generates time series using the Lorenz system.

    Parameters
    ----------
    length : int, optional (default = 10000)
        Length of the time series to be generated.
    x0 : array, optional (default = random)
        Initial condition for the flow (length 3).
    sigma : float, optional (default = 10.0)
        Constant sigma of the Lorenz system.
    beta : float, optional (default = 8.0/3.0)
        Constant beta of the Lorenz system.
    rho : float, optional (default = 28.0)
        Constant rho of the Lorenz system.
    step : float, optional (default = 0.001)
        Approximate step size of integration.
    sample : float, optional (default = 0.03)
        Sampling step of the time series.
    discard : int, optional (default = 1000)
        Number of samples to discard in order to eliminate transients.

    Returns
    -------
    t : array
        The time values at which the points have been sampled.
    x : ndarray, shape (length, 3)
        Array containing points in phase space.
    """
    def _lorenz(x, t):
        # Standard Lorenz equations.
        return [sigma * (x[1] - x[0]), x[0] * (rho - x[2]) - x[1],
                x[0] * x[1] - beta * x[2]]

    # Compare against None explicitly: "not x0" raises ValueError for
    # NumPy arrays (ambiguous truth value) and would also discard a
    # legitimate all-zero initial condition.
    if x0 is None:
        x0 = (0.0, -0.01, 9.0) + 0.25 * (-1 + 2 * np.random.random(3))

    # Integrate on a fine grid of `step`-sized intervals, then keep
    # every `sample`-th point after dropping the transient.
    sample = int(sample / step)
    t = np.linspace(0, (sample * (length + discard)) * step,
                    sample * (length + discard))

    return (t[discard * sample::sample],
            odeint(_lorenz, x0, t)[discard * sample::sample])
221 |
222 |
def mackey_glass(length=10000, x0=None, a=0.2, b=0.1, c=10.0, tau=23.0,
                 n=1000, sample=0.46, discard=250):
    """Generate time series using the Mackey-Glass equation.

    Generates time series using the discrete approximation of the
    Mackey-Glass delay differential equation described by Grassberger &
    Procaccia (1983).

    Parameters
    ----------
    length : int, optional (default = 10000)
        Length of the time series to be generated.
    x0 : array, optional (default = random)
        Initial condition for the discrete map.  Should be of length n.
    a : float, optional (default = 0.2)
        Constant a in the Mackey-Glass equation.
    b : float, optional (default = 0.1)
        Constant b in the Mackey-Glass equation.
    c : float, optional (default = 10.0)
        Constant c in the Mackey-Glass equation.
    tau : float, optional (default = 23.0)
        Time delay in the Mackey-Glass equation.
    n : int, optional (default = 1000)
        The number of discrete steps into which the interval between
        t and t + tau should be divided.  This results in a time
        step of tau/n and an n + 1 dimensional map.
    sample : float, optional (default = 0.46)
        Sampling step of the time series.  It is useful to pick
        something between tau/100 and tau/10, with tau/sample being
        a factor of n.  This will make sure that there are only whole
        number indices.
    discard : int, optional (default = 250)
        Number of n-steps to discard in order to eliminate transients.
        A total of n*discard steps will be discarded.

    Returns
    -------
    x : array
        Array containing the time series.
    """
    sample = int(n * sample / tau)
    grids = n * discard + sample * length
    x = np.empty(grids)

    # Compare against None explicitly: "not x0" raises ValueError for
    # the n-length array initial condition this map expects.
    if x0 is None:
        # Random state near the attractor's typical amplitude.
        x[:n] = 0.5 + 0.05 * (-1 + 2 * np.random.random(n))
    else:
        x[:n] = x0

    # Coefficients of the trapezoidal discretization of the delay
    # differential equation.
    A = (2 * n - b * tau) / (2 * n + b * tau)
    B = a * tau / (2 * n + b * tau)

    for i in range(n - 1, grids - 1):
        x[i + 1] = A * x[i] + B * (x[i - n] / (1 + x[i - n] ** c) +
                                   x[i - n + 1] / (1 + x[i - n + 1] ** c))
    return x[n * discard::sample]
279 |
280 |
def roessler(length=10000, x0=None, a=0.2, b=0.2, c=5.7, step=0.001,
             sample=0.1, discard=1000):
    """Generate time series using the Rössler oscillator.

    Generates time series using the Rössler oscillator.

    Parameters
    ----------
    length : int, optional (default = 10000)
        Length of the time series to be generated.
    x0 : array, optional (default = random)
        Initial condition for the flow (length 3).
    a : float, optional (default = 0.2)
        Constant a in the Rössler oscillator.
    b : float, optional (default = 0.2)
        Constant b in the Rössler oscillator.
    c : float, optional (default = 5.7)
        Constant c in the Rössler oscillator.
    step : float, optional (default = 0.001)
        Approximate step size of integration.
    sample : float, optional (default = 0.1)
        Sampling step of the time series.
    discard : int, optional (default = 1000)
        Number of samples to discard in order to eliminate transients.

    Returns
    -------
    t : array
        The time values at which the points have been sampled.
    x : ndarray, shape (length, 3)
        Array containing points in phase space.
    """
    def _roessler(x, t):
        # Standard Rössler equations.
        return [-(x[1] + x[2]), x[0] + a * x[1], b + x[2] * (x[0] - c)]

    # Integrate on a fine grid of `step`-sized intervals, then keep
    # every `sample`-th point after dropping the transient.
    sample = int(sample / step)
    t = np.linspace(0, (sample * (length + discard)) * step,
                    sample * (length + discard))

    # Compare against None explicitly: "not x0" raises ValueError for
    # NumPy arrays (ambiguous truth value) and would also discard a
    # legitimate all-zero initial condition.
    if x0 is None:
        x0 = (-9.0, 0.0, 0.0) + 0.25 * (-1 + 2 * np.random.random(3))

    return (t[discard * sample::sample],
            odeint(_roessler, x0, t)[discard * sample::sample])
325 |
--------------------------------------------------------------------------------
/nolitsa/delay.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Functions to estimate the embedding delay.
4 |
5 | This module provides a set of functions that can be used to estimate the
6 | time delay required to embed a scalar time series.
7 |
8 | * acorr -- computes the autocorrelation of a scalar time series.
9 | * mi -- computes the mutual information between two scalar time
10 | series.
11 | * dmi -- computes the mutual information between a scalar time series
12 | and its time-delayed counterpart.
13 | * adfd -- computes the average displacement of the time-delayed
14 | vectors from the phase space diagonal as a function of the time
15 | delay.
16 | """
17 |
18 | from __future__ import absolute_import, division, print_function
19 |
20 | import numpy as np
21 |
22 | from . import utils
23 |
24 |
def acorr(x, maxtau=None, norm=True, detrend=True):
    """Return the autocorrelation of the given scalar time series.

    Calculates the autocorrelation of the given scalar time series
    using the Wiener-Khinchin theorem.

    Parameters
    ----------
    x : array_like
        1-D real time series of length N.
    maxtau : int, optional (default = N)
        Return the autocorrelation only up to this time delay.
    norm : bool, optional (default = True)
        Normalize the autocorrelation so that it is equal to 1 for
        zero time delay.
    detrend : bool, optional (default = True)
        Subtract the mean from the time series (i.e., a constant
        detrend).  This is done so that for uncorrelated data, the
        autocorrelation vanishes for all nonzero time delays.

    Returns
    -------
    r : array
        Array with the autocorrelation up to maxtau.
    """
    x = np.asarray(x)
    npts = len(x)

    maxtau = npts if not maxtau else min(npts, maxtau)

    if detrend:
        x = x - np.mean(x)

    # Wiener-Khinchin: the autocorrelation is the inverse FFT of the
    # power spectrum.  Zero pad to 2N - 1 so the circular correlation
    # equals the linear one.
    # See: http://dsp.stackexchange.com/q/1919
    spec = np.fft.fft(x, 2 * npts - 1)
    r = np.real(np.fft.ifft(spec * spec.conj(), 2 * npts - 1))

    return r[:maxtau] / r[0] if norm else r[:maxtau]
70 |
71 |
def mi(x, y, bins=64):
    """Calculate the mutual information between two random variables.

    Calculates mutual information, I = S(x) + S(y) - S(x,y), between
    two random variables x and y, where S(x) is the Shannon entropy.

    Parameters
    ----------
    x : array
        First random variable.
    y : array
        Second random variable.
    bins : int
        Number of bins to use while creating the histogram.

    Returns
    -------
    i : float
        Mutual information.
    """
    def _neg_entropy(freq):
        # Normalize frequencies to probabilities; empty bins drop out
        # since p*log(p) -> 0 as p -> 0.  Returns sum(p * log2(p)),
        # i.e., the negative Shannon entropy.
        prob = freq[freq > 0] / np.sum(freq)
        return np.sum(prob * np.log2(prob))

    freq_x = np.histogram(x, bins)[0]
    freq_y = np.histogram(y, bins)[0]
    freq_xy = np.histogram2d(x, y, bins)[0].ravel()

    # I = S(x) + S(y) - S(x,y) written with negative entropies.
    return _neg_entropy(freq_xy) - _neg_entropy(freq_x) - _neg_entropy(freq_y)
108 |
109 |
def dmi(x, maxtau=1000, bins=64):
    """Return the time-delayed mutual information of x_i.

    Returns the mutual information between x_i and x_{i + t} (i.e., the
    time-delayed mutual information), up to a t equal to maxtau.  Based
    on the paper by Fraser & Swinney (1986), but uses a much simpler,
    albeit, time-consuming algorithm.

    Parameters
    ----------
    x : array
        1-D real time series of length N.
    maxtau : int, optional (default = min(N, 1000))
        Return the mutual information only up to this time delay.
    bins : int
        Number of bins to use while calculating the histogram.

    Returns
    -------
    ii : array
        Array with the time-delayed mutual information up to maxtau.

    Notes
    -----
    For the purpose of finding the time delay of minimum delayed mutual
    information, the exact number of bins is not very important.
    """
    maxtau = min(len(x), maxtau)

    # Zero delay: mutual information of the series with itself.
    lagged = [mi(x, x, bins)]
    lagged.extend(mi(x[:-tau], x[tau:], bins)
                  for tau in range(1, maxtau))

    return np.array(lagged)
147 |
148 |
def adfd(x, dim=1, maxtau=100):
    """Compute average displacement from the diagonal (ADFD).

    Computes the average displacement of the time-delayed vectors from
    the phase space diagonal which helps in picking a suitable time
    delay (Rosenstein et al. 1994).

    Parameters
    ----------
    x : array
        1-D real time series of length N.
    dim : int, optional (default = 1)
        Embedding dimension.
    maxtau : int, optional (default = 100)
        Calculate the ADFD only up to this delay.

    Returns
    -------
    disp : array
        ADFD for all time delays up to maxtau.
    """
    # NOTE(review): disp is sized with the *requested* maxtau, but the
    # loop runs only up to the clamped value below, so trailing entries
    # stay 0 when maxtau > N/dim.  disp[0] is always 0 as well (zero
    # delay means zero displacement from the diagonal).
    disp = np.zeros(maxtau)
    N = len(x)

    # Ensure at least one delay vector can be formed at every tau.
    maxtau = min(maxtau, int(N / dim))

    for tau in range(1, maxtau):
        y1 = utils.reconstruct(x, dim=dim, tau=tau)

        # Reconstruct with zero time delay.
        y2 = x[:N - (dim - 1) * tau]
        y2 = y2.repeat(dim).reshape(len(y2), dim)

        # Mean Euclidean distance between the delayed vectors and
        # their zero-delay counterparts on the phase-space diagonal.
        disp[tau] = np.mean(utils.dist(y1, y2, metric='euclidean'))

    return disp
185 |
--------------------------------------------------------------------------------
/nolitsa/dimension.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Functions to estimate embedding dimension.
4 |
5 | This module provides a set of functions to estimate the minimum
6 | embedding dimension required to embed a scalar time series.
7 |
8 | * afn -- use the averaged false neighbors method to estimate the
9 | minimum embedding dimension.
10 | * fnn -- use the false nearest neighbors method to estimate the
11 | minimum embedding dimension.
12 | """
13 |
14 | from __future__ import absolute_import, division, print_function
15 |
16 | import numpy as np
17 | from . import utils
18 |
19 |
def _afn(d, x, tau=1, metric='chebyshev', window=10, maxnum=None):
    """Return E(d) and E^*(d) for a single d.

    Computes the two AFN averages for one embedding dimension d.  This
    helper is meant to be called from the main afn() function; see the
    docstring of afn() for details.
    """
    # Drop the last tau points of the series in dimension d so that the
    # reconstructions at d and d + 1 contain equal numbers of vectors.
    y_d = utils.reconstruct(x[:-tau], d, tau)
    y_d1 = utils.reconstruct(x, d + 1, tau)

    # Nearest neighbors (subject to the Theiler window) in dimension d.
    index, dist = utils.neighbors(y_d, metric=metric, window=window,
                                  maxnum=maxnum)

    # Ratio by which neighbor distances grow on going to dimension
    # d + 1, and the growth of the extra coordinate alone.
    ratio = utils.dist(y_d1, y_d1[index], metric=metric) / dist
    growth = np.abs(y_d1[:, -1] - y_d1[index, -1])

    return np.mean(ratio), np.mean(growth)
43 |
44 |
def afn(x, dim=(1,), tau=1, metric='chebyshev', window=10, maxnum=None,
        parallel=True):
    """Averaged false neighbors algorithm.

    This function implements the averaged false neighbors method
    described by Cao (1997) to estimate the minimum embedding dimension
    required to embed a scalar time series.

    Parameters
    ----------
    x : array
        1-D scalar time series.
    dim : int array (default = (1,))
        Embedding dimensions for which E(d) and E^*(d) should be
        computed.
    tau : int, optional (default = 1)
        Time delay.
    metric : string, optional (default = 'chebyshev')
        Metric to use for distance computation. Must be one of
        "cityblock" (aka the Manhattan metric), "chebyshev" (aka the
        maximum norm metric), or "euclidean".
    window : int, optional (default = 10)
        Minimum temporal separation (Theiler window) that should exist
        between near neighbors.
    maxnum : int, optional (default = None (optimum))
        Maximum number of near neighbors that should be found for each
        point. In rare cases, when there are no neighbors that are at a
        nonzero distance, this will have to be increased (i.e., beyond
        2 * window + 3).
    parallel : bool, optional (default = True)
        Calculate E(d) and E^*(d) for each d in parallel.

    Returns
    -------
    E : array
        E(d) for each of the d's.
    Es : array
        E^*(d) for each of the d's.
    """
    # NOTE: the default for `dim` is an (immutable) tuple instead of a
    # list to avoid the mutable-default-argument pitfall.
    processes = None if parallel else 1

    r = utils.parallel_map(_afn, dim, (x,), {
        'tau': tau,
        'metric': metric,
        'window': window,
        'maxnum': maxnum
    }, processes)

    return np.asarray(r).T
97 |
98 |
def _fnn(d, x, tau=1, R=10.0, A=2.0, metric='euclidean', window=10,
         maxnum=None):
    """Return fraction of false nearest neighbors for a single d.

    Computes the FNN fractions at one embedding dimension d.  This
    helper is meant to be called from the main fnn() function; see the
    docstring of fnn() for details.
    """
    # Drop the last tau points of the series in dimension d so that the
    # reconstructions at d and d + 1 contain equal numbers of vectors.
    y_d = utils.reconstruct(x[:-tau], d, tau)
    y_d1 = utils.reconstruct(x, d + 1, tau)

    # Nearest neighbors (subject to the Theiler window) in dimension d.
    index, dist = utils.neighbors(y_d, metric=metric, window=window,
                                  maxnum=maxnum)

    # Kennel et al.'s Test I: relative growth of the extra coordinate.
    test1 = np.abs(y_d1[:, -1] - y_d1[index, -1]) / dist > R

    # Test II: neighbor distance in d + 1 versus the attractor size.
    test2 = utils.dist(y_d1, y_d1[index], metric=metric) / np.std(x) > A

    # A neighbor is false if it fails either test.
    return np.mean(test1), np.mean(test2), np.mean(test1 | test2)
123 |
124 |
def fnn(x, dim=(1,), tau=1, R=10.0, A=2.0, metric='euclidean', window=10,
        maxnum=None, parallel=True):
    """Compute the fraction of false nearest neighbors.

    Implements the false nearest neighbors (FNN) method described by
    Kennel et al. (1992) to calculate the minimum embedding dimension
    required to embed a scalar time series.

    Parameters
    ----------
    x : array
        1-D real input array containing the time series.
    dim : int array (default = (1,))
        Embedding dimensions for which the fraction of false nearest
        neighbors should be computed.
    tau : int, optional (default = 1)
        Time delay.
    R : float, optional (default = 10.0)
        Tolerance parameter for FNN Test I.
    A : float, optional (default = 2.0)
        Tolerance parameter for FNN Test II.
    metric : string, optional (default = 'euclidean')
        Metric to use for distance computation. Must be one of
        "cityblock" (aka the Manhattan metric), "chebyshev" (aka the
        maximum norm metric), or "euclidean". Also see Notes.
    window : int, optional (default = 10)
        Minimum temporal separation (Theiler window) that should exist
        between near neighbors.
    maxnum : int, optional (default = None (optimum))
        Maximum number of near neighbors that should be found for each
        point. In rare cases, when there are no neighbors that are at a
        nonzero distance, this will have to be increased (i.e., beyond
        2 * window + 3).
    parallel : bool, optional (default = True)
        Calculate the fraction of false nearest neighbors for each d
        in parallel.

    Returns
    -------
    f1 : array
        Fraction of neighbors classified as false by Test I.
    f2 : array
        Fraction of neighbors classified as false by Test II.
    f3 : array
        Fraction of neighbors classified as false by either Test I
        or Test II.

    Notes
    -----
    The FNN fraction is metric dependent for noisy time series. In
    particular, the second FNN test, which measures the boundedness of
    the reconstructed attractor depends heavily on the metric used.
    E.g., if the Chebyshev metric is used, the near-neighbor distances
    in the reconstructed attractor are always bounded and therefore the
    reported FNN fraction becomes a nonzero constant (approximately)
    instead of increasing with the embedding dimension.
    """
    # NOTE: the default for `dim` is an (immutable) tuple instead of a
    # list to avoid the mutable-default-argument pitfall.
    processes = None if parallel else 1

    r = utils.parallel_map(_fnn, dim, (x,), {
        'tau': tau,
        'R': R,
        'A': A,
        'metric': metric,
        'window': window,
        'maxnum': maxnum
    }, processes)

    return np.asarray(r).T
197 |
--------------------------------------------------------------------------------
/nolitsa/lyapunov.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Functions to estimate the maximum Lyapunov exponent.
4 |
5 | This module provides two functions to estimate the maximum Lyapunov
6 | exponent (MLE) from a scalar and vector time series.
7 |
8 | * mle -- estimate the MLE from a vector time series
9 | * mle_embed -- estimate the MLE from a scalar time series after
10 | reconstruction.
11 | """
12 |
13 | from __future__ import absolute_import, division, print_function
14 |
15 | import numpy as np
16 |
17 | from . import utils
18 |
19 |
def mle(y, maxt=500, window=10, metric='euclidean', maxnum=None):
    """Estimate the maximum Lyapunov exponent.

    Estimates the maximum Lyapunov exponent (MLE) from a
    multi-dimensional series using the algorithm described by
    Rosenstein et al. (1993).

    Parameters
    ----------
    y : ndarray
        Multi-dimensional real input array containing points in the
        phase space.
    maxt : int, optional (default = 500)
        Maximum time (iterations) up to which the average divergence
        should be computed.
    window : int, optional (default = 10)
        Minimum temporal separation (Theiler window) that should exist
        between near neighbors (see Notes).
    metric : string, optional (default = 'euclidean')
        Metric to use for distance computation; passed on to
        utils.neighbors() and utils.dist().
    maxnum : int, optional (default = None (optimum))
        Maximum number of near neighbors that should be found for each
        point. In rare cases, when there are no neighbors that are at a
        nonzero distance, this will have to be increased (i.e., beyond
        2 * window + 3).

    Returns
    -------
    d : array
        Average divergence for each time up to maxt.

    Notes
    -----
    This function does not directly estimate the MLE. The MLE should be
    estimated by linearly fitting the average divergence (i.e., the
    average of the logarithms of near-neighbor distances) with time.
    It is also important to choose an appropriate Theiler window so that
    the near neighbors do not lie on the same trajectory, in which case
    the estimated MLE will always be close to zero.
    """
    index, dist = utils.neighbors(y, metric=metric, window=window,
                                  maxnum=maxnum)
    m = len(y)
    # Trajectories cannot be followed beyond the end of the series.
    maxt = min(m - window - 1, maxt)

    d = np.empty(maxt)
    # At t = 0 the divergence is just the mean log near-neighbor distance.
    d[0] = np.mean(np.log(dist))

    for t in range(1, maxt):
        # t1: each reference point advanced by t steps; t2: its original
        # nearest neighbor advanced by the same t steps.
        t1 = np.arange(t, m)
        t2 = index[:-t] + t

        # Sometimes the nearest point would be farther than (m - maxt)
        # in time.  Such trajectories need to be omitted.
        valid = t2 < m
        t1, t2 = t1[valid], t2[valid]

        d[t] = np.mean(np.log(utils.dist(y[t1], y[t2], metric=metric)))

    return d
78 |
79 |
def mle_embed(x, dim=(1,), tau=1, window=10, maxt=500,
              metric='euclidean', maxnum=None, parallel=True):
    """Estimate the maximum Lyapunov exponent from a scalar time series.

    Estimates the maximum Lyapunov exponent (MLE) using time-delayed
    vectors created from a scalar time series (Rosenstein et al. 1993).

    Parameters
    ----------
    x : ndarray
        1-D real input array containing the time series.
    dim : int array, optional (default = (1,))
        Embedding dimensions for which the average divergence should be
        computed.
    tau : int, optional (default = 1)
        Time delay.
    maxt : int, optional (default = 500)
        Maximum time (iterations) up to which the average divergence
        should be computed.
    window : int, optional (default = 10)
        Minimum temporal separation (Theiler window) that should exist
        between near neighbors (see Notes).
    metric : string, optional (default = 'euclidean')
        Metric to use for distance computation; passed on to mle().
    maxnum : int, optional (default = None (optimum))
        Maximum number of near neighbors that should be found for each
        point. In rare cases, when there are no neighbors that are at a
        nonzero distance, this will have to be increased (i.e., beyond
        2 * window + 3).
    parallel : bool, optional (default = True)
        Compute the average divergence for each embedding dimension in
        parallel.

    Returns
    -------
    d : array
        Average divergence for each time up to maxt, for each embedding
        dimension.

    Notes
    -----
    This function does not directly estimate the MLE. The MLE should be
    estimated by linearly fitting the average divergence (i.e., the
    average of the logarithms of near-neighbor distances) with time.
    It is also important to choose an appropriate Theiler window so that
    the near neighbors do not lie on the same trajectory, in which case
    the estimated MLE will always be close to zero.
    """
    # NOTE: the default for `dim` is an (immutable) tuple instead of a
    # list to avoid the mutable-default-argument pitfall.
    processes = None if parallel else 1

    yy = [utils.reconstruct(x, dim=d, tau=tau) for d in dim]

    return utils.parallel_map(mle, yy, kwargs={
        'maxt': maxt,
        'window': window,
        'metric': metric,
        'maxnum': maxnum
    }, processes=processes)
139 |
--------------------------------------------------------------------------------
/nolitsa/noise.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Functions for noise reduction.
4 |
5 | This module provides two functions for reducing noise in a time series.
6 |
7 | * sma -- returns the simple moving average of a time series.
8 | * nored -- simple noise reduction algorithm to suppress noise in
9 | deterministic time series.
10 | """
11 |
12 | from __future__ import absolute_import, division, print_function
13 |
14 | import numpy as np
15 | from scipy.spatial import cKDTree as KDTree
16 | from . import utils
17 |
18 |
def sma(x, hwin=5):
    """Compute simple moving average.

    Computes the simple moving average (SMA) of a given time series.

    Parameters
    ----------
    x : array
        1-D real input array of length N containing the time series.
    hwin : int, optional (default = 5)
        Half-window length.  Actual window size is 2*hwin + 1.

    Returns
    -------
    y : array
        Averaged array of length N - 2*hwin.

    Notes
    -----
    An SMA is a linear filter and is known to distort nonlinear
    structures in the time series considerably.
    """
    if hwin <= 0:
        # A window of size one leaves the series unchanged.
        return x

    size = 2 * hwin + 1

    # Running window sums via a cumulative sum: differencing the
    # cumulative sums `size` apart gives the sum over each window.
    sums = np.cumsum(x)
    sums[size:] = sums[size:] - sums[:-size]

    return sums[size - 1:] / size
49 |
50 |
def nored(x, dim=1, tau=1, r=0, metric='chebyshev', repeat=1):
    """Simple noise reduction based on local phase space averaging.

    Simple noise reduction scheme based on local phase space averaging
    (Schreiber 1993; Kantz & Schreiber 2004).

    Parameters
    ----------
    x : array
        1-D real input array containing the time series.
    dim : int, optional (default = 1)
        Embedding dimension.
    tau : int, optional (default = 1)
        Time delay.
    r : float, optional (default = 0)
        Radius of neighborhood (see Notes).
    metric : string, optional (default = 'chebyshev')
        Metric to use for distance computation. Must be one of
        "cityblock" (aka the Manhattan metric), "chebyshev" (aka the
        maximum norm metric), or "euclidean".
    repeat: int, optional (default = 1)
        Number of iterations.

    Returns
    -------
    y : array
        1-D real output array containing the time series after noise
        reduction.

    Notes
    -----
    Choosing the right neighborhood radius is crucial for proper noise
    reduction. A large radius will result in too much filtering. By
    default, a radius of zero is used, which means that no noise
    reduction is done. Note that the radius also depends on the metric
    used for distance computation. Best results are often obtained
    using large embedding dimensions with unit delay and the Chebyshev
    metric. (This function is a featureful equivalent of the TISEAN
    program "lazy".)
    """
    # Map the metric name to the Minkowski order p accepted by
    # cKDTree.query_ball_point().
    if metric == 'cityblock':
        p = 1
    elif metric == 'euclidean':
        p = 2
    elif metric == 'chebyshev':
        p = np.inf
    else:
        raise ValueError('Unknown metric. Should be one of "cityblock", '
                         '"euclidean", or "chebyshev".')

    # Choose the middle coordinate appropriately.
    # Each scalar y[i + mid] is the middle coordinate of the i-th delay
    # vector; that is the coordinate replaced by the local average.
    if dim % 2 == 0:
        mid = tau * dim // 2
    else:
        mid = tau * (dim - 1) // 2

    y = np.copy(x)

    for rep in range(repeat):
        # Keep a snapshot of the series to measure the correction size.
        z = np.copy(y)
        ps = utils.reconstruct(y, dim=dim, tau=tau)

        tree = KDTree(ps)

        # State-space averaging.
        # (We don't use tree.query_ball_tree() as it almost always
        # results in a memory overflow, even though it's faster.)
        # NOTE(review): y is updated in place while ps is queried; if
        # utils.reconstruct() returns a view of y, later queries see
        # already-corrected values — presumably intentional (sequential
        # update), but confirm against utils.reconstruct().
        for i in range(len(ps)):
            neighbors = tree.query_ball_point(ps[i], r=r, p=p)
            y[i + mid] = np.mean(ps[neighbors][:, mid // tau])

        # Choose the average correction as the new radius.
        r = np.sqrt(np.mean((y - z) ** 2))

        # Stop as soon as the series stops changing.
        if r == 0:
            break

    return y
130 |
--------------------------------------------------------------------------------
/nolitsa/surrogates.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Functions to generate surrogate series.
4 |
5 | This module provides a set of functions to generate surrogate series
6 | from a given time series using multiple algorithms.
7 |
8 | Surrogates Generation
9 | ---------------------
10 |
11 | * ft -- generates Fourier transform surrogates.
12 | * aaft -- generates amplitude adjusted Fourier transform surrogates.
13 | * iaaft -- generates iterative amplitude adjusted Fourier transform
14 | surrogates.
15 |
16 | Utilities
17 | ---------
18 |
19 | * mismatch -- finds the segment of a time series with the least
20 | end-point mismatch.
21 | """
22 |
23 | from __future__ import absolute_import, division, print_function
24 |
25 | import numpy as np
26 | from . import utils
27 |
28 |
def ft(x):
    """Return simple Fourier transform surrogates.

    Returns phase randomized (FT) surrogates that preserve the power
    spectrum (or equivalently the linear correlations), but completely
    destroy the probability distribution.

    Parameters
    ----------
    x : array
        Real input array containing the time series.

    Returns
    -------
    y : array
        Surrogates with the same power spectrum as x.
    """
    spectrum = np.fft.rfft(x)

    # Draw a uniformly random phase for each positive frequency.
    phases = 2 * np.pi * np.random.random(len(spectrum))

    # The DC component must stay real, and so must the Nyquist
    # component when the series has an even length.
    phases[0] = 0.0
    if len(x) % 2 == 0:
        phases[-1] = 0.0

    return np.fft.irfft(spectrum * np.exp(1j * phases), n=len(x))
56 |
57 |
def aaft(x):
    """Return amplitude adjusted Fourier transform surrogates.

    Returns phase randomized, amplitude adjusted (AAFT) surrogates with
    crudely the same power spectrum and distribution as the original
    data (Theiler et al. 1992).  AAFT surrogates are used in testing
    the null hypothesis that the input series is correlated Gaussian
    noise transformed by a monotonic time-independent measuring
    function.

    Parameters
    ----------
    x : array
        1-D input array containing the time series.

    Returns
    -------
    y : array
        Surrogate series with (crudely) the same power spectrum and
        distribution.
    """
    ranks = np.argsort(np.argsort(x))

    # Uncorrelated Gaussian deviates, reordered so that they follow the
    # ranks of the original series.
    gauss = np.sort(np.random.normal(size=len(x)))[ranks]

    # Phase randomize the Gaussianized series...
    surrogate = ft(gauss)

    # ...and restore the original distribution by rank ordering.
    return np.sort(x)[np.argsort(np.argsort(surrogate))]
87 |
88 |
def iaaft(x, maxiter=1000, atol=1e-8, rtol=1e-10):
    """Return iterative amplitude adjusted Fourier transform surrogates.

    Returns phase randomized, amplitude adjusted (IAAFT) surrogates with
    the same power spectrum (to a very high accuracy) and distribution
    as the original data using an iterative scheme (Schreiber & Schmitz
    1996).

    Parameters
    ----------
    x : array
        1-D real input array of length N containing the time series.
    maxiter : int, optional (default = 1000)
        Maximum iterations to be performed while checking for
        convergence. The scheme may converge before this number as
        well (see Notes).
    atol : float, optional (default = 1e-8)
        Absolute tolerance for checking convergence (see Notes).
    rtol : float, optional (default = 1e-10)
        Relative tolerance for checking convergence (see Notes).

    Returns
    -------
    y : array
        Surrogate series with (almost) the same power spectrum and
        distribution.
    i : int
        Number of iterations that have been performed.
    e : float
        Root-mean-square deviation (RMSD) between the absolute squares
        of the Fourier amplitudes of the surrogate series and that of
        the original series.

    Notes
    -----
    To check if the power spectrum has converged, we see if the absolute
    difference between the current (cerr) and previous (perr) RMSDs is
    within the limits set by the tolerance levels, i.e., if abs(cerr -
    perr) <= atol + rtol*perr.  This follows the convention used in
    the NumPy function numpy.allclose().

    Additionally, atol and rtol can be both set to zero in which
    case the iterations end only when the RMSD stops changing or when
    maxiter is reached.
    """
    # Target Fourier amplitudes and the sorted distribution of x.
    ampl = np.abs(np.fft.rfft(x))
    sorted_x = np.sort(x)

    # Previous and current error.
    prev_err, curr_err = -1, 1

    # Start with a random permutation.
    t = np.fft.rfft(np.random.permutation(x))

    for i in range(maxiter):
        # Impose the target power spectrum while keeping the phases.
        s = np.real(np.fft.irfft(ampl * t / np.abs(t), n=len(x)))

        # Impose the target distribution by rank ordering.
        y = sorted_x[np.argsort(np.argsort(s))]

        t = np.fft.rfft(y)
        curr_err = np.sqrt(np.mean((ampl ** 2 - np.abs(t) ** 2) ** 2))

        # Check convergence.
        if abs(curr_err - prev_err) <= atol + rtol * abs(prev_err):
            break
        prev_err = curr_err

    # Normalize error w.r.t. mean of the "true" power spectrum.
    return y, i, curr_err / np.mean(ampl ** 2)
162 |
163 |
def mismatch(x, length=None, weight=0.5, neigh=3):
    """Find the segment that minimizes end-point mismatch.

    Finds the segment in the time series that has minimum end-point
    mismatch.  To do this we calculate the mismatch between the end
    points of all segments of the given length and pick the segment with
    least mismatch (Ehlers et al. 1998).  We also enforce the
    condition that the difference between the first derivatives at the
    end points must be a minimum.

    Parameters
    ----------
    x : array
        Real input array containing the time series.
    length : int, optional
        Length of segment. By default the largest possible length which
        is a power of one of the first five primes is selected.
    weight : float, optional (default = 0.5)
        Weight given to discontinuity in the first difference of the
        time series. Must be between 0 and 1.
    neigh : int, optional (default = 3)
        Num of end points using which the discontinuity statistic should
        be computed.

    Returns
    -------
    ends : tuple
        Indices of the end points of the segment.
    d : float
        Discontinuity statistic for the segment.

    Notes
    -----
    Both the time series and its first difference are linearly rescaled
    to [0, 1].  Thus the discontinuity statistic varies between 0 and 1
    (0 means no discontinuity and 1 means maximum discontinuity).
    """
    # Calculate the first difference of the time series and rescale it
    # to [0, 1].
    dx = utils.rescale(np.diff(x))
    x = utils.rescale(x)[1:]
    n = len(x)

    if not length:
        # Pick the largest length <= n that is a power of one of the
        # first five primes.
        primes = np.array([2, 3, 5, 7, 11])
        i = np.argmax(primes ** np.floor(np.log(n) / np.log(primes)) - n)
        length = int(primes[i] ** (np.floor(np.log(n) / np.log(primes[i]))))

    d = np.zeros(n - (length + neigh))

    # Weighted mean-square mismatch between the two ends of each
    # candidate segment, for the series and for its first difference.
    for i in range(n - (length + neigh)):
        d[i] = ((1 - weight) * (np.mean((x[i:i + neigh] -
                x[i + length:i + length + neigh]) ** 2.0)) +
                weight * (np.mean((dx[i:i + neigh] -
                dx[i + length:i + length + neigh]) ** 2.0)))

    # Compute the winning index only once (it was previously computed
    # twice); +1 compensates for the np.diff() shift above.
    start = 1 + np.argmin(d)
    return (start, start + length), np.min(d)
221 |
--------------------------------------------------------------------------------
/nolitsa/tests/test_d2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import, division, print_function
4 |
5 | import numpy as np
6 |
7 | from nolitsa import d2, utils
8 | from numpy.testing import assert_allclose
9 |
10 |
def test_c2():
    # Test d2.c2()
    # Particle moving uniformly in 5d: y(t) = a + b*t
    a = np.random.random(5)
    b = np.random.random(5)

    n, window = 250, 15
    t = np.arange(n)
    y = a + b * t[:, np.newaxis]

    # Norm of the velocity vector b under each metric.
    speeds = {
        'chebyshev': np.max(np.abs(b)),
        'cityblock': np.sum(np.abs(b)),
        'euclidean': np.sqrt(np.sum(b ** 2)),
    }

    for metric, modb in speeds.items():
        # We need to offset the r values a bit so that the half-open
        # bins used in np.histogram get closed.
        r = np.arange(window + 1, n) * modb + 1e-10

        c = d2.c2(y, r=r, window=window, metric=metric)[1]
        desired = (np.cumsum(np.arange(n - window - 1, 0, -1)) /
                   (0.5 * (n - window - 1) * (n - window)))
        assert_allclose(c, desired)
38 |
39 |
def test_c2_embed():
    # Test d2.c2_embed()
    # Reconstructing the sine with a quarter-period delay should
    # recover the circle traced by (sin t, cos t) to good accuracy.
    t = np.linspace(0, 10 * 2 * np.pi, 5000)
    y = np.array([np.sin(t), np.cos(t)]).T
    r = utils.gprange(0.01, 1, 1000)
    desired = d2.c2(y, r=r)[1]

    actual = d2.c2_embed(y[:, 0], dim=[2], tau=125, r=r)[0][1]
    assert_allclose(desired, actual, atol=1e-3)
53 |
54 |
def test_d2():
    # Test d2.d2()
    # With p = e^x and q = e^y where y = 2x + 3x^2, the local slope of
    # log(q) vs log(p) reported by d2.d2() must equal dy/dx = 2 + 6x.
    x = np.linspace(-5, 5, 1000)
    y = 2 * x + 3 * x ** 2

    slope = d2.d2(np.exp(x), np.exp(y))
    assert_allclose(slope, 2 + 6 * x[3:-3])
64 |
65 |
def test_ttmle():
    # Test d2.ttmle()
    # For C(r) = e * r^pi the estimated slope should come out as pi.
    r = utils.gprange(1.0, 10.0, 100)
    c = np.e * r ** np.pi

    assert_allclose(np.pi, d2.ttmle(r, c, zero=True)[1])

    expected = np.pi * (c[1:] / (c[1:] - c[0]))
    assert_allclose(expected, d2.ttmle(r, c, zero=False)[1])
77 |
--------------------------------------------------------------------------------
/nolitsa/tests/test_data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import, division, print_function
4 |
5 | import numpy as np
6 |
7 | from nolitsa import data, utils
8 | from numpy.testing import assert_allclose
9 |
10 |
def test_falpha():
    # Tests data.falpha()
    # The requested mean and variance should be honored exactly.
    x = data.falpha(length=(2 ** 10), mean=np.pi, var=np.e)
    assert_allclose(np.mean(x), np.pi)
    assert_allclose(np.std(x) ** 2, np.e)

    # The log-log slope of the power spectrum should equal -alpha.
    for length in (2 ** 10, 3 ** 7):
        for alpha in (1.0, 2.0, 3.0):
            mean, var = 1.0 + np.random.random(2)
            x = data.falpha(alpha=alpha, length=length,
                            mean=mean, var=var)

            freq, power = utils.spectrum(x)
            slope = np.mean(np.diff(np.log(power[1:])) /
                            np.diff(np.log(freq[1:])))

            assert_allclose(-alpha, slope)
29 |
--------------------------------------------------------------------------------
/nolitsa/tests/test_delay.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import, division, print_function
4 | import numpy as np
5 | from nolitsa import delay
6 | from numpy.testing import assert_allclose
7 |
8 |
class TestAcorr(object):
    # Test delay.acorr()

    def test_random(self):
        # Compare against a brute-force autocorrelation.
        n = 32
        x = np.random.random(n)
        x = x - x.mean()

        expected = np.empty(n)
        expected[0] = np.sum(x ** 2)
        for lag in range(1, n):
            expected[lag] = np.sum(x[:-lag] * x[lag:])
        expected = expected / expected[0]

        assert_allclose(delay.acorr(x), expected)

    def test_sin(self):
        # Test using a finite sine wave.
        #
        # Autocorrelation function of a /finite/ sine wave over n
        # cycles is:
        #
        #   r(tau) = [(2*n*pi - tau)*cos(tau) + sin(tau)] / 2*n*pi
        #
        # As n -> infty, r(tau) = cos(tau) as expected.
        cycles = 2 ** 5
        t = np.linspace(0, cycles * 2 * np.pi, cycles * 2 ** 10)

        expected = ((np.cos(t) * (2 * cycles * np.pi - t) + np.sin(t)) /
                    (2 * cycles * np.pi))
        assert_allclose(delay.acorr(np.sin(t)), expected, atol=1E-5)
43 |
44 |
def test_mi():
    # Test delay.mi()
    # Silly tests will have to do for now.
    # Mutual information is symmetric in its arguments.
    x = np.random.normal(loc=5.0, size=100)
    y = np.random.normal(loc=-5.0, size=100)
    assert_allclose(delay.mi(x, y), delay.mi(y, x))

    # A uniform ramp paired with itself fills every bin equally and
    # carries log2(bins) bits of information.
    bins = 128
    ramp = np.arange(50 * bins)
    assert_allclose(delay.mi(ramp, ramp, bins=bins), np.log2(bins))
55 |
56 |
def test_adfd():
    # Test delay.adfd()
    # Embed a straight line.
    a, b = 1.0 + np.random.random(2)
    t = np.arange(1000)
    x = a + b * t

    def sqsum(n):
        # Sum of squares of the first n natural numbers.
        # (A def instead of an assigned lambda, per PEP 8.)
        return n * (n + 1) * (2 * n + 1) / 6.0

    dim, maxtau = 7, 25
    desired = np.sqrt(sqsum(dim - 1)) * b * np.arange(maxtau)
    assert_allclose(delay.adfd(x, dim=dim, maxtau=maxtau), desired)
70 |
--------------------------------------------------------------------------------
/nolitsa/tests/test_dimension.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import, division, print_function
4 |
5 | import numpy as np
6 |
7 | from nolitsa import dimension
8 | from numpy.testing import assert_allclose
9 |
10 |
class TestAFN(object):
    # Tests for dimension.afn()

    def test_noise(self):
        # Test dimension.afn() using uncorrelated random numbers.
        x = np.random.random(1000)
        dims = np.arange(1, 5 + 2)
        E, Es = dimension.afn(x, dim=dims, metric='chebyshev', window=10)
        E2 = Es[1:] / Es[:-1]

        # The standard deviation of E2 should be ~ 0 for uncorrelated
        # random numbers [Ramdani et al., Physica D 223, 229 (2006)].
        # Additionally, the mean of E2 should be ~ 1.0.
        assert_allclose(np.std(E2), 0, atol=0.1)
        assert_allclose(np.mean(E2), 1, atol=0.1)

    def test_line(self):
        # Test dimension.afn() by embedding a line.
        # Particle moving uniformly in 1-D.
        a, b = np.random.random(2)
        t = np.arange(100)
        x = a + b * t
        dims = np.arange(1, 10 + 2)
        window = 10

        # Chebyshev distances between near-neighbors remain bounded.
        # This gives "cleaner" results when embedding known objects like
        # a line. For a line, E = 1.0 for all dimensions as expected,
        # whereas it is (d + 1) / d (for cityblock) and sqrt(d + 1) /
        # sqrt(d) for Euclidean. In both cases, E -> 1.0 at large d,
        # but E = 1.0 is definitely preferred.
        expectations = {
            'chebyshev': 1.0,
            'cityblock': (dims + 1) / dims,
            'euclidean': np.sqrt((dims + 1) / dims),
        }

        for metric, E_des in expectations.items():
            E, Es = dimension.afn(x, dim=dims, metric=metric)

            assert_allclose(E_des, E)
            assert_allclose((window + 1) * b, Es)
58 |
59 |
class TestFNN(object):
    """Tests for dimension.fnn() (false nearest neighbors).

    Because of the binary magnification function used in the FNN test,
    it's not easy to create unit tests like the AFN ones.  So we make
    do with simple geometric objects.
    """

    def test_line(self):
        x = np.linspace(0, 10, 1000)
        dim = np.arange(1, 10 + 1)

        # A line has zero FNN at all embedding dimensions.
        f1, f2, f3 = dimension.fnn(x, dim=dim, tau=1, window=0)

        # BUG FIX: np.allclose() returns a bool; without `assert` these
        # checks were silently discarded and the test verified nothing.
        assert np.allclose(f1, 0)
        assert np.allclose(f2, 0)
        assert np.allclose(f3, 0)

    def test_circle(self):
        t = np.linspace(0, 100 * np.pi, 5000)
        x = np.sin(t)
        dim = np.arange(1, 10 + 1)

        # A circle has no FNN after d = 2.
        desired = np.zeros(10)
        desired[0] = 1.0

        f1 = dimension.fnn(x, dim=dim, tau=25)[0]
        assert np.allclose(f1, desired)

    def test_curve(self):
        t = np.linspace(0, 100 * np.pi, 5000)
        x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.sin(5 * t)
        dim = np.arange(1, 10 + 1)

        # Though this curve is a deformation of a circle, it has zero
        # FNN only after d = 3.
        desired = np.zeros(10)
        desired[:2] = 1.0

        f1 = dimension.fnn(x, dim=dim, tau=25)[0]
        assert np.allclose(f1, desired)
98 |
--------------------------------------------------------------------------------
/nolitsa/tests/test_lyapunov.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import, division, print_function
4 |
5 | import numpy as np
6 |
7 | from nolitsa import lyapunov
8 | from numpy.testing import assert_allclose
9 |
10 |
def test_mle():
    # Test lyapunov.mle()
    # A particle moving uniformly in 7-D: y(t) = a + b*t.
    a = np.random.random(7)
    b = np.random.random(7)

    n = 250
    window = 15
    y = a + b * np.arange(n)[:, np.newaxis]

    # Norm of the velocity vector b under each supported metric.
    norms = {
        'chebyshev': np.max(np.abs(b)),
        'cityblock': np.sum(np.abs(b)),
        'euclidean': np.sqrt(np.sum(b ** 2)),
    }

    for metric in ('chebyshev', 'cityblock', 'euclidean'):
        # The nearest admissible neighbor is (window + 1) time steps
        # away, so the average divergence is log((window + 1) * |b|).
        desired = np.log((window + 1) * norms[metric])
        assert_allclose(lyapunov.mle(y, window=window, metric=metric), desired)
32 |
33 |
def test_mle_embed():
    # Test lyapunov.mle_embed()
    # The MLE from a delay reconstruction of sin(t) should agree with
    # the MLE computed directly from the two-dimensional orbit.
    t = np.linspace(0, 10 * 2 * np.pi, 5000)
    orbit = np.array([np.sin(t), np.cos(t)]).T
    desired = lyapunov.mle(orbit, maxt=25)

    scalar = orbit[:, 0]
    estimated = lyapunov.mle_embed(scalar, dim=[2], tau=125, maxt=25)[0]

    assert_allclose(desired, estimated, atol=1e-1)
46 |
--------------------------------------------------------------------------------
/nolitsa/tests/test_noise.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import, division, print_function
4 | import numpy as np
5 |
6 | from nolitsa import noise
7 | from numpy.testing import assert_allclose
8 |
9 |
class TestNoRed(object):
    """Tests for noise.nored() (simple nonlinear noise reduction)."""

    def test_zero_radius(self):
        # With zero radius the function should simply return the
        # original series, whatever the embedding dimension and delay.
        # (BUG FIX: `dim` was previously never passed to nored(), so
        # every iteration exercised only the function's default
        # dimension.)
        for n in (200, 201):
            for tau in (1, 2, 3, 12, 13):
                for dim in (1, 2, 3, 12, 13):
                    x = np.random.random(n)
                    assert_allclose(noise.nored(x, dim=dim, r=0, tau=tau), x)

    def test_line(self):
        # We embed a line of the form x = a + i*b and do noise reduction
        # with a radius of 1.5*b.  Apart from the end points (of the
        # reconstructed series) each of which have only one neighbor,
        # all the other points have two neighbors -- the points before
        # and after in time.  It's not difficult to come up with an
        # expression for the new time series from this information.  Of
        # course the results depend on whether the embedding dimension
        # is even or odd.  So we test for all odd/even combinations of
        # length, delay, and dimension.  (TISEAN's `lazy` fails this
        # test with a moderate imprecision.)
        for n in (200, 201):
            for tau in (1, 2, 3, 12, 13):
                for dim in (1, 2, 3, 12, 13):
                    m = n - (dim - 1) * tau

                    i = np.arange(1, n + 1)
                    a, b = 1.0 + np.random.random(2)
                    x = a + i * b

                    y = noise.nored(x, dim=dim, r=(1.5 * b), tau=tau)
                    z = np.empty(n)

                    # I'll be damned if I have to derive this again.
                    if dim == 1:
                        z[0] = 0.5 * (x[0] + x[1])
                        z[-1] = 0.5 * (x[-2] + x[-1])
                        z[1:-1] = (x[:-2] + x[1:-1] + x[2:]) / 3.0
                    elif dim % 2 == 0:
                        c = tau * dim // 2

                        # Start points.
                        z[:c] = x[:c]
                        z[c] = a + (1 + c + 0.5) * b

                        # Points in the middle.
                        z[c + 1:-(c - tau + 1)] = a + (np.arange(2, m) + c) * b

                        # End points.
                        z[-(c - tau + 1)] = a + (m + c - 0.5) * b
                        if c > tau:
                            # If c <= tau, then there is only one end point.
                            z[-(c - tau):] = x[-(c - tau):]
                    else:
                        c = tau * (dim - 1) // 2

                        # Start points.
                        z[:c] = x[:c]
                        z[c] = a + (1 + c + 0.5) * b

                        # Points in the middle.
                        z[c + 1:-(c + 1)] = a + (np.arange(2, m) + c) * b

                        # End points.
                        z[-(c + 1)] = a + (m + c - 0.5) * b
                        z[-c:] = x[-c:]

                    assert_allclose(y, z)
79 |
--------------------------------------------------------------------------------
/nolitsa/tests/test_surrogates.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import, division, print_function
4 |
5 | import numpy as np
6 |
7 | from nolitsa import surrogates, noise, utils
8 | from numpy.testing import assert_allclose
9 |
10 |
def test_ft():
    # Test surrogates.ft()
    # Check both even and odd series lengths.
    for n in (2 ** 10, 3 ** 7):
        # NOTE that zero mean series almost always causes an assertion
        # error since the relative tolerance between different "zeros"
        # can be quite large.  This is not a bug!
        series = 1.0 + np.random.random(n)
        surr = surrogates.ft(series)

        # An FT surrogate must preserve the power spectrum exactly.
        assert_allclose(utils.spectrum(series)[1], utils.spectrum(surr)[1])
22 |
23 |
def test_aaft():
    # Test surrogates.aaft()
    # Check both even and odd series lengths.
    for n in (2 ** 16, 3 ** 10):
        # Correlated Gaussian numbers transformed using f(x) = tanh(x).
        series = np.tanh(noise.sma(np.random.normal(size=n), hwin=5))
        surr = surrogates.aaft(series)

        # The AAFT surrogate should match the spectrum approximately.
        assert_allclose(utils.spectrum(series)[1], utils.spectrum(surr)[1],
                        atol=1e-3)
34 |
35 |
def test_iaaft():
    # Test surrogates.iaaft()  (comment used to say aaft -- copy-paste typo)
    # Always test for both odd and even number of points.
    for n in (2 ** 14, 3 ** 9):
        # Correlated Gaussian numbers transformed using f(x) = tanh(x)
        x = noise.sma(np.random.normal(size=n), hwin=5)
        x = np.tanh(x)
        # y: surrogate, i: iterations used, e: residual spectrum error.
        y, i, e = surrogates.iaaft(x)

        assert_allclose(utils.spectrum(x)[1], utils.spectrum(y)[1], atol=1e-6)
46 |
47 |
def test_mismatch():
    # Test surrogates.mismatch()
    # A constant series carrying a tiny perturbation everywhere except
    # inside a known interval of length 729 = 3 ** 6.
    series = 10.0 + 1e-10 * np.random.random(1000)

    neigh = 7
    desired = (23, 23 + 729)
    series[desired[0] - 1:desired[1] + neigh] = 10.0

    assert_allclose(desired, surrogates.mismatch(series, neigh=neigh)[0])
59 |
--------------------------------------------------------------------------------
/nolitsa/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import, division, print_function
4 |
5 | import itertools
6 | import numpy as np
7 |
8 | from time import sleep
9 | from nolitsa import utils
10 | from numpy.testing import assert_, assert_allclose
11 |
12 |
def test_corrupt():
    # Test utils.corrupt()
    # Corrupting a zero-mean signal with itself at SNR = 16 adds the
    # signal scaled by 1/sqrt(16), i.e., returns 1.25 times the signal.
    signal = np.random.random(100)
    signal = signal - np.mean(signal)
    assert_allclose(utils.corrupt(signal, signal, snr=16.0), 1.25 * signal)
18 |
19 |
def test_dist():
    # Test utils.dist() against hand-computed row-wise distances for
    # every supported metric.
    x = np.random.random((100, 5))
    y = np.random.random((100, 5))
    diff = np.abs(x - y)

    for metric, desired in (('chebyshev', np.max(diff, axis=1)),
                            ('cityblock', np.sum(diff, axis=1)),
                            ('euclidean', np.sqrt(np.sum(diff ** 2, axis=1)))):
        assert_allclose(utils.dist(x, y, metric=metric), desired)
33 |
34 |
def test_gprange():
    # Test utils.gprange()
    num = 10
    pi = np.pi

    # Case 1: start and end share the same sign (positive ratio).
    desired = pi * pi ** np.arange(num)
    assert_allclose(utils.gprange(desired[0], desired[-1], num=num), desired)

    # Case 2: start and end have opposite signs (negative ratio).
    desired = pi * (-pi) ** np.arange(num)
    assert_allclose(utils.gprange(desired[0], desired[-1], num=num), desired)
49 |
50 |
class TestNeighbors(object):
    """Tests for utils.neighbors()."""

    def test_uniform_acceleration(self):
        # As test data, we use the position of a particle under constant
        # acceleration moving in a d-dimensional space.
        d = 5
        t_max = 1000
        t = np.arange(t_max)[:, np.newaxis].repeat(d, 1)
        a = 1.0 + np.random.random(d)
        v0 = 1.0 + np.random.random(d)
        x0 = 1.0 + np.random.random(d)
        x = x0 + v0 * t + 0.5 * a * t ** 2

        # Since it's uniformly accelerated motion, the closest point at
        # each instant of time is the last point visited.  (Not true
        # when t <= window, in which case it is the next point after
        # "window time" in future.)  Since the acceleration and velocity
        # have the same sign, we don't have to worry about the particle
        # reversing its motion either.
        window = 15
        index, _ = utils.neighbors(x, window=window)
        desired = np.hstack((np.arange(window + 1, 2 * window + 2),
                             np.arange(t_max - window - 1)))
        assert_allclose(index, desired)

    def test_duplicates(self):
        # We want to make sure that the right exceptions are raised if a
        # neighbor with a nonzero distance is not found satisfying the
        # window/maxnum conditions.
        repeat = 10
        window = 15
        x = np.repeat(np.arange(10) ** 2, repeat)[:, np.newaxis]

        # It should fail when maxnum is small.  (BUG FIX: a bare
        # `except:` would also swallow KeyboardInterrupt/SystemExit;
        # catch Exception instead.)
        for maxnum in range(1, repeat + window):
            try:
                utils.neighbors(x, window=window, maxnum=maxnum)
            except Exception:
                assert True
            else:
                assert False

        # Now it should run without any problem.
        maxnum = repeat + 2 * window
        utils.neighbors(x, window=window, maxnum=maxnum)

    def test_grid(self):
        # A very simple test to find near neighbors in a 10x10x10 grid.
        # (Comment fixed: arange(10) with repeat=3 makes 1000 points,
        # not a 3x3x3 grid.)
        dx, dy, dz = 1.0 + np.random.random(3)

        # There are probably more elegant ways to do a Cartesian
        # product, but this will have to do for now.
        grid = np.array([(dx * x, dy * y, dz * z) for x, y, z in
                         itertools.product(np.arange(10), repeat=3)])
        np.random.shuffle(grid)

        index, dists = utils.neighbors(grid)
        desired = min(dx, dy, dz)
        assert_allclose(dists, desired)

    def test_random(self):
        # We are creating a random data set whose near neighbor
        # distances are already known for all three metrics.
        d = 5
        n = 500
        x = np.arange(d * n).reshape(n, d) + 100 * np.random.random((n, d))
        desired = np.random.random(n)

        y = np.vstack((x, x + desired[:, np.newaxis]))
        np.random.shuffle(y)

        index, dists = utils.neighbors(y, metric='euclidean')
        assert_allclose(np.sort(dists),
                        np.sqrt(d) * np.sort(desired).repeat(2))

        index, dists = utils.neighbors(y, metric='cityblock')
        assert_allclose(np.sort(dists), d * np.sort(desired).repeat(2))

        index, dists = utils.neighbors(y, metric='chebyshev')
        assert_allclose(np.sort(dists), np.sort(desired).repeat(2))

    def test_maxnum(self):
        # Make sure that appropriate exceptions are raised if no nonzero
        # neighbor is found with the given maxnum.
        x = np.arange(10).repeat(15)[:, np.newaxis]

        # Should raise exceptions (narrowed from a bare `except:`).
        for maxnum in range(1, 15):
            try:
                utils.neighbors(x, maxnum=maxnum)
            except Exception:
                assert True
            else:
                assert False

        # Should work now.
        utils.neighbors(x, maxnum=15)
149 |
150 |
151 | def _func_shm(t, ampl, omega=(0.1 * np.pi), phase=0):
152 | # Utility function to test utils.parallel_map()
153 | sleep(0.5 * np.random.random())
154 | return ampl * np.sin(omega * t + phase)
155 |
156 |
def test_parallel_map():
    # Test utils.parallel_map()
    tt = np.arange(5)
    ampl, omega, phase = np.random.random(3)
    kwargs = {'omega': omega, 'phase': phase}
    desired = [_func_shm(t, ampl, omega=omega, phase=phase) for t in tt]

    # Both the multiprocess (processes=None) and the serial
    # (processes=1) code paths should reproduce an ordinary map().
    for processes in (None, 1):
        xx = utils.parallel_map(_func_shm, tt, args=(ampl,), kwargs=kwargs,
                                processes=processes)
        assert_allclose(xx, desired)
171 |
172 |
def test_reconstruct():
    # Test utils.reconstruct()
    # A delay reconstruction of sin(t) with a quarter-period delay
    # should recover cos(t) as the second coordinate (a circle).
    t = np.linspace(0, 10 * 2 * np.pi, 10000)
    tau = 250

    x1, x2 = utils.reconstruct(np.sin(t), 2, tau).T

    assert_allclose(x2, np.cos(t[:-tau]), atol=1e-3)
185 |
186 |
def test_rescale():
    # Test utils.rescale()
    series = 1.0 + np.random.random(100)
    scaled = utils.rescale(series, interval=(-np.pi, np.pi))

    # The rescaled series must span the interval [-pi, pi] exactly.
    assert_(abs(np.min(scaled)) == np.max(scaled) == np.pi)
192 |
193 |
def test_spectrum():
    # Test utils.spectrum() by verifying Parseval's theorem (the mean
    # squared amplitude equals the sum of the power) for both even and
    # odd series lengths.
    for length in (2 ** 10, 3 ** 7):
        series = np.random.random(length)
        assert_allclose(np.mean(series ** 2),
                        np.sum(utils.spectrum(series)[1]))
201 |
202 |
class TestStatcheck(object):
    """Tests for utils.statcheck()."""

    def test_stationary(self):
        # Two identical halves: the test cannot reject stationarity.
        half = np.arange(500)
        assert_allclose(utils.statcheck(np.hstack([half, half]))[1], 1.0)

    def test_non_stationary(self):
        # A monotonic trend is about as nonstationary as it gets.
        assert_(utils.statcheck(np.arange(1000))[1] < 1E-30)
212 |
--------------------------------------------------------------------------------
/nolitsa/utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Miscellaneous utility functions.
4 |
5 | A module for common utility functions used elsewhere.
6 |
7 | * corrupt -- corrupts a time series with noise.
8 | * dist -- computes the distance between points from two arrays.
9 | * gprange -- generates a geometric progression between two points.
10 | * neighbors -- finds the nearest neighbors of all points in an array.
11 | * parallel_map -- a parallel version of map().
12 | * reconstruct -- constructs time-delayed vectors from a scalar time
13 | series.
14 | * rescale -- rescales a scalar time series into a desired interval.
15 | * spectrum -- returns the power spectrum of a scalar time series.
16 | * statcheck -- checks if a time series is stationary.
17 | """
18 |
19 | from __future__ import absolute_import, division, print_function
20 |
21 | import numpy as np
22 |
23 | from scipy import stats
24 | from scipy.spatial import cKDTree as KDTree
25 | from numba import jit
26 |
27 |
def corrupt(x, y, snr=100):
    """Corrupt time series with noise.

    Corrupts input time series with supplied noise to obtain a series
    with the specified signal-to-noise ratio.

    Parameters
    ----------
    x : array
        1-D array with scalar time series (the 'signal').
    y : ndarray
        1-D array with noise (the 'noise').
    snr : float, optional (default = 100).
        Signal-to-noise ratio (SNR) (see Notes).

    Returns
    -------
    x : array
        1-D array with corrupted series.

    Raises
    ------
    ValueError
        If the signal and noise arrays differ in length.

    Notes
    -----
    Contrary to the convention used in engineering sciences, here SNR is
    defined as the ratio of the variance of the signal to the variance
    of the noise.  The noise is also assumed to have zero mean.
    """
    if len(x) != len(y):
        # (BUG FIX: removed a stray ')' that appeared inside this message.)
        raise ValueError('Signal and noise arrays should be of equal length.')

    # Center the noise, then scale it so that var(x)/var(noise) == snr.
    y = y - np.mean(y)
    return x + (np.std(x) / np.sqrt(snr)) * (y / np.std(y))
59 |
60 |
def dist(x, y, metric='chebyshev'):
    """Compute the distance between all sequential pairs of points.

    Computes the distance between corresponding pairs of points (row i
    of x against row i of y) using the requested metric.

    Parameters
    ----------
    x : ndarray
        Input array.
    y : ndarray
        Input array.
    metric : string, optional (default = 'chebyshev')
        Metric to use while computing distances.

    Returns
    -------
    d : ndarray
        Array containing distances.
    """
    dispatch = {
        'cityblock': cityblock_dist,
        'euclidean': euclidean_dist,
        'chebyshev': chebyshev_dist,
    }

    if metric not in dispatch:
        raise ValueError('Unknown metric. Should be one of "cityblock", '
                         '"euclidean", or "chebyshev".')

    return dispatch[metric](x, y)
92 |
93 |
@jit("float64[:](float64[:, :], float64[:, :])", nopython=True)
def cityblock_dist(x, y):
    # Row-wise cityblock (L1) distance between x and y.  An explicit
    # loop is used since np.array(list(map(...))) is not supported by
    # numba's nopython mode.
    n = x.shape[0]
    d = np.empty(n)
    for i in range(n):
        d[i] = np.sum(np.abs(x[i] - y[i]))
    return d
97 |
98 |
@jit("float64[:](float64[:, :], float64[:, :])", nopython=True)
def euclidean_dist(x, y):
    # Row-wise Euclidean (L2) distance between x and y.  An explicit
    # loop is used since np.array(list(map(...))) is not supported by
    # numba's nopython mode.
    n = x.shape[0]
    d = np.empty(n)
    for i in range(n):
        d[i] = np.sqrt(np.sum((x[i] - y[i]) ** 2))
    return d
102 |
103 |
@jit("float64[:](float64[:, :], float64[:, :])", nopython=True)
def chebyshev_dist(x, y):
    # Row-wise Chebyshev (maximum-norm) distance between x and y.  An
    # explicit loop is used since np.array(list(map(...))) is not
    # supported by numba's nopython mode.
    n = x.shape[0]
    d = np.empty(n)
    for i in range(n):
        d[i] = np.max(np.abs(x[i] - y[i]))
    return d
107 |
108 |
def gprange(start, end, num=100):
    """Return a geometric progression between start and end.

    Returns a geometric progression between start and end (inclusive).

    Parameters
    ----------
    start : float
        Starting point of the progression (must be nonzero).
    end : float
        Ending point of the progression (must be nonzero).
    num : int, optional (default = 100)
        Number of points between start and end (inclusive).

    Returns
    -------
    gp : array
        Required geometric progression.

    Raises
    ------
    ValueError
        If start or end is zero, or if start and end have different
        signs while num is odd (no real common ratio exists then).
    """
    # BUG FIX: start == 0 used to raise a raw ZeroDivisionError, and
    # end == 0 fell through to the misleading different-signs error.
    if start == 0 or end == 0:
        raise ValueError('Both start and end must be nonzero.')

    if end / start > 0:
        # Same sign: a positive real ratio always exists.
        ratio = (end / start) ** (1.0 / (num - 1))
    elif num % 2 == 0:
        # Different signs: a real (negative) ratio exists iff the
        # number of points is even.
        ratio = -abs(end / start) ** (1.0 / (num - 1))
    else:
        raise ValueError('If start and end have different signs, '
                         'a real ratio is possible iff num is even.')

    return start * ratio ** np.arange(num)
137 |
138 |
def neighbors(y, metric='chebyshev', window=0, maxnum=None):
    """Find nearest neighbors of all points in the given array.

    Finds the nearest neighbors of all points in the given array using
    SciPy's KDTree search.

    Parameters
    ----------
    y : ndarray
        N-dimensional array containing time-delayed vectors.
    metric : string, optional (default = 'chebyshev')
        Metric to use for distance computation.  Must be one of
        "cityblock" (aka the Manhattan metric), "chebyshev" (aka the
        maximum norm metric), or "euclidean".
    window : int, optional (default = 0)
        Minimum temporal separation (Theiler window) that should exist
        between near neighbors.  This is crucial while computing
        Lyapunov exponents and the correlation dimension.
    maxnum : int, optional (default = None (optimum))
        Maximum number of near neighbors that should be found for each
        point.  In rare cases, when there are no neighbors that are at a
        nonzero distance, this will have to be increased (i.e., beyond
        2 * window + 3).

    Returns
    -------
    index : array
        Array containing indices of near neighbors.
    dist : array
        Array containing near neighbor distances.

    Raises
    ------
    ValueError
        If the metric is unknown or maxnum >= len(y).
    Exception
        If no neighbor with a nonzero distance satisfying the Theiler
        window could be found within maxnum candidates.
    """
    # Map the metric name to the Minkowski order p used by KDTree.query().
    if metric == 'cityblock':
        p = 1
    elif metric == 'euclidean':
        p = 2
    elif metric == 'chebyshev':
        p = np.inf
    else:
        raise ValueError('Unknown metric. Should be one of "cityblock", '
                         '"euclidean", or "chebyshev".')

    tree = KDTree(y)
    n = len(y)

    if not maxnum:
        # Default: enough candidates to skip the point itself plus up
        # to `window` temporal neighbors on either side.
        maxnum = (window + 1) + 1 + (window + 1)
    else:
        maxnum = max(1, maxnum)

    if maxnum >= n:
        raise ValueError('maxnum is bigger than array length.')

    dists = np.empty(n)
    indices = np.empty(n, dtype=int)

    for i, x in enumerate(y):
        # Query with k = 2, 3, ... candidates until one is found that
        # lies outside the Theiler window AND is at a nonzero distance.
        for k in range(2, maxnum + 2):
            dist, index = tree.query(x, k=k, p=p)
            valid = (np.abs(index - i) > window) & (dist > 0)

            if np.count_nonzero(valid):
                # Candidates come back sorted by distance, so the first
                # valid one is the nearest admissible neighbor.
                dists[i] = dist[valid][0]
                indices[i] = index[valid][0]
                break

            if k == (maxnum + 1):
                raise Exception('Could not find any near neighbor with a '
                                'nonzero distance. Try increasing the '
                                'value of maxnum.')

    return np.squeeze(indices), np.squeeze(dists)
210 |
211 |
def parallel_map(func, values, args=tuple(), kwargs=dict(),
                 processes=None):
    """Use Pool.apply_async() to get a parallel map().

    Uses Pool.apply_async() to provide a parallel version of map().
    Unlike Pool's map() which does not let you accept arguments and/or
    keyword arguments, this one does.

    Parameters
    ----------
    func : function
        This function will be applied on every element of values in
        parallel.
    values : array
        Input array.
    args : tuple, optional (default: ())
        Additional arguments for func.
    kwargs : dictionary, optional (default: {})
        Additional keyword arguments for func.
    processes : int, optional (default: None)
        Number of processes to run in parallel.  By default, the output
        of cpu_count() is used.

    Returns
    -------
    results : array
        Output after applying func on each element in values.
    """
    # True single core processing, in order to allow the func to be
    # executed in a Pool in a calling script.
    if processes == 1:
        return np.asarray([func(value, *args, **kwargs) for value in values])

    from multiprocessing import Pool

    pool = Pool(processes=processes)
    results = [pool.apply_async(func, (value,) + args, kwargs)
               for value in values]

    pool.close()
    pool.join()

    # BUG FIX: this path used to return a plain list while the
    # processes == 1 path returned an ndarray; return an array in both
    # cases, as the docstring promises.
    return np.asarray([result.get() for result in results])
255 |
256 |
def reconstruct(x, dim=1, tau=1):
    """Construct time-delayed vectors from a time series.

    Constructs time-delayed vectors from a scalar time series, i.e.,
    vectors of the form (x[i], x[i + tau], ..., x[i + (dim - 1)*tau]).

    Parameters
    ----------
    x : array
        1-D scalar time series.
    dim : int, optional (default = 1)
        Embedding dimension.
    tau : int, optional (default = 1)
        Time delay

    Returns
    -------
    ps : ndarray
        Array with time-delayed vectors.
    """
    n_vectors = len(x) - (dim - 1) * tau
    if n_vectors <= 0:
        raise ValueError('Length of the time series is <= (dim - 1) * tau.')

    # Column j holds the series shifted by j*tau; stacking the columns
    # gives one delay vector per row.
    columns = [x[j * tau:j * tau + n_vectors] for j in range(dim)]
    return np.column_stack(columns)
281 |
282 |
def rescale(x, interval=(0, 1)):
    """Rescale the given scalar time series into a desired interval.

    Rescales the given scalar time series into a desired interval using
    a simple linear transformation.

    Parameters
    ----------
    x : array_like
        Scalar time series.
    interval: tuple, optional (default = (0, 1))
        Extent of the interval specified as a tuple.

    Returns
    -------
    y : array
        Rescaled scalar time series.

    Raises
    ------
    ValueError
        If the target interval is degenerate or the series is constant.
    """
    x = np.asarray(x)
    if interval[1] == interval[0]:
        raise ValueError('Interval must have a nonzero length.')

    # BUG FIX: a constant series used to divide by zero silently and
    # return NaNs/infs; fail loudly instead.
    xmin, xmax = np.min(x), np.max(x)
    if xmax == xmin:
        raise ValueError('Cannot rescale a constant series into a '
                         'nondegenerate interval.')

    return interval[0] + (x - xmin) * (interval[1] - interval[0]) / (xmax - xmin)
307 |
308 |
def spectrum(x, dt=1.0, detrend=False):
    """Return the power spectrum of the given time series.

    Returns the power spectrum of the given time series.  This function
    is a very simple implementation that does not involve any averaging
    or windowing and assumes that the input series is periodic.  For
    real-world data, use scipy.signal.welch() for accurate estimation of
    the power spectrum.

    Parameters
    ----------
    x : array
        1-D real input array of length N containing the time series.
    dt : float, optional (default = 1.0)
        Sampling time (= 1/(sampling rate)).
    detrend : bool, optional (default=False)
        Subtract the mean from the series (i.e., a constant detrend).

    Returns
    -------
    freqs : array
        Array containing frequencies k/(N*dt) for k = 1, ..., N/2.
    power : array
        Array containing P(f).

    Example
    -------
    >>> signal = np.random.random(1024)
    >>> power = spectrum(signal)[1]
    >>> np.allclose(np.mean(signal ** 2), np.sum(power))
    True

    The above example is just the Parseval's theorem which states that
    the mean squared amplitude of the input signal is equal to the sum
    of P(f).
    """
    n = len(x)

    if detrend:
        x = x - np.mean(x)

    # One-sided power following Section 13.4 of Press et al. (2007):
    # interior frequencies pick up a factor of two, while the DC term
    # (and the Nyquist term, for even n) appear only once.
    power = 2.0 * np.abs(np.fft.rfft(x)) ** 2 / n ** 2
    power[0] /= 2.0
    if n % 2 == 0:
        power[-1] /= 2.0

    return np.fft.rfftfreq(n, d=dt), power
358 |
359 |
def statcheck(x, bins=100):
    """Check for stationarity using a chi-squared test.

    Checks for stationarity in a time series using the stationarity
    test discussed by Isliker & Kurths (1993): the distribution of the
    first half of the series is compared against the distribution of
    the full series.

    Parameters
    ----------
    x : array
        Input time series
    bins : int, optional (default = 100)
        Number of equiprobable bins used to compute the histograms.

    Returns
    -------
    chisq : float
        Chi-squared test statistic.
    p : float
        p-value of the test computed according to the number of bins
        used and chisq, using the chi-squared distribution.  If it is
        smaller than the significance level (say, 0.05), the series is
        nonstationary.  (One should actually say we can reject the
        null hypothesis of stationarity at 0.05 significance level.)

    Raises
    ------
    ValueError
        If the series is too short to put at least 5 points in each bin.

    Notes
    -----
    The value of bins should be selected such that there is at least 5
    points in each bin.
    """
    # BUG FIX: the old condition `len(x) / bins <= 5` rejected series
    # with exactly 5 points per bin, contradicting both the docstring
    # and the error message below.
    if len(x) < 5 * bins:
        raise ValueError('Using %d bins will result in bins with '
                         'less than 5 points each.' % bins)

    # Use the m-quantile function to compute equiprobable bins.
    prob = np.arange(1.0 / bins, 1.0, 1.0 / bins)
    bins = np.append(stats.mstats.mquantiles(x, prob=prob), np.max(x))

    p_full = np.histogram(x, bins)[0]
    p_full = p_full / np.sum(p_full)

    # Histogram of the first half vs. counts expected from the full
    # series distribution.
    y = x[:len(x) // 2]
    observed = np.histogram(y, bins)[0]
    expected = len(y) * p_full

    # Since 2021 or so SciPy fails to do a chisquare test if the sum of
    # the observed frequencies don't agree with the sum of expected
    # frequencies.  So we cheat a bit and make the sums the same.
    # https://github.com/scipy/scipy/issues/14298
    expected = expected / np.sum(expected) * np.sum(observed)

    return stats.chisquare(observed, expected)
411 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
3 | numba
4 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from setuptools import setup
5 |
setup(
    name='nolitsa',
    version='0.1',
    description='A rudimentary Python module for nonlinear time series analysis',
    long_description="""\
NoLiTSA is a Python module that implements some standard algorithms used
for nonlinear time series analysis.""",
    keywords='chaos nonlinear time series analysis',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: BSD License',
        # BUG FIX: a missing comma used to fuse the next two classifiers
        # into one invalid string through implicit concatenation.
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
    ],
    author='Manu Mannattil',
    author_email='manu.mannattil@gmail.com',
    license='BSD',
    packages=['nolitsa'],
    # numba is imported by nolitsa/utils.py and listed in
    # requirements.txt, so it must be declared here as well.
    install_requires=['numpy>=1.11.0', 'scipy>=0.17.0', 'numba'],
    test_suite='pytest',
    tests_require=['pytest>=7.0.0'],
    include_package_data=True,
    zip_safe=False
)
32 |
--------------------------------------------------------------------------------