├── .gitignore
├── CITATION.cff
├── LICENSE
├── README.md
├── _config.yml
├── _toc.yml
├── env.yml
├── logo.png
├── notebooks
├── 10
│ ├── media
│ │ ├── RNN.gif
│ │ ├── Readout.png
│ │ ├── Reservoir.gif
│ │ ├── Reservoir_pred1.png
│ │ ├── Reservoir_pred2.png
│ │ ├── bptt.png
│ │ ├── cover.png
│ │ ├── data_split.png
│ │ ├── dynamics.png
│ │ ├── load_vs_temp.png
│ │ ├── mlp.png
│ │ ├── mlp_windowed.png
│ │ ├── robot.png
│ │ ├── sin.gif
│ │ ├── sin_pred.png
│ │ ├── tanh.png
│ │ ├── windowed.png
│ │ └── word_pred.gif
│ └── nn-reservoir-computing.ipynb
├── 11
│ ├── media
│ │ ├── Great-britain-coastline-paradox.gif
│ │ ├── Logistic_Map_Bifurcations_Underneath_Mandelbrot_Set.gif
│ │ ├── cover.png
│ │ ├── cube.png
│ │ ├── dimensions.png
│ │ ├── koch1.png
│ │ ├── koch2.png
│ │ ├── koch3.png
│ │ ├── line.png
│ │ ├── logistic-zoom.gif
│ │ ├── lokta_volt.png
│ │ ├── lorenz.gif
│ │ ├── menger-sr.jpeg
│ │ ├── partial.png
│ │ ├── phase_space.png
│ │ ├── predict.png
│ │ ├── reconstruct.png
│ │ ├── rossler_attractor.png
│ │ ├── sinusoid.png
│ │ ├── square.png
│ │ ├── takens.png
│ │ └── triangle.png
│ └── nonlinear-ts.ipynb
├── 12
│ ├── classification-clustering.ipynb
│ └── media
│ │ ├── DTW_idea_1.gif
│ │ ├── DTW_idea_2.gif
│ │ ├── RC_classifier.png
│ │ ├── UWaveGestureLibrary.jpg
│ │ ├── bidir.png
│ │ ├── conditions.png
│ │ ├── cost.gif
│ │ ├── cover.png
│ │ ├── dim_red.png
│ │ ├── dist_matrix.png
│ │ ├── ensemble.png
│ │ ├── last_state.png
│ │ ├── map.png
│ │ ├── matrix.png
│ │ ├── mts_data.png
│ │ ├── output_ms.png
│ │ ├── path.gif
│ │ ├── precision-recall.png
│ │ ├── recursion.gif
│ │ ├── redundant.png
│ │ ├── reservoir_ms.png
│ │ ├── soft_clust.png
│ │ ├── start_end.png
│ │ ├── tck_scheme.png
│ │ ├── unidir.png
│ │ ├── warping.gif
│ │ ├── warping_constrained.gif
│ │ └── warping_fix.gif
├── 00
│ ├── intro.md
│ ├── knowledge_test.md
│ ├── media
│ │ ├── slides_nb.png
│ │ ├── slides_rise.png
│ │ ├── slides_rise2.png
│ │ ├── slides_rise3.png
│ │ └── topics.png
│ └── resources.md
├── 01
│ ├── introduction_to_time_series.ipynb
│ └── media
│ │ ├── anomaly.png
│ │ ├── bar.png
│ │ ├── bar_unequal.png
│ │ ├── co2.png
│ │ ├── cover.png
│ │ ├── ecommerce.png
│ │ ├── economic.png
│ │ ├── electricity.png
│ │ ├── equally_spaced.png
│ │ ├── not_equally_spaced.png
│ │ ├── partial.png
│ │ ├── passengers.png
│ │ ├── passengers_trend.png
│ │ ├── random_var.png
│ │ ├── sunspots.png
│ │ ├── time_delta.png
│ │ ├── ts_equal.png
│ │ ├── ts_unequal.png
│ │ └── water_temps.png
├── 02
│ ├── media
│ │ ├── cover.png
│ │ ├── int.png
│ │ ├── nonstationary_chunk.png
│ │ ├── periodic_mean.png
│ │ └── stationary_chunk.png
│ └── stationarity.ipynb
├── 03
│ ├── media
│ │ ├── EqWMA.gif
│ │ ├── ExpWMA.gif
│ │ ├── cover.png
│ │ └── values.png
│ └── smoothing.ipynb
├── 04
│ ├── ar-ma.ipynb
│ └── media
│ │ ├── cover.png
│ │ └── timoelliot.png
├── 05
│ ├── arma_arima_sarima.ipynb
│ └── media
│ │ └── cover.png
├── 06
│ ├── media
│ │ ├── cover.png
│ │ └── non-stationarity.png
│ └── unit-root-hurst.ipynb
├── 07
│ ├── kalman-filter.ipynb
│ └── media
│ │ ├── correct.png
│ │ ├── cover.png
│ │ ├── extreme1.png
│ │ ├── extreme2.png
│ │ ├── gps.png
│ │ ├── innovation.png
│ │ ├── inputs.png
│ │ ├── kalman.png
│ │ ├── model_pred.png
│ │ ├── system.png
│ │ └── update.png
├── 08
│ ├── media
│ │ ├── cover.png
│ │ ├── filter.png
│ │ └── ft.png
│ └── signal-transforms-filters.ipynb
└── 09
│ ├── media
│ │ └── cover.png
│ └── prophet.ipynb
├── setup.py
└── tsa_course
├── __init__.py
├── lecture1.py
├── lecture11.py
├── lecture2.py
└── lecture8.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Build files
2 | _build/*
3 |
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | share/python-wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 | cover/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 | db.sqlite3
65 | db.sqlite3-journal
66 |
67 | # Flask stuff:
68 | instance/
69 | .webassets-cache
70 |
71 | # Scrapy stuff:
72 | .scrapy
73 |
74 | # Sphinx documentation
75 | docs/_build/
76 |
77 | # PyBuilder
78 | .pybuilder/
79 | target/
80 |
81 | # Jupyter Notebook
82 | .ipynb_checkpoints
83 |
84 | # IPython
85 | profile_default/
86 | ipython_config.py
87 |
88 | # pyenv
89 | # For a library or package, you might want to ignore these files since the code is
90 | # intended to run in multiple environments; otherwise, check them in:
91 | # .python-version
92 |
93 | # pipenv
94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
97 | # install all needed dependencies.
98 | #Pipfile.lock
99 |
100 | # poetry
101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
102 | # This is especially recommended for binary packages to ensure reproducibility, and is more
103 | # commonly ignored for libraries.
104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
105 | #poetry.lock
106 |
107 | # pdm
108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
109 | #pdm.lock
110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
111 | # in version control.
112 | # https://pdm.fming.dev/#use-with-ide
113 | .pdm.toml
114 |
115 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
116 | __pypackages__/
117 |
118 | # Celery stuff
119 | celerybeat-schedule
120 | celerybeat.pid
121 |
122 | # SageMath parsed files
123 | *.sage.py
124 |
125 | # Environments
126 | .env
127 | .venv
128 | env/
129 | venv/
130 | ENV/
131 | env.bak/
132 | venv.bak/
133 |
134 | # Spyder project settings
135 | .spyderproject
136 | .spyproject
137 |
138 | # Rope project settings
139 | .ropeproject
140 |
141 | # mkdocs documentation
142 | /site
143 |
144 | # mypy
145 | .mypy_cache/
146 | .dmypy.json
147 | dmypy.json
148 |
149 | # Pyre type checker
150 | .pyre/
151 |
152 | # pytype static type analyzer
153 | .pytype/
154 |
155 | # Cython debug symbols
156 | cython_debug/
157 |
158 | # PyCharm
159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
161 | # and can be added to the global gitignore or merged into this file. For a more nuclear
162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
163 | #.idea/
164 |
165 | *.DS_Store
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.0.0
2 | message: "If you use this material in your work, please cite it as below."
3 | authors:
4 | - family-names: "Bianchi"
5 | given-names: "Filippo Maria"
6 | title: "Time Series Analysis with Python"
7 | date-released: 2024-04-15
8 | license: MIT
9 | url: "https://github.com/FilippoMB/python-time-series-handbook"
10 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Filippo Maria Bianchi
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Time Series Analysis with Python
2 |
3 |
4 |
5 |
6 |
7 |
8 |
11 |
12 |
13 | [Downloads](https://pepy.tech/projects/tsa-course)
14 |
15 | This is the collection of notebooks for the course *Time Series Analysis with Python*.
16 | You can view and execute the notebooks by clicking on the buttons below.
17 |
18 | ## 📑 Content
19 |
20 | 1. **Introduction to time series analysis**
21 | - Definition of time series data
22 | - Main applications of time series analysis
23 | - Statistical vs dynamical models perspective
24 | - Components of a time series
25 | - Additive vs multiplicative models
26 | - Time series decomposition techniques
27 |
28 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/01/introduction_to_time_series.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/01/introduction_to_time_series.ipynb)
29 |
30 |
31 |
32 | 2. **Stationarity in time series**
33 | - Stationarity in time series
34 | - Weak vs strong stationarity
35 | - Autocorrelation and autocovariance
36 | - Common stationary and nonstationary time series
37 | - How to identify stationarity
38 | - Transformations to achieve stationarity
39 |
40 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/02/stationarity.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/02/stationarity.ipynb)
41 |
42 |
43 |
44 | 3. **Smoothing**
45 | - Smoothing in time series data
46 | - The mean squared error
47 | - Simple average, moving average, and weighted moving average
48 | - Single, double, and triple exponential smoothing
49 |
50 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/03/smoothing.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/03/smoothing.ipynb)
51 |
52 |
53 |
54 | 4. **AR-MA**
55 | - The autocorrelation function
56 | - The partial autocorrelation function
57 | - The Auto-Regressive model
58 | - The Moving-Average model
59 | - Reverting stationarity transformations in forecasting
60 |
61 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/04/ar-ma.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/04/ar-ma.ipynb)
62 |
63 |
64 |
65 | 5. **ARMA, ARIMA, SARIMA**
66 | - Autoregressive Moving Average (ARMA) models
67 | - Autoregressive Integrated Moving Average (ARIMA) models
68 | - SARIMA models (ARIMA model for data with seasonality)
69 | - Automatic model selection with AutoARIMA
70 | - Model selection with exploratory data analysis
71 |
72 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/05/arma_arima_sarima.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/05/arma_arima_sarima.ipynb)
73 |
74 |
75 |
76 | 6. **Unit root test and Hurst exponent**
77 | - Unit root test
78 | - Mean Reversion
79 | - The Hurst exponent
80 | - Geometric Brownian Motion
81 | - Applications in quantitative finance
82 |
83 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/06/unit-root-hurst.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/06/unit-root-hurst.ipynb)
84 |
85 |
86 |
87 | 7. **Kalman filter**
88 | - Introduction to Kalman Filter
89 | - Model components and assumptions
90 | - The Kalman Filter algorithm
91 | - Application to static and dynamic one-dimensional data
92 | - Application to higher-dimensional data
93 |
94 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/07/kalman-filter.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/07/kalman-filter.ipynb)
95 |
96 |
97 |
98 | 8. **Signal transforms and filters**
99 | - Introduction to Fourier Transform, Discrete Fourier Transform, and FFT
100 | - Fourier Transform of common signals
101 | - Properties of the Fourier Transform
102 | - Signal filtering with low-pass, high-pass, band-pass, and band-stop filters
103 | - Application of Fourier Transform to time series forecasting
104 |
105 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/08/signal-transforms-filters.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/08/signal-transforms-filters.ipynb)
106 |
107 |
108 |
109 | 9. **Prophet**
110 | - Introduction to Prophet for time series forecasting
111 | - Advanced modeling of trend, seasonality, and holidays components
112 | - The Prophet library in Python
113 |
114 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/09/prophet.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/09/prophet.ipynb)
115 |
116 |
117 |
118 | 10. **Neural networks and Reservoir Computing**
119 | - Windowed approaches and Neural Networks for time series forecasting
120 | - Forecasting with a Multi-Layer Perceptron
121 | - Recurrent Neural Networks: advantages and challenges
122 | - Reservoir Computing and the Echo State Network
123 | - Dimensionality reduction with Principal Component Analysis
124 | - Forecasting electricity consumption with Multi-Layer Perceptron and Echo State Network
125 |
126 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/10/nn-reservoir-computing.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/10/nn-reservoir-computing.ipynb)
127 |
128 |
129 |
130 | 11. **Non-linear time series analysis**
131 | - Dynamical systems and nonlinear dynamics
132 | - Bifurcation diagrams
133 | - Chaotic systems
134 | - High-dimensional continuous-time systems
135 | - Fractal dimensions
136 | - Phase space reconstruction and Takens' embedding theorem
137 | - Forecasting time series from nonlinear systems
138 |
139 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/11/nonlinear-ts.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/11/nonlinear-ts.ipynb)
140 |
141 |
142 |
143 | 12. **Time series classification and clustering**
144 | - Multivariate time series
145 | - Time series similarities and dissimilarities
146 | - Dynamic Time Warping
147 | - Time series kernels
148 | - Embedding time series into vectors
149 | - Classification of time series
150 | - Clustering of time series
151 | - Visualizing time series with kernel PCA
152 |
153 | [Open in nbviewer](https://nbviewer.jupyter.org/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/12/classification-clustering.ipynb) or [Open in Colab](https://colab.research.google.com/github/FilippoMB/python-time-series-handbook/blob/main/notebooks/12/classification-clustering.ipynb)
154 |
155 |
156 |
157 | ## 💻 How to code locally
158 |
159 | To run the notebooks locally the recommended steps are the following:
160 |
161 | 1. Download and install [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/miniconda-install.html).
162 |
163 | 2. Download the [env.yml](https://github.com/FilippoMB/python-time-series-handbook/blob/main/env.yml) file.
164 |
165 | 3. Open the shell and navigate to the location with the `yml` file you just downloaded.
166 | - If you are on Windows, open the Miniconda shell.
167 |
168 | 4. Install the environment with
169 | ```{bash}
170 | > conda env create -f env.yml
171 | ```
172 |
173 | 5. Activate your environment:
174 | ```{bash}
175 | > conda activate pytsa
176 | ```
177 |
178 | 6. Go to the folder with the notebooks
179 |
180 | 7. Launch Jupyter Lab with the command
181 | ```{bash}
182 | > jupyter lab
183 | ```
184 |
185 | ## 🎥 Notebook format and slides
186 |
187 | The notebooks are structured as a sequence of slides to be presented using [RISE](https://rise.readthedocs.io/en/latest/).
188 | If you open a notebook you will see the following structure:
189 |
190 |
191 |
192 | The top-right button indicates the type of slide, which is stored in the metadata of the cell. To enable the visualization of the slide type, you must first install RISE and then select `View -> Cell Toolbar -> Slideshow` from the top menu. Also, to split the cells as in the example, you must enable `Split Cells Notebook` from the [nbextensions](https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/index.html).
193 |
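If you want to check the slide type assigned to each cell without opening the notebook, here is a minimal sketch using `nbformat` (the notebook path below is just an example):

```python
import nbformat

# Path is illustrative; point it at any notebook of the course.
nb = nbformat.read("notebooks/01/introduction_to_time_series.ipynb", as_version=4)

for i, cell in enumerate(nb.cells):
    # RISE stores the slide type in the cell metadata under the "slideshow" key.
    slide_type = cell.metadata.get("slideshow", {}).get("slide_type", "-")
    print(f"cell {i:3d} [{cell.cell_type:8s}] slide_type={slide_type}")
```
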
194 | By pressing the `Enter/Exit RISE Slideshow` button at the top, you can enter the slideshow presentation.
195 |
196 |
197 |
198 |
199 |
200 | See the [RISE documentation](https://rise.readthedocs.io/en/latest/) for more info.
201 |
202 | ## 📝 Citation
203 |
204 | If you are using this material in your courses or in your research, please consider citing it as follows:
205 |
206 | ````bibtex
207 | @misc{bianchi2024tsbook,
208 | author = {Filippo Maria Bianchi},
209 | title = {Time Series Analysis with Python},
210 | year = {2024},
211 | howpublished = {Online},
212 | url = {https://github.com/FilippoMB/python-time-series-handbook}
213 | }
214 | ````
215 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | # Book settings
2 | title: Time series analysis with Python
3 | author: Filippo Maria Bianchi
4 | logo: logo.png
5 |
6 | # Force re-execution of notebooks on each build.
7 | execute:
8 | execute_notebooks: 'cache' # off, force, auto
9 | timeout: -1 # Disable timeouts for executing notebooks
10 | # # Define the name of the latex output file for PDF builds
11 | # latex:
12 | # latex_documents:
13 | # targetname: book.tex
14 |
15 | # # Add a bibtex file so that we can create citations
16 | # bibtex_bibfiles:
17 | # - references.bib
18 |
19 | # Information about where the book exists on the web
20 | repository:
21 | url: https://github.com/FilippoMB/python-time-series-handbook # Online location of the book
22 | # path_to_book: docs # Optional path to the book, relative to the repository root
23 | branch: master # Which branch of the repository should be used when creating links (optional)
24 |
25 | # Add GitHub buttons to your book
26 | # See https://jupyterbook.org/customize/config.html#add-a-link-to-your-repository
27 | html:
28 | use_issues_button: true
29 | use_repository_button: true
30 | favicon : "logo.png"
31 |
32 | parse:
33 | myst_enable_extensions:
34 | # Defaults here: https://jupyterbook.org/en/stable/customize/config.html
35 | - html_image
36 | - amsmath
37 | - dollarmath
38 |
--------------------------------------------------------------------------------
/_toc.yml:
--------------------------------------------------------------------------------
1 | # Table of contents
2 | # Learn more at https://jupyterbook.org/customize/toc.html
3 |
4 | format: jb-book
5 | root: notebooks/00/intro
6 | chapters:
7 | - file: notebooks/01/introduction_to_time_series
8 | - file: notebooks/02/stationarity
9 | - file: notebooks/03/smoothing
10 | - file: notebooks/04/ar-ma
11 | - file: notebooks/05/arma_arima_sarima
12 | - file: notebooks/06/unit-root-hurst
13 | - file: notebooks/07/kalman-filter
14 | - file: notebooks/08/signal-transforms-filters
15 | - file: notebooks/09/prophet
16 | - file: notebooks/10/nn-reservoir-computing
17 | - file: notebooks/11/nonlinear-ts
18 | - file: notebooks/12/classification-clustering
19 | - file: notebooks/00/knowledge_test
20 | - file: notebooks/00/resources
--------------------------------------------------------------------------------
/env.yml:
--------------------------------------------------------------------------------
1 | name: pytsa
2 | channels:
3 | - conda-forge
4 | - defaults
5 | dependencies:
6 | - python=3.10
7 | - pip
8 | - matplotlib
9 | - statsmodels
10 | - pandas
11 | - pandas-datareader
12 | - seaborn
13 | - scipy
14 | - scikit-learn
15 | - jupyterlab
16 | - tqdm
17 | - pmdarima
18 | - opencv
19 | - prophet
20 | - dtaidistance
21 | - plotly
22 | - ipywidgets
23 | - pip:
24 | - tsa-course
25 | - yfinance
26 | - reservoir-computing
27 | - tck
--------------------------------------------------------------------------------
/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/logo.png
--------------------------------------------------------------------------------
/notebooks/00/intro.md:
--------------------------------------------------------------------------------
1 | # Time series analysis with Python
2 |
3 |
4 |
5 | Welcome to a journey through the world of time series analysis using Python! This collection of Jupyter notebooks serves as both a comprehensive course and a practical guide for students, data scientists, and researchers interested in exploring the interplay between statistical theories and practical applications in time series analysis.
6 |
7 | Time series analysis is a crucial discipline in data science, offering insights into patterns over time that are invaluable for forecasting, anomaly detection, and understanding temporal dynamics. The aim of this course is to introduce fundamental concepts of time series analysis from multiple perspectives: statistical, dynamical systems, machine learning, and signal processing. This interdisciplinary approach gives the reader a broad view of the world of time series.
8 |
9 | ```{image} media/topics.png
10 | :alt: topics
11 | :width: 500px
12 | :align: center
13 | ```
14 | The course is designed to combine high-level theoretical knowledge with practical programming skills. Each chapter introduces key concepts of time series analysis, followed by hands-on coding sections. This structure allows you to immediately apply the theoretical concepts you learn, seeing first-hand how these translate into functional tools in data analytics. Through this process, you will gain both the knowledge to understand complex time series data and the skills to analyze and predict it effectively.
15 | To reinforce learning and encourage active engagement, each chapter concludes with exercises. These are designed to test your understanding and help you apply the lessons in practical contexts.
16 |
17 | Whether you are new to time series analysis or looking to refine your expertise, this course offers a broad exploration of the field, with Python as your toolkit. I hope that you will find this material both educational and entertaining, bringing you a step closer to mastering time series analysis.
18 |
19 |
20 | ## 📖 Chapters
21 |
22 | The course is organized into the following chapters.
23 |
24 | ```{tableofcontents}
25 | ```
26 |
27 | ```{note}
28 | The notebooks are presented in class as slides using RISE (see [here](https://github.com/FilippoMB/python-time-series-handbook?tab=readme-ov-file#-notebook-format-and-slides) for more details).
29 | For this reason, the text in the notebooks is organized with bullet points.
30 | ```
31 |
32 |
33 | ## 🎓 University courses
34 |
35 | These notebooks are currently adopted in [STA-2003 Tidsrekker](https://sa.uit.no/utdanning/emner/emne?p_document_id=822793) at UiT The Arctic University of Norway and [062785 - Time Series Analysis](https://www11.ceda.polimi.it/schedaincarico/schedaincarico/controller/scheda_pubblica/SchedaPublic.do?&evn_default=evento&c_classe=843368&lang=IT&__pj0=0&__pj1=2ad8b3dba5bb35c2b94e513147e1e84e) at the Polytechnic of Milan.
36 |
37 |
38 | ## 🚀 Getting started with coding
39 |
40 | You can read all the content of the course from here. However, to get hands-on experience with coding, you will want to run the code snippets or the whole notebooks, which you can download from the [GitHub repository](https://github.com/FilippoMB/python-time-series-handbook).
41 |
43 |
44 | To run the code and the notebooks the recommended steps are the following:
45 |
46 | 1. Download and install [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/miniconda-install.html).
47 |
48 | 2. Download the [env.yml](https://github.com/FilippoMB/python-time-series-handbook/blob/main/env.yml) file.
49 |
50 | 3. Open the shell and navigate to the location with the yml file you just downloaded.
51 | - If you are on Windows, open the Miniconda shell.
52 |
53 | 4. Install the environment with
54 | ```
55 | > conda env create -f env.yml
56 | ```
57 |
58 | 5. Activate your environment:
59 | ```
60 | > conda activate pytsa
61 | ```
62 |
63 | 6. Go to the folder with the notebooks
64 |
65 | 7. Launch Jupyter Lab with the command
66 | ```
67 | > jupyter lab
68 | ```
69 |
70 |
71 | ## ⚒ Roadmap
72 |
73 | ```{warning}
74 | This is an early version of the course. There might be imprecisions and errors. Also, some chapters might undergo significant revisions and changes.
75 | ```
76 |
77 | I am planning to add more content over time to cover additional topics in new chapters and to extend the existing ones with new examples and explanations.
78 | If there is a specific topic you feel is missing, or passages that are not clear enough, please open an Issue on the GitHub repository.
79 |
80 | ### A note on deep learning
81 |
82 | For the moment, I intentionally decided to leave more advanced deep learning techniques aside. There are a couple of reasons for this choice. Firstly, advanced deep learning methods rely heavily on specific knowledge and tools that are generally covered in specialized deep learning courses. This focus does not align with the introductory nature of this course, which is aimed at covering the fundamentals of time series analysis.
83 |
84 | Secondly, while deep learning opens up exciting avenues for new applications—such as NLP and analysis of video and spatio-temporal data—it primarily enhances capabilities in handling diverse data types and scaling to large datasets. However, for the core objectives of this course, which are to understand and manipulate time series data effectively, the advantages of moving from the basic neural networks introduced here to more complex deep learning models do not significantly alter the fundamental approach.
85 |
86 |
87 | ## 🤝 Contributing
88 |
89 | Time series analysis with Python is designed with accessibility in mind. The material is completely open-source and uses only free, Python-based software.
90 | You can contribute by adding new material, fixing typos, and suggesting edits. To do that,
91 | fork the GitHub repository and submit a pull request.
92 |
93 | Finally, if you liked this content, please share it with others who might find it useful and give it a
94 | star on GitHub.
95 |
96 |
97 | ## 📝 Citation
98 |
99 | If you are using this material in your courses or in your research, please consider citing it as follows:
100 |
101 | ````bibtex
102 | @misc{bianchi2024tsbook,
103 | author = {Filippo Maria Bianchi},
104 | title = {Time Series Analysis with Python},
105 | year = {2024},
106 | howpublished = {Online},
107 | url = {https://github.com/FilippoMB/python-time-series-handbook}
108 | }
109 | ````
--------------------------------------------------------------------------------
/notebooks/00/knowledge_test.md:
--------------------------------------------------------------------------------
1 | # Knowledge test
2 |
3 | ## Chapter 1
4 |
5 | **What is the primary purpose of time series analysis?**
6 | - A) To categorize different types of data as time series data.
7 | - B) To use statistical methods to describe and interpret data patterns.
8 | - C) To understand and forecast the behavior of a process that generates time series data.
9 | - D) To analyze the behavior of non-time series data over fixed intervals.
10 |
11 | ```{admonition} Answer
12 | :class: note, dropdown
13 | C
14 | ```
15 |
16 | **What type of data is typically analyzed using time series analysis?**
17 | - A) Textual data
18 | - B) Image data
19 | - C) Numerical data collected over time
20 | - D) Categorical data
21 |
22 | ```{admonition} Answer
23 | :class: note, dropdown
24 | C
25 | ```
26 |
27 | **In the context of time series analysis, which of the following best describes the concept of 'trend'?**
28 | - A) The irregular and unpredictable movement in data over a short period.
29 | - B) The consistent, long-term direction of a time series data set.
30 | - C) A repeating pattern or cycle observed within a given year.
31 | - D) Variations caused by specific one-time events.
32 |
33 | ```{admonition} Answer
34 | :class: note, dropdown
35 | B
36 | ```
37 |
38 | **How is time series analysis applied in the business sector?**
39 | - A) Primarily for historical data archiving
40 | - B) For designing new products
41 | - C) In demand forecasting and sales analysis
42 | - D) Only in employee performance tracking
43 |
44 | ```{admonition} Answer
45 | :class: note, dropdown
46 | C
47 | ```
48 |
49 | **Which method is commonly used to decompose time series data?**
50 | - A) Linear regression analysis
51 | - B) Fourier transform
52 | - C) Principal component analysis
53 | - D) Additive or multiplicative models
54 |
55 | ```{admonition} Answer
56 | :class: note, dropdown
57 | D
58 | ```
59 |
60 | **What is a crucial aspect to consider when dealing with time series data for accurate analysis?**
61 | - A) The frequency of data collection
62 | - B) The color coding of data points
63 | - C) The alphabetical ordering of data entries
64 | - D) The digital format of the data files
65 |
66 | ```{admonition} Answer
67 | :class: note, dropdown
68 | A
69 | ```
70 |
71 | **Which component of time series data adjusts for variations that recur with fixed periods throughout the data?**
72 | - A) Trend
73 | - B) Seasonality
74 | - C) Cyclical
75 | - D) Residual
76 |
77 | ```{admonition} Answer
78 | :class: note, dropdown
79 | B
80 | ```
81 |
82 |
83 |
84 | ## Chapter 2
85 |
86 | **Which of the following tests is used to determine the stationarity of a time series?**
87 | - A) Pearson correlation test
88 | - B) Chi-square test
89 | - C) Augmented Dickey-Fuller test
90 | - D) T-test
91 |
92 | ```{admonition} Answer
93 | :class: note, dropdown
94 | C
95 | ```
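
A minimal sketch of the Augmented Dickey-Fuller test from the answer, using `statsmodels` (the two series are synthetic and only serve as an illustration):

```python
import numpy as np
from statsmodels.tsa.stattools import adfuller

rng = np.random.default_rng(0)
white_noise = rng.normal(size=500)             # stationary: the test should reject the unit root
random_walk = np.cumsum(rng.normal(size=500))  # non-stationary: the test should not reject

for name, series in [("white noise", white_noise), ("random walk", random_walk)]:
    stat, pvalue = adfuller(series)[:2]
    print(f"{name}: ADF statistic = {stat:.2f}, p-value = {pvalue:.3f}")
```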
96 |
97 | **What is the significance of stationarity in time series analysis?**
98 | - A) Stationarity is not significant; most modern time series models do not require it.
99 | - B) Stationarity is crucial because many time series forecasting models assume it, and nonstationary data can lead to unreliable models.
100 | - C) Stationarity only applies to financial time series and is irrelevant in other fields.
101 | - D) Stationarity ensures that the time series data does not require transformations before analysis.
102 |
103 | ```{admonition} Answer
104 | :class: note, dropdown
105 | B
106 | ```
107 |
108 | **Why is stationarity important for applying statistical models to time series data?**
109 | - A) Stationary data allows for easier identification of outliers.
110 | - B) Non-stationary data can lead to biases in model parameters.
111 | - C) Stationarity assures that the mean and variance are consistent over time, which is a common assumption in many time series models.
112 | - D) Stationary series ensure high performance across all types of data, regardless of the underlying trends.
113 |
114 | ```{admonition} Answer
115 | :class: note, dropdown
116 | C
117 | ```
118 |
119 | **What impact does non-stationarity have on the predictive modeling of time series data?**
120 | - A) It improves the accuracy of predictions by introducing variability.
121 | - B) It has no impact on the predictions as modern models adjust for it automatically.
122 | - C) It can lead to misleading results and poor forecasts because the statistical properties change over time.
123 | - D) It simplifies the model selection process by reducing the number of parameters.
124 |
125 | ```{admonition} Answer
126 | :class: note, dropdown
127 | C
128 | ```
129 |
130 | **What type of transformation is commonly applied to time series data to achieve stationarity?**
131 | - A) Logarithmic transformation
132 | - B) Polynomial transformation
133 | - C) Fourier transformation
134 | - D) Binary transformation
135 |
136 | ```{admonition} Answer
137 | :class: note, dropdown
138 | A
139 | ```
140 |
141 | **In time series analysis, why do we want to apply differencing?**
142 | - A) To increase the mean of the series over time.
143 | - B) To identify and remove trends and cycles, helping achieve stationarity.
144 | - C) To amplify the seasonal patterns in the data.
145 | - D) To convert non-numeric data into a usable format.
146 |
147 | ```{admonition} Answer
148 | :class: note, dropdown
149 | B
150 | ```
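
A minimal sketch of first-order differencing with `pandas` (the trending series is synthetic, for illustration only):

```python
import numpy as np
import pandas as pd

# Synthetic series with a linear trend plus noise: its mean changes over time.
rng = np.random.default_rng(1)
y = pd.Series(0.5 * np.arange(200) + rng.normal(size=200))

# First-order differencing: y_t - y_{t-1}. The trend is removed.
dy = y.diff().dropna()

print("original, mean of first/second half:", y.iloc[:100].mean(), y.iloc[100:].mean())
print("differenced, mean of first/second half:", dy.iloc[:100].mean(), dy.iloc[100:].mean())
```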
151 |
152 | **What is the primary goal of testing for stationarity in a time series dataset?**
153 | - A) To detect the presence of outliers in the dataset.
154 | - B) To ensure the dataset is suitable for seasonal adjustments.
155 | - C) To confirm the data’s statistical properties do not vary over time.
156 | - D) To increase the complexity of the statistical model.
157 |
158 | ```{admonition} Answer
159 | :class: note, dropdown
160 | C
161 | ```
162 |
163 | **What characteristic defines a random walk model?**
164 | - A) The values of the series are based on a deterministic trend.
165 | - B) Each value in the series is the sum of the previous value and a random error term.
166 | - C) The series values change according to a fixed seasonal pattern.
167 | - D) The series strictly follows a linear path without deviation.
168 |
169 | ```{admonition} Answer
170 | :class: note, dropdown
171 | B
172 | ```
173 |
174 | **Why is a random walk typically considered non-stationary in the context of time series analysis?**
175 | - A) Because its variance remains constant over time.
176 | - B) Because its mean and variance depend on the time at which the series is observed.
177 | - C) Because it consistently follows a predictable trend.
178 | - D) Because it has a fixed seasonal pattern.
179 |
180 | ```{admonition} Answer
181 | :class: note, dropdown
182 | B
183 | ```
184 |
185 | **How does the periodicity of a signal affect its stationarity in time series analysis?**
186 | - A) Periodic signals are always considered stationary because they exhibit regular cycles.
187 | - B) Periodic signals are non-stationary because their mean and variance are not constant over time.
188 | - C) Periodic signals become stationary only when their frequency matches the sampling rate.
189 | - D) Periodic signals do not affect the stationarity of a time series as they are considered noise.
190 |
191 | ```{admonition} Answer
192 | :class: note, dropdown
193 | B
194 | ```
195 |
196 | **What is the characteristic of white noise that generally qualifies it as a stationary process in time series analysis?**
197 | - A) Its mean and variance change over time.
198 | - B) Its mean and variance remain constant over time.
199 | - C) It exhibits a clear trend and seasonality.
200 | - D) Its frequency components vary with each observation.
201 |
202 | ```{admonition} Answer
203 | :class: note, dropdown
204 | B
205 | ```
206 |
207 | **How do autocorrelation and autocovariance relate to the concept of stationarity in time series data?**
208 | - A) Autocorrelation and autocovariance are only defined for non-stationary processes.
209 | - B) Stationary processes have constant autocorrelation and autocovariance that do not depend on time.
210 | - C) Autocorrelation and autocovariance decrease as a time series becomes more stationary.
211 | - D) Stationary processes exhibit zero autocorrelation and autocovariance at all times.
212 |
213 | ```{admonition} Answer
214 | :class: note, dropdown
215 | B
216 | ```
217 |
218 | **What does constant autocorrelation over time imply about the stationarity of a time series?**
219 | - A) It suggests that the time series is non-stationary, as autocorrelation should vary with time.
220 | - B) It indicates potential stationarity, as autocorrelation does not change over time.
221 | - C) It implies the need for further seasonal adjustment, irrespective of stationarity.
222 | - D) It demonstrates that the series is under-differenced and needs more transformations.
223 |
224 | ```{admonition} Answer
225 | :class: note, dropdown
226 | B
227 | ```
228 |
229 | **What does it indicate about a time series if the autocorrelations for several lags are very close to zero?**
230 | - A) The series is likely non-stationary with a strong trend.
231 | - B) The series is highly predictable at each time step.
232 | - C) The series likely exhibits white noise characteristics, suggesting it could be stationary.
233 | - D) The series shows a clear seasonal pattern.
234 |
235 | ```{admonition} Answer
236 | :class: note, dropdown
237 | C
238 | ```
239 |
240 | **What implication does a significant autocorrelation at lag 1 indicate about the stationarity of a time series?**
241 | - A) The series is stationary with no dependence between time steps.
242 | - B) The series exhibits long-term cyclic patterns, suggesting non-stationarity.
243 | - C) The series is likely non-stationary, indicating dependence between consecutive observations.
244 | - D) The series is perfectly predictable from one time step to the next.
245 |
246 | ```{admonition} Answer
247 | :class: note, dropdown
248 | C
249 | ```
250 |
251 | **How can summary statistics and histogram plots be used to assess the stationarity of a time series?**
252 | - A) By showing a consistent mean and variance in histograms across different time segments.
253 | - B) By demonstrating a decrease in variance over time in summary statistics.
254 | - C) By identifying a fixed mode in histogram plots regardless of time period.
255 | - D) By showing increasing skewness in summary statistics over different intervals.
256 |
257 | ```{admonition} Answer
258 | :class: note, dropdown
259 | A
260 | ```
261 |
262 | **What do consistent histogram shapes across different time segments suggest about the stationarity of a time series?**
263 | - A) The series likely exhibits non-stationary behavior due to changing distributions.
264 | - B) The series displays stationarity with similar distributions over time.
265 | - C) The histograms are irrelevant to stationarity and should not be used.
266 | - D) The series shows non-stationarity due to the presence of outliers.
267 |
268 | ```{admonition} Answer
269 | :class: note, dropdown
270 | B
271 | ```
272 |
273 |
274 | ## Chapter 3
275 |
276 | **Why is smoothing applied to time series data?**
277 | - A) To increase the frequency of data points
278 | - B) To highlight underlying trends in the data
279 | - C) To create more data points
280 | - D) To prepare data for real-time analysis
281 |
282 | ```{admonition} Answer
283 | :class: note, dropdown
284 | B
285 | ```
286 |
287 | **What is the formula for calculating the Mean Squared Error in time series analysis?**
288 | - A) MSE = Sum((Observed - Predicted)^2) / Number of Observations
289 | - B) MSE = Sum((Observed - Predicted) / Number of Observations)^2
290 | - C) MSE = (Sum(Observed - Predicted)^2) * Number of Observations
291 | - D) MSE = Square Root of [Sum((Observed - Predicted)^2) / Number of Observations]
292 |
293 | ```{admonition} Answer
294 | :class: note, dropdown
295 | A
296 | ```
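
A minimal sketch of the formula in option A (the arrays are illustrative):

```python
import numpy as np

observed = np.array([112.0, 118.0, 132.0, 129.0])
predicted = np.array([110.0, 120.0, 130.0, 127.0])

# MSE = sum((observed - predicted)^2) / number of observations
mse = np.mean((observed - predicted) ** 2)
print(f"MSE = {mse:.2f}")  # here: (4 + 4 + 4 + 4) / 4 = 4.00
```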
297 |
298 | **Which of the following is a limitation of the simple average smoothing technique?**
299 | - A) It is computationally intensive
300 | - B) It gives equal weight to all past observations
301 | - C) It focuses primarily on recent data
302 | - D) It automatically adjusts to seasonal variations
303 |
304 | ```{admonition} Answer
305 | :class: note, dropdown
306 | B
307 | ```
308 |
309 | **What is an advantage of using the moving average technique over the simple average technique?**
310 | - A) It can handle large datasets more efficiently
311 | - B) It reduces the lag effect by focusing on more recent data
312 | - C) It gives equal weight to all observations in the series
313 | - D) It automatically detects and adjusts for seasonality
314 |
315 | ```{admonition} Answer
316 | :class: note, dropdown
317 | B
318 | ```
319 |
320 | **How does the window size $P$ in a moving average smoothing technique affect the delay in forecasting?**
321 | - A) Delay decreases as $P$ increases
322 | - B) Delay increases as $P$ increases
323 | - C) Delay remains constant regardless of $P$
324 | - D) Delay is inversely proportional to $P$
325 |
326 | ```{admonition} Answer
327 | :class: note, dropdown
328 | B
329 | ```
330 |
331 | **What is the trade-off between responsiveness and robustness to noise when using a moving average smoothing technique?**
332 | - A) Increasing responsiveness also increases robustness to noise
333 | - B) Decreasing responsiveness decreases robustness to noise
334 | - C) Increasing responsiveness decreases robustness to noise
335 | - D) Responsiveness and robustness to noise are not related in moving average smoothing
336 |
337 | ```{admonition} Answer
338 | :class: note, dropdown
339 | C
340 | ```
341 |
342 | **In the weighted moving average formula $\text{Forecast} = \frac{\sum_{i=1}^P w_i \times X_{n-i+1}}{\sum_{i=1}^P w_i}$, what does $X_{n-i+1}$ represent?**
343 | - A) The weight of the i-th data point
344 | - B) The value of the i-th data point from the end of the data set
345 | - C) The total number of observations in the data set
346 | - D) The average value of the data set
347 |
348 | ```{admonition} Answer
349 | :class: note, dropdown
350 | B
351 | ```
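
A minimal sketch of the weighted moving average forecast defined in the question (data and weights are illustrative; `w[0]` weighs the most recent observation):

```python
import numpy as np

x = np.array([10.0, 12.0, 13.0, 15.0, 16.0])  # the most recent value is x[-1]
w = np.array([3.0, 2.0, 1.0])                 # weights for the last P = 3 observations

P = len(w)
# Forecast = sum_i w_i * X_{n-i+1} / sum_i w_i: a weighted mean of the last P values
forecast = np.dot(w, x[-1:-P-1:-1]) / w.sum()
print(f"forecast = {forecast:.2f}")  # (3*16 + 2*15 + 1*13) / 6 ≈ 15.17
```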
352 |
353 | **What does triple exponential smoothing add to the forecasting model compared to double exponential smoothing?**
354 | - A) An additional smoothing constant for cyclicality
355 | - B) A smoothing component for seasonality
356 | - C) A component that adjusts for random fluctuations
357 | - D) An enhanced trend adjustment component
358 |
359 | ```{admonition} Answer
360 | :class: note, dropdown
361 | B
362 | ```
363 |
364 | **What are the core components of the triple exponential smoothing model?**
365 | - A) Level, trend, and random error
366 | - B) Level, cyclicality, and trend
367 | - C) Level, trend, and seasonality
368 | - D) Trend, seasonality, and cyclicality
369 |
370 | ```{admonition} Answer
371 | :class: note, dropdown
372 | C
373 | ```
374 |
375 | **Can the triple exponential smoothing model account for different types of seasonality?**
376 | - A) Yes, it can model both additive and multiplicative seasonality
377 | - B) No, it only models additive seasonality
378 | - C) No, it only models multiplicative seasonality
379 | - D) Yes, but it requires additional parameters beyond the standard model
380 |
381 | ```{admonition} Answer
382 | :class: note, dropdown
383 | A
384 | ```
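
A minimal sketch of triple exponential smoothing (Holt-Winters) with `statsmodels`; the monthly series is synthetic and only illustrates the level + trend + seasonality components:

```python
import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing

# Synthetic monthly data: linear trend with a seasonal swing that grows with the level.
rng = np.random.default_rng(0)
t = np.arange(120)
y = (50 + 0.5 * t) * (1 + 0.2 * np.sin(2 * np.pi * t / 12)) + rng.normal(scale=2, size=120)

# Triple exponential smoothing: level + additive trend + multiplicative seasonality.
fit = ExponentialSmoothing(y, trend="add", seasonal="mul", seasonal_periods=12).fit()
print(fit.forecast(12))  # forecast one full seasonal cycle ahead
```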
385 |
386 | **For which scenario would triple exponential smoothing likely provide more accurate forecasts than double exponential smoothing?**
387 | - A) Data where seasonal patterns vary depending on the level of the time series
388 | - B) Stable data sets with very little change over time
389 | - C) Time series with rapidly changing trends but no seasonality
390 | - D) Short time series data with limited historical records
391 |
392 | ```{admonition} Answer
393 | :class: note, dropdown
394 | A
395 | ```
396 |
397 |
398 | ## Chapter 4
399 |
400 | **In the context of AR and MA models, what role does the correlation play?**
401 | - A) It helps in determining the optimal parameters of the MA model
402 | - B) It identifies the stationary nature of the time series
403 | - C) It measures the strength and direction of a linear relationship between time series observations at different times
404 | - D) It specifies the number of differences needed to make the series stationary
405 |
406 | ```{admonition} Answer
407 | :class: note, dropdown
408 | C
409 | ```
410 |
411 | **How does the autocorrelation function help in analyzing time series data?**
412 | - A) By identifying the underlying patterns of cyclical fluctuations
413 | - B) By determining the strength and sign of a relationship between a time series and its lags
414 | - C) By calculating the average of the time series
415 | - D) By differentiating between seasonal and non-seasonal patterns
416 |
417 | ```{admonition} Answer
418 | :class: note, dropdown
419 | B
420 | ```
421 |
422 | **How can cross-correlation help in understanding relationships in time series data?**
423 | - A) It identifies the internal structure of a single series
424 | - B) It detects the point at which two series are most aligned or have the strongest relationship
425 | - C) It determines the overall trend of a single time series
426 | - D) It measures the variance within one time series
427 |
428 | ```{admonition} Answer
429 | :class: note, dropdown
430 | B
431 | ```
432 |
433 | **How does partial autocorrelation differ from autocorrelation in analyzing time series data?**
434 | - A) PACF isolates the correlation between specific lags ignoring the effects of intermediate lags, while ACF considers all intermediate lags cumulatively.
435 | - B) PACF is used for linear relationships, whereas ACF is used for non-linear relationships.
436 | - C) PACF can only be applied in stationary time series, while ACF can be applied in both stationary and non-stationary series.
437 | - D) There is no difference; PACF is just another term for ACF.
438 |
439 | ```{admonition} Answer
440 | :class: note, dropdown
441 | A
442 | ```
443 |
444 | **How does an autoregressive model differ from a moving average model?**
445 | - A) AR models use past values of the variable itself for prediction, while MA models use past forecast errors.
446 | - B) AR models use past forecast errors for prediction, while MA models use past values of the variable itself.
447 | - C) AR models consider the trend and seasonality in data, while MA models only focus on the recent past.
448 | - D) There is no difference; AR and MA models are identical.
449 |
450 | ```{admonition} Answer
451 | :class: note, dropdown
452 | A
453 | ```
454 |
455 | **What is the primary assumption of an AR model regarding the relationship between past and future values?**
456 | - A) Future values are completely independent of past values
457 | - B) Future values are determined by a weighted sum of past values
458 | - C) Past values have a diminishing linear effect on future values
459 | - D) Future values are predicted by the mean of past values
460 |
461 | ```{admonition} Answer
462 | :class: note, dropdown
463 | B
464 | ```
465 |
466 | **When selecting the optimal order $p$ for an AR model, what are we looking for in the Partial Autocorrelation Function (PACF)?**
467 | - A) The point where the PACF cuts off after a significant spike
468 | - B) The highest peak in the PACF
469 | - C) The lag where the PACF crosses the zero line
470 | - D) The lag with the maximum PACF value
471 |
472 | ```{admonition} Answer
473 | :class: note, dropdown
474 | A
475 | ```
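
A minimal sketch of the PACF cut-off with `statsmodels` (the AR(2) series is simulated only for illustration):

```python
import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess
from statsmodels.tsa.stattools import pacf

# Simulate an AR(2) process: x_t = 0.6 x_{t-1} + 0.2 x_{t-2} + eps_t
ar = np.array([1, -0.6, -0.2])  # lag-polynomial sign convention used by ArmaProcess
ma = np.array([1])
x = ArmaProcess(ar, ma).generate_sample(nsample=1000)

# The PACF should show significant spikes at lags 1 and 2 and then cut off,
# suggesting p = 2 for an AR model.
print(np.round(pacf(x, nlags=5), 2))
```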
476 |
477 | **In the context of AR models, what is meant by 'stationarity'?**
478 | - A) The mean of the series should not be a function of time.
479 | - B) The series must exhibit clear trends and seasonality.
480 | - C) The variance of the series should increase over time.
481 | - D) The autocorrelations must be close to zero for all time lags.
482 |
483 | ```{admonition} Answer
484 | :class: note, dropdown
485 | A
486 | ```
487 |
488 | **Why would you apply differencing to a time series before fitting an AR model?**
489 | - A) To introduce seasonality into the data
490 | - B) To convert a non-stationary series into a stationary one
491 | - C) To increase the mean of the time series
492 | - D) To reduce the variance of the time series
493 |
494 | ```{admonition} Answer
495 | :class: note, dropdown
496 | B
497 | ```
498 |
499 | **What is a potential consequence of overdifferencing a time series?**
500 | - A) It can introduce a trend into a previously trendless series.
501 | - B) It can create a spurious seasonality in the data.
502 | - C) It can lead to an increase in the variance of the series.
503 | - D) It can produce a series with artificial autocorrelation.
504 |
505 | ```{admonition} Answer
506 | :class: note, dropdown
507 | D
508 | ```
509 |
510 | **What is the primary characteristic of a Moving Average (MA) model in time series analysis?**
511 | - A) It uses past forecast errors in a regression-like model.
512 | - B) It predicts future values based solely on past observed values.
513 | - C) It smooths the time series using a window of observations.
514 | - D) It captures the trend and seasonality of the time series.
515 |
516 | ```{admonition} Answer
517 | :class: note, dropdown
518 | A
519 | ```
520 |
521 | **When analyzing the ACF plot to identify the order $q$ of an MA model, what are you looking for?**
522 | - A) A gradual decline in the lag values
523 | - B) A sharp cut-off after a certain number of lags
524 | - C) A constant value across all lags
525 | - D) Increasing values with increasing lags
526 |
527 | ```{admonition} Answer
528 | :class: note, dropdown
529 | B
530 | ```
531 |
532 |
533 |
534 | ## Chapter 5
535 |
536 | **What are the parameters of an ARMA model that need to be specified?**
537 | - A) $p$ (order of the autoregressive part) and $q$ (order of the moving average part)
538 | - B) $d$ (degree of differencing)
539 | - C) $s$ (seasonal period)
540 | - D) A and B are correct
541 |
542 | ```{admonition} Answer
543 | :class: note, dropdown
544 | A
545 | ```
546 |
547 | **Under what condition is an ARMA model particularly useful compared to exponential smoothing?**
548 | - A) When the time series data is non-stationary
549 | - B) When the time series has a clear seasonal pattern
550 | - C) When the time series exhibits autocorrelations
551 | - D) When the time series is highly erratic and without patterns
552 |
553 | ```{admonition} Answer
554 | :class: note, dropdown
555 | C
556 | ```
557 |
558 | **What is the first step in building an ARMA model?**
559 | - A) Determine whether the time series is stationary
560 | - B) Select the orders p and q for the model
561 | - C) Estimate the parameters of the model
562 | - D) Check the model diagnostics
563 |
564 | ```{admonition} Answer
565 | :class: note, dropdown
566 | A
567 | ```
568 |
569 | **Which criterion is often used to compare different ARIMA models to find the optimal one?**
570 | - A) The least squares criterion
571 | - B) The Akaike Information Criterion (AIC)
572 | - C) The Pearson correlation coefficient
573 | - D) The Durbin-Watson statistic
574 |
575 | ```{admonition} Answer
576 | :class: note, dropdown
577 | B
578 | ```
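
A minimal sketch of comparing candidate orders by AIC with `statsmodels` (the series is synthetic; with real data you would plug in your own observations):

```python
import numpy as np
from statsmodels.tsa.arima.model import ARIMA

# Synthetic AR(1)-like series, only to keep the example self-contained.
rng = np.random.default_rng(0)
y = np.zeros(300)
for t in range(1, 300):
    y[t] = 0.7 * y[t - 1] + rng.normal()

# Fit a few candidate models and keep the one with the lowest AIC.
candidates = [(1, 0, 0), (2, 0, 0), (1, 0, 1)]
aic = {order: ARIMA(y, order=order).fit().aic for order in candidates}
best = min(aic, key=aic.get)
print(aic, "-> best order:", best)
```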
579 |
580 | **What type of differencing might be necessary when preparing a time series for ARIMA modeling?**
581 | - A) Seasonal differencing
582 | - B) Non-linear differencing
583 | - C) Progressive differencing
584 | - D) Inverse differencing
585 |
586 | ```{admonition} Answer
587 | :class: note, dropdown
588 | A
589 | ```
590 |
591 | **What characteristic of a sinusoidal signal might lead the ADF test to conclude it is stationary?**
592 | - A) Its mean and variance are not constant.
593 | - B) It exhibits clear seasonality.
594 | - C) Its mean and autocovariance are time-invariant.
595 | - D) It has increasing amplitude over time.
596 |
597 | ```{admonition} Answer
598 | :class: note, dropdown
599 | C
600 | ```
601 |
602 | **How does a month plot assist in the preparation for ARIMA modeling?**
603 | - A) By confirming the stationarity of the time series
604 | - B) By revealing seasonal effects that might require seasonal differencing
605 | - C) By estimating the parameters for the model
606 | - D) By determining the appropriate lags for the ARIMA model
607 |
608 | ```{admonition} Answer
609 | :class: note, dropdown
610 | B
611 | ```
612 |
613 | **What does it mean to perform out-of-sample validation on an ARIMA model?**
614 | - A) To re-estimate the model parameters using the same data set
615 | - B) To test the model on data that was not used during the model fitting process
616 | - C) To use cross-validation techniques on randomly selected subsamples
617 | - D) To apply in-sample predictions to check consistency
618 |
619 | ```{admonition} Answer
620 | :class: note, dropdown
621 | B
622 | ```
623 |
624 | **How should the residuals of a properly fitted ARIMA model be distributed?**
625 | - A) Normally distributed around zero
626 | - B) Uniformly distributed across the range of data
627 | - C) Log-normally distributed
628 | - D) Exponentially distributed
629 |
630 | ```{admonition} Answer
631 | :class: note, dropdown
632 | A
633 | ```
634 |
635 | **When evaluating the residuals of an ARIMA model, what plot is used to assess the standardization and distribution of residuals?**
636 | - A) Scatter plot
637 | - B) Box plot
638 | - C) Q-Q plot
639 | - D) Pie chart
640 |
641 | ```{admonition} Answer
642 | :class: note, dropdown
643 | C
644 | ```
645 |
646 | **What aspect of residuals does the Shapiro-Wilk test specifically evaluate?**
647 | - A) The autocorrelation structure
648 | - B) The distribution's adherence to normality
649 | - C) The heteroscedasticity of residuals
650 | - D) The variance stability over time
651 |
652 | ```{admonition} Answer
653 | :class: note, dropdown
654 | B
655 | ```
656 |
657 | **Why is the Ljung-Box test important when validating ARIMA models?**
658 | - A) It confirms the seasonal patterns are significant
659 | - B) It verifies that the residuals do not exhibit significant autocorrelation, suggesting a good model fit
660 | - C) It checks the variance of residuals to ensure homoscedasticity
661 | - D) It evaluates the power of the model's parameters
662 |
663 | ```{admonition} Answer
664 | :class: note, dropdown
665 | B
666 | ```
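
A minimal sketch of the Ljung-Box test with `statsmodels` (white noise stands in for the residuals of a fitted model, only to keep the example self-contained):

```python
import numpy as np
from statsmodels.stats.diagnostic import acorr_ljungbox

rng = np.random.default_rng(0)
residuals = rng.normal(size=300)  # stand-in for the residuals of a fitted ARIMA model

# Large p-values -> no evidence of remaining autocorrelation in the residuals.
print(acorr_ljungbox(residuals, lags=[10, 20]))
```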
667 |
668 | **What does the provision of a confidence interval in ARIMA model predictions signify?**
669 | - A) The precision of the estimated parameters
670 | - B) The accuracy of the model’s forecasts
671 | - C) The range within which future forecasts are likely to fall, with a certain probability
672 | - D) The model’s ability to predict exact future values
673 |
674 | ```{admonition} Answer
675 | :class: note, dropdown
676 | C
677 | ```
678 |
679 | **What key feature distinguishes ARIMA models from ARMA models?**
680 | - A) ARIMA models require the data to be stationary.
681 | - B) ARIMA models include an integrated component for differencing non-stationary data.
682 | - C) ARIMA models use only moving average components.
683 | - D) ARIMA models cannot handle seasonal data.
684 |
685 | ```{admonition} Answer
686 | :class: note, dropdown
687 | B
688 | ```
689 |
690 | **Why might an analyst choose an ARIMA model for a financial time series dataset?**
691 | - A) If the dataset is stationary with no underlying trends
692 | - B) If the dataset shows fluctuations that revert to a mean
693 | - C) If the dataset contains underlying trends and requires differencing to become stationary
694 | - D) If the dataset is periodic and predictable
695 |
696 | ```{admonition} Answer
697 | :class: note, dropdown
698 | C
699 | ```
700 |
701 | **What is the primary method for determining the difference order $d$ in an ARIMA model?**
702 | - A) Calculating the AIC for different values of $d$
703 | - B) Observing the stationarity of the time series after successive rounds of differencing
704 | - C) Using a fixed $d$ based on the frequency of the data
705 | - D) Applying the highest $d$ to ensure model simplicity
706 |
707 | ```{admonition} Answer
708 | :class: note, dropdown
709 | B
710 | ```
711 |
712 | **What additional parameters are specified in a SARIMA model compared to an ARIMA model?**
713 | - A) Seasonal orders: $P, D, Q$ and the length of the season $s$
714 | - B) Higher non-seasonal orders: $p, d, q$
715 | - C) A constant term to account for trends
716 | - D) Parameters to manage increased data frequency
717 |
718 | ```{admonition} Answer
719 | :class: note, dropdown
720 | A
721 | ```
722 |
723 | **How does the seasonal differencing order $D$ in SARIMA models differ from the regular differencing $d$ in ARIMA models?**
724 | - A) $D$ is used to stabilize the variance, while $d$ stabilizes the mean.
725 | - B) $D$ specifically targets removing seasonal patterns, while $d$ focuses on achieving overall stationarity.
726 | - C) $D$ adjusts for autocorrelation, while $d$ corrects for heteroscedasticity.
727 | - D) $D$ is used for linear trends, and $d$ is used for exponential trends.
728 |
729 | ```{admonition} Answer
730 | :class: note, dropdown
731 | B
732 | ```
733 |
734 | **What methodology is generally used to select the seasonal autoregressive order $P$ and the seasonal moving average order $Q$ in a SARIMA model?**
735 | - A) Examining the ACF and PACF plots specifically for the identified seasonal lags
736 | - B) Applying cross-validation techniques across multiple seasonal cycles
737 | - C) Testing various combinations of $P$ and $Q$ until the model no longer improves
738 | - D) Reducing $P$ and $Q$ iteratively based on the simplest model criterion
739 |
740 | ```{admonition} Answer
741 | :class: note, dropdown
742 | A
743 | ```
744 |
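To show what the extra seasonal orders look like in code, here is a minimal sketch using `SARIMAX` from `statsmodels`; the toy monthly-style series, the orders, and the season length `s = 12` are illustrative assumptions.

```python
import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

rng = np.random.default_rng(1)
t = np.arange(240)
y = 10 + 0.05 * t + 2 * np.sin(2 * np.pi * t / 12) + rng.normal(scale=0.5, size=t.size)

# order = (p, d, q), seasonal_order = (P, D, Q, s); here s = 12 for a yearly cycle in monthly data
model = SARIMAX(y, order=(1, 1, 1), seasonal_order=(1, 1, 1, 12))
res = model.fit(disp=False)
print(res.summary().tables[1])   # estimated non-seasonal and seasonal coefficients
```
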
745 | **In what scenario might the AutoARIMA model be particularly beneficial?**
746 | - A) When the user has extensive statistical knowledge and prefers to control every aspect of model building.
747 | - B) When quick deployment and model testing are necessary without detailed prior analysis.
748 | - C) When the data shows no signs of seasonality or non-stationarity.
749 | - D) When only qualitative data is available for analysis.
750 |
751 | ```{admonition} Answer
752 | :class: note, dropdown
753 | B
754 | ```
755 |
756 | **How does the Mean Absolute Percentage Error (MAPE) differ from the Mean Squared Error (MSE) in its interpretation of forecasting accuracy?**
757 | - A) MAPE provides a measure of error in absolute terms, while MSE measures error in squared terms.
758 | - B) MAPE is more sensitive to outliers than MSE.
759 | - C) MAPE gives a relative error which makes it easier to interpret across different data scales, unlike MSE which provides an absolute error.
760 | - D) MAPE is used only for linear models, while MSE is used for non-linear models.
761 |
762 | ```{admonition} Answer
763 | :class: note, dropdown
764 | C
765 | ```
766 |
767 | **Why is it important to consider both MSE and MAPE when conducting a grid search for the best SARIMA model?**
768 | - A) Because some models may perform well in terms of low MSE but might show high percentage errors as indicated by MAPE, especially on smaller data scales.
769 | - B) Because higher values of both MSE and MAPE indicate a more complex and desirable model.
770 | - C) Because lower MSE and higher MAPE together are indicative of a model that is overfitting.
771 | - D) Because the regulatory standards in time series forecasting mandate the use of both metrics for compliance.
772 |
773 | ```{admonition} Answer
774 | :class: note, dropdown
775 | A
776 | ```
777 |
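A small sketch of how the two metrics can be computed side by side for a grid-search candidate (pure NumPy; `y_true` and `y_pred` are placeholder arrays):

```python
import numpy as np

y_true = np.array([100.0, 102.0, 98.0, 105.0])
y_pred = np.array([ 99.0, 104.0, 97.0, 110.0])

mse  = np.mean((y_true - y_pred) ** 2)                      # scale-dependent, in squared units
mape = np.mean(np.abs((y_true - y_pred) / y_true)) * 100    # relative error, in percent

print(f"MSE:  {mse:.2f}")
print(f"MAPE: {mape:.2f}%")
```
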
778 | **How is the complexity of an ARIMA model typically quantified?**
779 | - A) By the sum of the parameters $p$, $d$, and $q$.
780 | - B) By the computational time required to fit the model.
781 | - C) By the number of data points used in the model.
782 | - D) By the variance of the residuals produced by the model.
783 |
784 | ```{admonition} Answer
785 | :class: note, dropdown
786 | A
787 | ```
788 |
789 |
790 |
791 | ## Chapter 6
792 |
793 | **What is the main purpose of conducting a unit root test on a time series dataset?**
794 | - A) To determine the optimal parameters for an ARIMA model.
795 | - B) To identify whether the time series is stationary or non-stationary.
796 | - C) To confirm if the time series has a constant mean and variance.
797 | - D) To evaluate the predictive accuracy of a time series model.
798 |
799 | ```{admonition} Answer
800 | :class: note, dropdown
801 | B
802 | ```
803 |
804 | **How is the concept of a "unit root" integral to understanding the behavior of a time series?**
805 | - A) It helps in identifying the periodic components of the series.
806 | - B) It indicates whether the time series will return to a trend path or persist in deviation.
807 | - C) It determines the cyclical amplitude of the time series.
808 | - D) It specifies the frequency of the time series data.
809 |
810 | ```{admonition} Answer
811 | :class: note, dropdown
812 | B
813 | ```
814 |
815 | **How does the ADF test enhance the basic Dickey-Fuller test for more accurate unit root testing?**
816 | - A) By incorporating lagged difference terms in the regression to account for autocorrelation in the residuals.
817 | - B) By increasing the number of lags used in the regression to capture seasonality.
818 | - C) By applying a transformation to the time series data to ensure normal distribution.
819 | - D) By reducing the dataset size to focus on more recent data points.
820 |
821 | ```{admonition} Answer
822 | :class: note, dropdown
823 | A
824 | ```
825 |
826 | **When evaluating a time series for non-stationarity using the ADF test, why might one choose to include both $\alpha$ and $\beta t$ in the regression model?**
827 | - A) To ensure that the test accounts for both constant and linear trend components, thus avoiding spurious rejection of the unit root null hypothesis in the presence of a trend.
828 | - B) To increase the regression model’s fit to the data, thereby reducing residual errors.
829 | - C) To differentiate between seasonal and non-seasonal components effectively.
830 | - D) To comply with regulatory requirements for financial time series analysis.
831 |
832 | ```{admonition} Answer
833 | :class: note, dropdown
834 | A
835 | ```
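A minimal sketch of running the ADF test with both a constant and a linear trend in the test regression (assuming `statsmodels` is installed; `regression="ct"` selects the specification with both $\alpha$ and $\beta t$):

```python
import numpy as np
from statsmodels.tsa.stattools import adfuller

rng = np.random.default_rng(0)
t = np.arange(500)
y = 0.02 * t + 0.1 * rng.normal(size=t.size).cumsum() + rng.normal(size=t.size)

# regression="ct" includes a constant and a linear trend in the test equation
stat, pvalue, usedlag, nobs, crit, icbest = adfuller(y, regression="ct")
print(f"ADF statistic: {stat:.3f}, p-value: {pvalue:.3f}")
print("critical values:", crit)
```
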
836 |
837 | **Why is mean reversion considered an important concept in trading and investment strategies?**
838 | - A) It provides a basis for predicting long-term trends in asset prices.
839 | - B) It suggests that price extremes may be temporary, offering potential opportunities for arbitrage.
840 | - C) It indicates constant returns regardless of market conditions.
841 | - D) It guarantees a fixed rate of return on all investments.
842 |
843 | ```{admonition} Answer
844 | :class: note, dropdown
845 | B
846 | ```
847 |
848 | **What does the rejection of the null hypothesis in a mean reversion test like the ADF suggest about the time series?**
849 | - A) The series is likely non-stationary with no mean reversion.
850 | - B) The series does not contain a unit root, suggesting mean reversion.
851 | - C) There is no correlation between sequential data points in the series.
852 | - D) The series exhibits clear seasonal patterns and trends.
853 |
854 | ```{admonition} Answer
855 | :class: note, dropdown
856 | B
857 | ```
858 |
859 | **In which scenario might a time series be mean-reverting but not stationary?**
860 | - A) If the mean to which the series reverts changes over time.
861 | - B) If the series displays constant mean and variance.
862 | - C) If the autocorrelation function shows dependency at only very short lags.
863 | - D) If the series exhibits no significant peaks or troughs.
864 |
865 | ```{admonition} Answer
866 | :class: note, dropdown
867 | A
868 | ```
869 |
870 | **What does a Hurst exponent greater than 0.5 indicate about a time series?**
871 | - A) The series exhibits mean-reverting behavior.
872 | - B) The series is likely to be stationary.
873 | - C) The series shows persistent behavior, trending in one direction.
874 | - D) The series has no clear long-term trends.
875 |
876 | ```{admonition} Answer
877 | :class: note, dropdown
878 | C
879 | ```
880 |
881 | **What Hurst exponent value is typically associated with an antipersistent time series?**
882 | - A) Around 0.5
883 | - B) Less than 0.5
884 | - C) Exactly 0.0
885 | - D) Greater than 0.7
886 |
887 | ```{admonition} Answer
888 | :class: note, dropdown
889 | B
890 | ```
891 |
892 | **How can the Hurst exponent be utilized by traders or financial analysts when evaluating the behavior of stock prices?**
893 | - A) A Hurst exponent above 0.5 can indicate a potential opportunity for trend-following strategies.
894 | - B) A Hurst exponent below 0 can suggest opportunities for strategies based on price reversals.
895 | - C) A Hurst exponent near 0 may encourage investment in stable, low-volatility stocks.
896 | - D) A Hurst exponent above 0 indicates high risk and high potential returns, suitable for aggressive investment strategies.
897 |
898 | ```{admonition} Answer
899 | :class: note, dropdown
900 | A
901 | ```
902 |
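The Hurst exponent can be estimated in several ways; the sketch below uses a simple lagged-differences estimator (pure NumPy, illustrative rather than production-grade): the slope of $\log(\text{std of lag-}\tau\text{ differences})$ against $\log \tau$ approximates $H$.

```python
import numpy as np

def hurst_exponent(x, max_lag=100):
    """Rough Hurst estimate from the scaling of lagged differences."""
    lags = np.arange(2, max_lag)
    tau = [np.std(x[lag:] - x[:-lag]) for lag in lags]
    return np.polyfit(np.log(lags), np.log(tau), 1)[0]   # slope of the log-log fit

rng = np.random.default_rng(0)
random_walk = np.cumsum(rng.normal(size=5000))   # expected H close to 0.5
print(f"H ~ {hurst_exponent(random_walk):.2f}")
```
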
903 | **What fundamental properties does Geometric Brownian Motion (GBM) assume about the behavior of stock prices?**
904 | - A) Stock prices change in accordance with a Poisson distribution.
905 | - B) Stock prices follow a path determined by both a constant drift and a random shock component.
906 | - C) Stock prices are stable and do not show volatility.
907 | - D) Stock prices are inversely proportional to the market volatility.
908 |
909 | ```{admonition} Answer
910 | :class: note, dropdown
911 | B
912 | ```
913 |
914 | **How does the treatment of volatility differ between Brownian Motion and Geometric Brownian Motion?**
915 | - A) In Brownian Motion, volatility is constant, whereas in GBM, volatility impacts the rate of exponential growth.
916 | - B) Volatility is not a factor in Brownian Motion but is crucial in GBM.
917 | - C) In GBM, volatility decreases as stock prices increase, unlike in Brownian Motion where it remains stable.
918 | - D) Volatility in Brownian Motion leads to negative stock prices, which GBM corrects by allowing only positive values.
919 |
920 | ```{admonition} Answer
921 | :class: note, dropdown
922 | A
923 | ```
924 |
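A minimal sketch of simulating a GBM price path (pure NumPy; the drift `mu` and volatility `sigma` are arbitrary illustrative values), using the closed-form solution $S_t = S_0 \exp\big((\mu - \tfrac{1}{2}\sigma^2)t + \sigma W_t\big)$:

```python
import numpy as np

rng = np.random.default_rng(0)
S0, mu, sigma = 100.0, 0.05, 0.2      # initial price, annual drift, annual volatility
T, n = 1.0, 252                       # one year of daily steps
dt = T / n

# Brownian motion W_t built from i.i.d. Gaussian increments
W = np.cumsum(rng.normal(scale=np.sqrt(dt), size=n))
t = np.linspace(dt, T, n)

S = S0 * np.exp((mu - 0.5 * sigma**2) * t + sigma * W)   # GBM path, always positive
print(S[:5])
```
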
925 |
926 |
927 | ## Chapter 7
928 |
929 | **How could the Kalman Filter benefit aerospace applications?**
930 | - A) It is used to maintain and organize flight schedules.
931 | - B) It aids in the calibration of on-board clocks on satellites.
932 | - C) It provides precise real-time filtering and prediction of spacecraft and satellite trajectories.
933 | - D) It is only used for communication between spacecraft.
934 |
935 | ```{admonition} Answer
936 | :class: note, dropdown
937 | C
938 | ```
939 |
940 | **Why is it important to accurately estimate the noise parameters in the Kalman Filter?**
941 | - A) Incorrect noise parameters can lead to overfitting of the model to noisy data.
942 | - B) Accurate noise parameter estimation is crucial for the filter's ability to adapt its estimates to the level of uncertainty in both the process dynamics and observations.
943 | - C) High noise estimates increase the filter’s processing speed.
944 | - D) Lower noise estimates simplify the mathematical calculations in the filter.
945 |
946 | ```{admonition} Answer
947 | :class: note, dropdown
948 | B
949 | ```
950 |
951 | **Why is the assumption of Gaussian noise important in the operation of the Kalman Filter?**
952 | - A) It allows the use of binary noise models.
953 | - B) It simplifies the mathematical representation of the noise.
954 | - C) Gaussian noise implies that all errors are uniformly distributed.
955 | - D) It ensures that the state estimation errors are normally distributed, facilitating analytical tractability and optimal estimation.
956 |
957 | ```{admonition} Answer
958 | :class: note, dropdown
959 | D
960 | ```
961 |
962 | **What is the primary concept of the Kalman Filter regarding the use of multiple sources of information?**
963 | - A) To disregard noisy measurements in favor of a more precise model.
964 | - B) To combine information from an imprecise model and noisy measurements to optimally estimate the true state of a system.
965 | - C) To enhance the accuracy of measurements by filtering out model predictions.
966 | - D) To use only the most reliable source of information while ignoring others.
967 |
968 | ```{admonition} Answer
969 | :class: note, dropdown
970 | B
971 | ```
972 |
973 | **In the Kalman Filter, what does the 'predict' step specifically calculate?**
974 | - A) The certainty of the measurement data.
975 | - B) The prior estimate of the state before the next measurement is taken into account.
976 | - C) The exact value of external influences on the system.
977 | - D) The posterior state estimate.
978 |
979 | ```{admonition} Answer
980 | :class: note, dropdown
981 | B
982 | ```
983 |
984 | **In the Kalman Filter, what role does the prior error estimate play during the update phase?**
985 | - A) It is used to directly correct the system’s model dynamics.
986 | - B) It determines how much weight to give the new measurement versus the predicted state.
987 | - C) It serves as a constant factor to maintain stability in the filter's performance.
988 | - D) It is adjusted to match the measurement noise for consistency.
989 |
990 | ```{admonition} Answer
991 | :class: note, dropdown
992 | B
993 | ```
994 |
995 | **How does the Kalman Filter algorithm utilize the process and measurement errors during its operation?**
996 | - A) It ignores these errors to simplify the computations.
997 | - B) It adjusts the error estimates based solely on the measurement error.
998 | - C) It combines both errors to calculate the state estimate and update the error covariance.
999 | - D) It sequentially addresses each error, first correcting for process error, then measurement error.
1000 |
1001 | ```{admonition} Answer
1002 | :class: note, dropdown
1003 | C
1004 | ```
1005 |
1006 | **How does the Kalman Gain affect the outcome of the 'correct' step in the Kalman Filter?**
1007 | - A) A higher Kalman Gain indicates a greater reliance on the model prediction over the actual measurement.
1008 | - B) The Kalman Gain optimizes the balance between the predicted state and the new measurement, updating the state estimate accordingly.
1009 | - C) The Kalman Gain decreases the measurement noise automatically.
1010 | - D) A lower Kalman Gain speeds up the computation by reducing data processing.
1011 |
1012 | ```{admonition} Answer
1013 | :class: note, dropdown
1014 | B
1015 | ```
1016 |
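To tie the predict and correct steps together, here is a minimal one-dimensional Kalman filter sketch (pure NumPy) that estimates a constant value from noisy measurements; the noise covariances `Q` and `R` are assumed values chosen for illustration.

```python
import numpy as np

rng = np.random.default_rng(0)
true_value = 5.0
measurements = true_value + rng.normal(scale=1.0, size=50)   # noisy observations

x, P = 0.0, 1.0      # initial state estimate and its error covariance
Q, R = 1e-5, 1.0     # assumed process and measurement noise covariances

for z in measurements:
    # predict: the state model is "constant", so only the uncertainty grows
    x_prior, P_prior = x, P + Q
    # correct: the Kalman gain balances the prior estimate against the new measurement
    K = P_prior / (P_prior + R)
    x = x_prior + K * (z - x_prior)      # innovation = z - x_prior
    P = (1 - K) * P_prior

print(f"final estimate: {x:.3f} (true value {true_value})")
```
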
1017 | **What role does measurement innovation play in the Kalman Filter's 'correct' step?**
1018 | - A) It is used to adjust the Kalman Gain to minimize the impact of new measurements.
1019 | - B) It determines how much the estimates should be adjusted, based on the new data received.
1020 | - C) It recalculates the system's baseline parameters without influencing the current state estimate.
1021 | - D) It provides a direct measurement of the system's performance efficiency.
1022 |
1023 | ```{admonition} Answer
1024 | :class: note, dropdown
1025 | B
1026 | ```
1027 |
1028 | **Why is the Kalman Filter particularly effective at dealing with partial observations of a system's state?**
1029 | - A) It can operate without any data, relying solely on system models.
1030 | - B) It integrates available partial data with the system's dynamic model to estimate unobserved components.
1031 | - C) It filters out incomplete data to prevent errors.
1032 | - D) It requires full data observation at each step to function correctly.
1033 |
1034 | ```{admonition} Answer
1035 | :class: note, dropdown
1036 | B
1037 | ```
1038 |
1039 | **What theoretical implication does the measurement noise covariance matrix $R$ approaching zero have on the measurement process in the Kalman Filter?**
1040 | - A) It implies that the measurements are becoming less reliable and should be disregarded.
1041 | - B) It indicates an increase in measurement noise, requiring more conservative updates.
1042 | - C) It signals that the measurements are becoming non-linear.
1043 | - D) It suggests that the measurements are believed to be almost perfect, with negligible noise.
1044 |
1045 | ```{admonition} Answer
1046 | :class: note, dropdown
1047 | D
1048 | ```
1049 |
1050 | **In what scenario would the predicted error covariance $P_t^{-}$ approaching zero be considered ideal in the Kalman Filter application?**
1051 | - A) In highly dynamic systems where measurements are less reliable than the model predictions.
1052 | - B) When the system model and the process noise are perfectly known and constant.
1053 | - C) In systems where no prior knowledge of the state dynamics exists.
1054 | - D) In applications where the measurement noise $R$ is extremely high, making $P_t^{-}$ the primary source of information.
1055 |
1056 | ```{admonition} Answer
1057 | :class: note, dropdown
1058 | B
1059 | ```
1060 |
1061 | **What role does the process noise covariance matrix $Q$ play in the Kalman Filter?**
1062 | - A) It affects how much the state prediction is trusted over the actual measurements.
1063 | - B) It adjusts the measurement model to fit better with observed data.
1064 | - C) It is negligible and typically set to zero to simplify calculations.
1065 | - D) It defines the expected noise or uncertainty in the dynamics of the system being modeled.
1066 |
1067 | ```{admonition} Answer
1068 | :class: note, dropdown
1069 | D
1070 | ```
1071 |
1072 | **How does the Kalman Gain influence the Kalman Filter's operation?**
1073 | - A) It determines the rate at which the state estimate converges to the true state.
1074 | - B) It ensures that the filter always trusts the model's predictions over measurements.
1075 | - C) It optimally combines information from the predicted state and the measurement to generate an updated state estimate with minimum error variance.
1076 | - D) It is used to adjust the process noise covariance matrix $Q$ to account for environmental changes.
1077 |
1078 | ```{admonition} Answer
1079 | :class: note, dropdown
1080 | C
1081 | ```
1082 |
1083 |
1084 |
1085 | ## Chapter 8
1086 |
1087 | **What information does the magnitude of the Fourier transform provide about the time series?**
1088 | - A) The overall trend of the time series
1089 | - B) The strength of different frequencies present in the time series
1090 | - C) The exact timestamps of specific events in the time series
1091 | - D) The predictive accuracy of the time series model
1092 |
1093 | ```{admonition} Answer
1094 | :class: note, dropdown
1095 | B
1096 | ```
1097 |
1098 | **What does the inverse Fourier transform achieve in the context of signal processing?**
1099 | - A) It generates the amplitude spectrum of the signal
1100 | - B) It compresses the signal data for better storage
1101 | - C) It reconstructs the original time-domain signal from its frequency-domain representation
1102 | - D) It converts the phase spectrum into a usable format
1103 |
1104 | ```{admonition} Answer
1105 | :class: note, dropdown
1106 | C
1107 | ```
1108 |
1109 | **Is the Fourier transform directly computed in practical applications?**
1110 | - A) Yes, it is directly computed as defined mathematically
1111 | - B) No, it is considered too complex for real-time computations
1112 | - C) Yes, but only for very small datasets
1113 | - D) No, it is approximated using other techniques due to efficiency concerns
1114 |
1115 | ```{admonition} Answer
1116 | :class: note, dropdown
1117 | D
1118 | ```
1119 |
1120 | **Why is the Fast Fourier Transform preferred over the traditional Fourier Transform in most applications?**
1121 | - A) It operates in real-time
1122 | - B) It requires less computational power and is faster due to reduced complexity
1123 | - C) It provides more detailed frequency analysis
1124 | - D) It is easier to implement in software
1125 |
1126 | ```{admonition} Answer
1127 | :class: note, dropdown
1128 | B
1129 | ```
1130 |
1131 | **In the Fourier Transform of a pure sinusoidal function, what indicates the frequency of the sinusoid?**
1132 | - A) The width of the spikes in the frequency domain
1133 | - B) The height of the spikes in the frequency domain
1134 | - C) The position of the spikes along the frequency axis
1135 | - D) The area under the curve in the frequency spectrum
1136 |
1137 | ```{admonition} Answer
1138 | :class: note, dropdown
1139 | C
1140 | ```
1141 |
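A short sketch (NumPy only) showing that the FFT of a pure sinusoid produces a spike whose position along the frequency axis equals the sinusoid's frequency:

```python
import numpy as np

fs = 100.0                              # sampling frequency in Hz
t = np.arange(0, 10, 1 / fs)            # 10 seconds of samples
x = np.sin(2 * np.pi * 3.0 * t)         # 3 Hz sinusoid

spectrum = np.abs(np.fft.rfft(x))
freqs = np.fft.rfftfreq(x.size, d=1 / fs)

print(f"dominant frequency: {freqs[np.argmax(spectrum)]:.2f} Hz")   # close to 3.00 Hz
```
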
1142 | **What is the Fourier Transform of a Dirac delta function?**
1143 | - A) A single spike at the origin
1144 | - B) A flat line across all frequencies at zero amplitude
1145 | - C) A continuous spectrum across all frequencies
1146 | - D) Symmetric spikes at specific frequencies
1147 |
1148 | ```{admonition} Answer
1149 | :class: note, dropdown
1150 | C
1151 | ```
1152 |
1153 | **How does the Fourier Transform of a unit step function typically appear in the frequency domain?**
1154 | - A) As a constant amplitude across all frequencies
1155 | - B) As a spike at zero frequency with a symmetric component inversely proportional to frequency
1156 | - C) As increasing amplitudes with increasing frequency
1157 | - D) As decreasing amplitudes with increasing frequency
1158 |
1159 | ```{admonition} Answer
1160 | :class: note, dropdown
1161 | B
1162 | ```
1163 |
1164 | **When does spectral leakage typically occur in the Fourier transform process?**
1165 | - A) When the signal is perfectly periodic within the observed time window
1166 | - B) When the length of the data window does not exactly contain an integer number of cycles of the signal
1167 | - C) When the signal has a very low frequency
1168 | - D) When the signal amplitude is very high
1169 |
1170 | ```{admonition} Answer
1171 | :class: note, dropdown
1172 | B
1173 | ```
1174 |
1175 | **What mathematical expression best describes the linearity property of the Fourier transform?**
1176 | - A) $ F(ax + by) = aF(x) + bF(y) $
1177 | - B) $ F(x + y) = F(x) * F(y) $
1178 | - C) $ F(x + y) = F(x) / F(y) $
1179 | - D) $ F(ax + by) = aF(x) * bF(y) $
1180 |
1181 | ```{admonition} Answer
1182 | :class: note, dropdown
1183 | A
1184 | ```
1185 |
1186 | **If a signal $x(t)$ is shifted in time by $t_0$, what is the effect on its Fourier transform $X(f)$?**
1187 | - A) $X(f)$ is multiplied by $e^{-i2\pi ft_0}$
1188 | - B) $X(f)$ remains unchanged
1189 | - C) $X(f)$ is multiplied by $e^{i2\pi ft_0}$
1190 | - D) $X(f)$ is divided by $e^{i2\pi ft_0}$
1191 |
1192 | ```{admonition} Answer
1193 | :class: note, dropdown
1194 | A
1195 | ```
1196 |
1197 | **What is the effect of multiplying two Fourier transforms in the frequency domain?**
1198 | - A) The corresponding time-domain signals are added.
1199 | - B) The corresponding time-domain signals are multiplied.
1200 | - C) The corresponding time-domain signals are convolved.
1201 | - D) The corresponding time-domain signals are subtracted.
1202 |
1203 | ```{admonition} Answer
1204 | :class: note, dropdown
1205 | C
1206 | ```
1207 |
1208 | **What happens to the Fourier transform of a signal when it is integrated in the time domain?**
1209 | - A) The Fourier transform is multiplied by $-j2\pi f$.
1210 | - B) The Fourier transform is divided by $j2\pi f$.
1211 | - C) The Fourier transform is differentiated.
1212 | - D) The Fourier transform is multiplied by $f$.
1213 |
1214 | ```{admonition} Answer
1215 | :class: note, dropdown
1216 | B
1217 | ```
1218 |
1219 | **What mathematical relationship does Parseval's theorem establish between a time-domain function and its Fourier transform?**
1220 | - A) The integral of the square of the time-domain function equals the integral of the square of the frequency-domain function multiplied by $2\pi$.
1221 | - B) The integral of the square of the time-domain function equals the integral of the square of the frequency-domain function divided by $2\pi$.
1222 | - C) The sum of the squares of a discrete time-domain signal equals the sum of the squares of its discrete Fourier transform divided by the number of samples.
1223 | - D) The integral of the square of the time-domain function equals the integral of the square of the frequency-domain function.
1224 |
1225 | ```{admonition} Answer
1226 | :class: note, dropdown
1227 | D
1228 | ```
1229 |
1230 | **How do filters affect a signal in terms of its frequency components?**
1231 | - A) Filters randomly alter the frequencies present in a signal.
1232 | - B) Filters uniformly amplify all frequencies of a signal.
1233 | - C) Filters remove all frequencies from a signal to simplify it.
1234 | - D) Filters allow certain frequencies to pass while blocking others, based on the filter design.
1235 |
1236 | ```{admonition} Answer
1237 | :class: note, dropdown
1238 | D
1239 | ```
1240 |
1241 | **How is the transfer function of a filter related to its frequency response?**
1242 | - A) The transfer function, when evaluated on the imaginary axis, gives the frequency response.
1243 | - B) The frequency response is the integral of the transfer function over all frequencies.
1244 | - C) The frequency response is the derivative of the transfer function with respect to frequency.
1245 | - D) The transfer function is a simplified version of the frequency response that omits phase information.
1246 |
1247 | ```{admonition} Answer
1248 | :class: note, dropdown
1249 | A
1250 | ```
1251 |
1252 | **How does a Bode plot assist in filter design and analysis?**
1253 | - A) It provides a method to directly measure the filter’s resistance and capacitance.
1254 | - B) It allows designers to visually assess how a filter modifies signal amplitude and phase at various frequencies.
1255 | - C) It calculates the exact dimensions needed for filter components.
1256 | - D) It identifies the specific materials required for constructing the filter.
1257 |
1258 | ```{admonition} Answer
1259 | :class: note, dropdown
1260 | B
1261 | ```
1262 |
1263 | **How does a low-pass filter benefit digital communications?**
1264 | - A) It encrypts the communication signals.
1265 | - B) It enhances the clarity of digital signals by filtering out high-frequency noise and interference.
1266 | - C) It converts analog signals to digital signals.
1267 | - D) It increases the bandwidth of the communication channel.
1268 |
1269 | ```{admonition} Answer
1270 | :class: note, dropdown
1271 | B
1272 | ```
1273 |
1274 | **What is the characteristic of a Butterworth filter?**
1275 | - A) It has a rectangular frequency response, making it ideal for time-domain operations.
1276 | - B) It is known for its maximally flat magnitude response in the passband, providing a smooth transition with no ripples.
1277 | - C) It emphasizes certain frequency components using a tapered cosine function.
1278 | - D) It combines characteristics of both rectangular and triangular filters for versatile applications.
1279 |
1280 | ```{admonition} Answer
1281 | :class: note, dropdown
1282 | B
1283 | ```
1284 |
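A minimal sketch of a Butterworth low-pass filter applied with `scipy.signal`; the filter order, cutoff, and sampling rate are illustrative choices.

```python
import numpy as np
from scipy.signal import butter, filtfilt

fs = 100.0                              # sampling frequency in Hz
t = np.arange(0, 5, 1 / fs)
x = np.sin(2 * np.pi * 1.0 * t) + 0.5 * np.sin(2 * np.pi * 20.0 * t)   # slow signal + fast noise

# 4th-order Butterworth low-pass with a 5 Hz cutoff; filtfilt avoids phase distortion
b, a = butter(N=4, Wn=5.0, btype="low", fs=fs)
x_filtered = filtfilt(b, a, x)

print(x_filtered[:5])                   # the 20 Hz component is strongly attenuated
```
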
1285 | **What is the primary function of a high-pass filter?**
1286 | - A) To allow only low-frequency signals to pass and attenuate high-frequency signals.
1287 | - B) To allow only high-frequency signals to pass and attenuate low-frequency signals.
1288 | - C) To amplify all frequencies of a signal equally.
1289 | - D) To stabilize voltage fluctuations within a circuit.
1290 |
1291 | ```{admonition} Answer
1292 | :class: note, dropdown
1293 | B
1294 | ```
1295 |
1296 | **In audio engineering, what is a typical use of a band-stop filter?**
1297 | - A) To enhance the overall loudness of the audio track.
1298 | - B) To eliminate specific unwanted frequencies, like electrical hum or feedback.
1299 | - C) To synchronize audio tracks by adjusting their frequency content.
1300 | - D) To convert stereo audio tracks into mono.
1301 |
1302 | ```{admonition} Answer
1303 | :class: note, dropdown
1304 | B
1305 | ```
1306 |
1307 | **What role does the Fourier transform play in identifying the main seasonalities in a dataset?**
1308 | - A) It decomposes the dataset into its constituent frequencies, highlighting predominant cycles.
1309 | - B) It directly filters out non-seasonal components, leaving only the main seasonal patterns.
1310 | - C) It amplifies the seasonal fluctuations to make them more detectable by standard algorithms.
1311 | - D) It compresses the data to reduce computational requirements for forecasting.
1312 |
1313 | ```{admonition} Answer
1314 | :class: note, dropdown
1315 | A
1316 | ```
1317 |
1318 |
1319 |
1320 | ## Chapter 9
1321 |
1322 | **What role do holiday effects play in the Prophet model?**
1323 | - A) They are considered as outliers and are removed from the dataset.
1324 | - B) They are modeled as part of the trend component.
1325 | - C) They are ignored unless specifically included in the model.
1326 | - D) They provide adjustments for predictable events that cause unusual observations on specific days.
1327 |
1328 | ```{admonition} Answer
1329 | :class: note, dropdown
1330 | D
1331 | ```
1332 |
1333 | **What feature of the Prophet model allows it to adapt to changes in the direction of time-series data trends?**
1334 | - A) The inclusion of a stationary component to stabilize variance
1335 | - B) The use of change points to allow for shifts in the trend
1336 | - C) A constant growth rate applied throughout the model
1337 | - D) Periodic adjustments based on previous forecast errors
1338 |
1339 | ```{admonition} Answer
1340 | :class: note, dropdown
1341 | B
1342 | ```
1343 |
1344 | **What is a key characteristic of using a piecewise linear function for modeling trends in the Prophet model compared to a standard linear function?**
1345 | - A) Piecewise linear functions model trends as constant over time.
1346 | - B) Piecewise linear functions can adapt to abrupt changes in the trend at specific points in time.
1347 | - C) Standard linear functions allow for automatic detection of change points.
1348 | - D) Standard linear functions are more flexible and adapt to non-linear trends.
1349 |
1350 | ```{admonition} Answer
1351 | :class: note, dropdown
1352 | B
1353 | ```
1354 |
1355 | **How are change points typically determined in the Prophet model?**
1356 | - A) Through manual specification by the user.
1357 | - B) By a random selection process to ensure model variability.
1358 | - C) Automatically during model fitting, based on the data's historical fluctuations.
1359 | - D) Using a fixed interval that divides the data series into equal segments.
1360 |
1361 | ```{admonition} Answer
1362 | :class: note, dropdown
1363 | C
1364 | ```
1365 |
1366 | **How does the Logistic growth model handle forecasts for data with inherent upper limits?**
1367 | - A) By using a predefined upper limit known as the carrying capacity.
1368 | - B) By randomly assigning an upper limit based on data variability.
1369 | - C) By continuously adjusting the upper limit as new data becomes available.
1370 | - D) By ignoring any potential upper limits and forecasting based on past growth rates.
1371 |
1372 | ```{admonition} Answer
1373 | :class: note, dropdown
1374 | A
1375 | ```
1376 |
1377 | **How does saturating growth occur in the Logistic growth model within Prophet?**
1378 | - A) It happens when the growth rate exceeds the carrying capacity.
1379 | - B) It occurs as the time series approaches the carrying capacity, causing the growth rate to slow down.
1380 | - C) It is when the growth rate remains constant regardless of the carrying capacity.
1381 | - D) It is defined as the exponential increase in growth without bounds.
1382 |
1383 | ```{admonition} Answer
1384 | :class: note, dropdown
1385 | B
1386 | ```
1387 |
1388 | **What is required to model holidays in the Prophet framework?**
1389 | - A) A list of holidays must be manually specified along with their potential impact on the forecast.
1390 | - B) Holidays are automatically detected based on the country's standard holiday calendar.
1391 | - C) The user needs to input the exact dates and duration of each holiday for the past ten years.
1392 | - D) A statistical test to determine which holidays significantly affect the data.
1393 |
1394 | ```{admonition} Answer
1395 | :class: note, dropdown
1396 | A
1397 | ```
1398 |
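A minimal sketch of how holidays are passed to Prophet (assuming the `prophet` package is installed; the `ds`/`y` columns and the holiday dataframe follow Prophet's documented interface, while the series and the holiday dates themselves are made up for illustration):

```python
import pandas as pd
from prophet import Prophet

# toy daily series in Prophet's expected ds/y format
dates = pd.date_range("2020-01-01", periods=365, freq="D")
df = pd.DataFrame({"ds": dates, "y": range(365)})

# user-specified holidays, with optional windows around each date
holidays = pd.DataFrame({
    "holiday": "promo_day",
    "ds": pd.to_datetime(["2020-03-01", "2020-09-01"]),
    "lower_window": 0,
    "upper_window": 1,
})

m = Prophet(holidays=holidays)
m.fit(df)
forecast = m.predict(m.make_future_dataframe(periods=30))
print(forecast[["ds", "yhat"]].tail())
```
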
1399 |
1400 | ## Chapter 10
1401 |
1402 | **Which of the following is a characteristic of using window-based methods for time series prediction?**
1403 | - A) They can only use linear models for forecasting
1404 | - B) They utilize a fixed window of data points to make predictions
1405 | - C) Their predictions are independent of the forecasting horizon
1406 | - D) They do not require sliding the window to generate new predictions
1407 |
1408 | ```{admonition} Answer
1409 | :class: note, dropdown
1410 | B
1411 | ```
1412 |
1413 | **What is a common limitation of linear models when predicting time series data involving trends and seasonal patterns?**
1414 | - A) They are highly sensitive to outliers
1415 | - B) They require no assumptions about data distribution
1416 | - C) They struggle to model the seasonal variations effectively
1417 | - D) They automatically handle missing data
1418 |
1419 | ```{admonition} Answer
1420 | :class: note, dropdown
1421 | C
1422 | ```
1423 |
1424 | **What fundamental concept allows neural networks to model non-linear relationships in data?**
1425 | - A) The use of linear activation functions only
1426 | - B) The application of a fixed number of layers
1427 | - C) The integration of non-linear activation functions
1428 | - D) The reduction of dimensions in the input data
1429 |
1430 | ```{admonition} Answer
1431 | :class: note, dropdown
1432 | C
1433 | ```
1434 |
1435 | **What is the primary function of the hidden layers in a Multi-Layer Perceptron?**
1436 | - A) To directly interact with the input data
1437 | - B) To apply non-linear transformations to the inputs
1438 | - C) To reduce the dimensionality of the input data
1439 | - D) To categorize input data into predefined classes
1440 |
1441 | ```{admonition} Answer
1442 | :class: note, dropdown
1443 | B
1444 | ```
1445 |
1446 | **How is the input data typically structured for training an MLP in time series forecasting?**
1447 | - A) As a sequence of random data points
1448 | - B) In chronological order without modification
1449 | - C) Divided into overlapping or non-overlapping windows
1450 | - D) Categorized by the frequency of the data points
1451 |
1452 | ```{admonition} Answer
1453 | :class: note, dropdown
1454 | C
1455 | ```
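The sketch below shows one way to build such windows and fit an MLP on them (NumPy plus scikit-learn; the window length and network size are arbitrary illustrative choices):

```python
import numpy as np
from sklearn.neural_network import MLPRegressor

rng = np.random.default_rng(0)
t = np.arange(500)
y = np.sin(2 * np.pi * t / 50) + 0.1 * rng.normal(size=t.size)

window = 20
# each row holds `window` past values; the target is the next value in the series
X = np.array([y[i:i + window] for i in range(len(y) - window)])
target = y[window:]

X_train, X_test = X[:400], X[400:]
y_train, y_test = target[:400], target[400:]

mlp = MLPRegressor(hidden_layer_sizes=(32, 32), max_iter=2000, random_state=0)
mlp.fit(X_train, y_train)
print(f"test R^2: {mlp.score(X_test, y_test):.3f}")
```
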
1456 |
1457 | **Why is the Multi-Layer Perceptron considered a part of the windowed approaches in time series forecasting?**
1458 | - A) It uses the entire dataset at once for predictions
1459 | - B) It processes individual data points separately
1460 | - C) It analyzes data within specific time frames or windows
1461 | - D) It predicts without regard to temporal sequence
1462 |
1463 | ```{admonition} Answer
1464 | :class: note, dropdown
1465 | C
1466 | ```
1467 |
1468 | **What is one limitation of the windowed approach to time series forecasting regarding the use of historical data?**
1469 | - A) It can only use the most recent data points.
1470 | - B) It requires a constant update of historical data.
1471 | - C) It restricts the model to only use data within the window.
1472 | - D) It mandates the inclusion of all historical data.
1473 |
1474 | ```{admonition} Answer
1475 | :class: note, dropdown
1476 | C
1477 | ```
1478 |
1479 | **How do RNNs benefit time series forecasting compared to MLPs?**
1480 | - A) By handling larger datasets more efficiently
1481 | - B) By processing each data point independently
1482 | - C) By capturing temporal dependencies within sequences
1483 | - D) By using fewer parameters and simpler training processes
1484 |
1485 | ```{admonition} Answer
1486 | :class: note, dropdown
1487 | C
1488 | ```
1489 |
1490 | **During the training of an RNN, what method is commonly used to update the model's weights?**
1491 | - A) Backpropagation through time
1492 | - B) Forward-only propagation
1493 | - C) Perceptron learning rule
1494 | - D) Unsupervised learning techniques
1495 |
1496 | ```{admonition} Answer
1497 | :class: note, dropdown
1498 | A
1499 | ```
1500 |
1501 | **Why might RNNs encounter difficulties in long sequence time series forecasting?**
1502 | - A) They process data too quickly.
1503 | - B) They favor shorter dependencies due to gradient issues.
1504 | - C) They are unable to handle multiple data types.
1505 | - D) They reduce the complexity of the model unnecessarily.
1506 |
1507 | ```{admonition} Answer
1508 | :class: note, dropdown
1509 | B
1510 | ```
1511 |
1512 | **How do Echo State Networks simplify the training process compared to standard Recurrent Neural Networks?**
1513 | - A) By training only the input weights
1514 | - B) By eliminating the need for hidden layers
1515 | - C) By only adapting the output weights
1516 | - D) By using simpler activation functions
1517 |
1518 | ```{admonition} Answer
1519 | :class: note, dropdown
1520 | C
1521 | ```
1522 |
1523 | **What is the function of the Readout in a Reservoir Computing model?**
1524 | - A) It serves as the primary memory component.
1525 | - B) It actively modifies the reservoir's weights.
1526 | - C) It is responsible for making final predictions from the reservoir states.
1527 | - D) It generates random weights for the reservoir.
1528 |
1529 | ```{admonition} Answer
1530 | :class: note, dropdown
1531 | C
1532 | ```
1533 |
1534 | **What is the primary function of the reservoir in Reservoir Computing models?**
1535 | - A) To reduce the dimensionality of the time series data
1536 | - B) To generate a high-dimensional representation of input features
1537 | - C) To directly predict future values of the time series
1538 | - D) To simplify the computational requirements of the network
1539 |
1540 | ```{admonition} Answer
1541 | :class: note, dropdown
1542 | B
1543 | ```
1544 |
1545 | **Why are the dynamical features generated by the reservoir considered general-purpose in Reservoir Computing?**
1546 | - A) They are specifically tailored to one type of time series data.
1547 | - B) They only predict one forecast horizon accurately.
1548 | - C) They adapt to different tasks without retraining the reservoir.
1549 | - D) They require constant updates to remain effective.
1550 |
1551 | ```{admonition} Answer
1552 | :class: note, dropdown
1553 | C
1554 | ```
1555 |
1556 | **What is the spectral radius of a reservoir in Reservoir Computing?**
1557 | - A) The largest absolute value among the eigenvalues of the reservoir's weight matrix
1558 | - B) The total number of neurons in the reservoir
1559 | - C) The minimum value required for computational stability
1560 | - D) The learning rate for training the reservoir
1561 |
1562 | ```{admonition} Answer
1563 | :class: note, dropdown
1564 | A
1565 | ```
1566 |
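A small NumPy sketch of how a random reservoir weight matrix can be rescaled to a target spectral radius (the reservoir size and the target value are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
n_neurons, target_rho = 300, 0.9

W = rng.uniform(-1, 1, size=(n_neurons, n_neurons))
current_rho = np.max(np.abs(np.linalg.eigvals(W)))   # spectral radius = largest |eigenvalue|
W *= target_rho / current_rho                        # rescale to the desired spectral radius

print(np.max(np.abs(np.linalg.eigvals(W))))          # approximately 0.9
```
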
1567 | **In a Reservoir with chaotic dynamics, what happens to two different initial states as time progresses?**
1568 | - A) They converge to the same final state quickly.
1569 | - B) They eventually diverge from each other.
1570 | - C) They stabilize at a midpoint between the two states.
1571 | - D) The evolution of one state is completely independent from the evolution of the other.
1572 |
1573 | ```{admonition} Answer
1574 | :class: note, dropdown
1575 | B
1576 | ```
1577 |
1578 | **What is the purpose of input scaling $\omega_{\text{in}}$ in the context of a Reservoir's input weights?**
1579 | - A) To decrease the stability of the reservoir
1580 | - B) To control the impact of input data on the reservoir's state
1581 | - C) To simplify the network architecture
1582 | - D) To enhance the linear behavior of the reservoir
1583 |
1584 | ```{admonition} Answer
1585 | :class: note, dropdown
1586 | B
1587 | ```
1588 |
1589 | **What is the impact of hyperparameter settings on the dynamics of a Reservoir?**
1590 | - A) They are irrelevant to how the Reservoir processes inputs.
1591 | - B) They primarily affect the speed of computations rather than accuracy.
1592 | - C) They dictate the internal dynamics and stability of the model.
1593 | - D) They only affect the output layer and not the Reservoir itself.
1594 |
1595 | ```{admonition} Answer
1596 | :class: note, dropdown
1597 | C
1598 | ```
1599 |
1600 | **What is Principal Component Analysis (PCA) primarily used for in data analysis?**
1601 | - A) To increase the dimensionality of the dataset
1602 | - B) To classify data into predefined categories
1603 | - C) To reduce the dimensionality of the dataset
1604 | - D) To predict future trends based on past data
1605 |
1606 | ```{admonition} Answer
1607 | :class: note, dropdown
1608 | C
1609 | ```
1610 |
1611 | **How are principal components selected in PCA?**
1612 | - A) By choosing components with the lowest eigenvalues
1613 | - B) By selecting components that explain the most variance
1614 | - C) Based on the components with the smallest eigenvectors
1615 | - D) Through random selection of the eigenvectors
1616 |
1617 | ```{admonition} Answer
1618 | :class: note, dropdown
1619 | B
1620 | ```
1621 |
1622 | **In what way can Principal Component Analysis (PCA) be applied within Reservoir Computing?**
1623 | - A) To increase the size of the reservoir states
1624 | - B) To decrease computational efficiency
1625 | - C) To reduce the dimensionality of the reservoir states
1626 | - D) To introduce more redundancy into the features
1627 |
1628 | ```{admonition} Answer
1629 | :class: note, dropdown
1630 | C
1631 | ```
1632 |
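A minimal sketch of reducing reservoir states with scikit-learn's PCA before feeding them to a readout; the state matrix here is random noise standing in for real reservoir activations.

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
states = rng.normal(size=(1000, 300))    # 1000 time steps, 300 reservoir neurons (placeholder)

pca = PCA(n_components=20)               # keep the 20 directions that explain the most variance
states_reduced = pca.fit_transform(states)

print(states_reduced.shape)                      # (1000, 20)
print(pca.explained_variance_ratio_.sum())       # fraction of the total variance retained
```
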
1633 | **In what scenario is a Gradient Boost Regression Tree particularly beneficial as a readout for Echo State Networks?**
1634 | - A) When data is predominantly linear and simple
1635 | - B) When minimal computational resources are available
1636 | - C) When dealing with highly non-linear and variable data
1637 | - D) When the model must be trained quickly with few data points
1638 |
1639 | ```{admonition} Answer
1640 | :class: note, dropdown
1641 | C
1642 | ```
1643 |
1644 |
1645 | ## Chapter 11
1646 |
1647 | **How is a dynamical system typically defined in the context of time series analysis?**
1648 | - A) A system where output values are independent of previous states.
1649 | - B) A system described by a deterministic process where the state evolves over time in a predictable manner.
1650 | - C) A random process with inputs that are not related to the time variable.
1651 | - D) A static system where the state does not change over time.
1652 |
1653 | ```{admonition} Answer
1654 | :class: note, dropdown
1655 | B
1656 | ```
1657 |
1658 | **In terms of mathematical modeling, how are continuous dynamical systems typically represented compared to discrete systems?**
1659 | - A) Using difference equations for continuous and differential equations for discrete.
1660 | - B) Using differential equations for continuous and difference equations for discrete.
1661 | - C) Both use differential equations but apply them differently.
1662 | - D) Both use difference equations but under different conditions.
1663 |
1664 | ```{admonition} Answer
1665 | :class: note, dropdown
1666 | B
1667 | ```
1668 |
1669 | **What distinguishes the outcomes of stochastic systems from those of deterministic systems in dynamical modeling?**
1670 | - A) Stochastic systems provide identical outcomes under identical conditions.
1671 | - B) Deterministic systems yield different outcomes under the same initial conditions.
1672 | - C) Stochastic systems may produce different outcomes even under identical initial conditions.
1673 | - D) Both systems are completely predictable and yield the same results every time.
1674 |
1675 | ```{admonition} Answer
1676 | :class: note, dropdown
1677 | C
1678 | ```
1679 |
1680 | **In terms of system behavior, how do linear and nonlinear dynamical systems differ?**
1681 | - A) Linear systems show exponential growth or decay, nonlinear systems do not.
1682 | - B) Linear systems' outputs are directly proportional to their inputs; nonlinear systems' outputs are not.
1683 | - C) Nonlinear systems are less predictable over time than linear systems.
1684 | - D) Nonlinear systems are always unstable, while linear systems are stable.
1685 |
1686 | ```{admonition} Answer
1687 | :class: note, dropdown
1688 | B
1689 | ```
1690 |
1691 | **What role does the parameter $r$ play in the logistic map as a model of population growth?**
1692 | - A) It represents the death rate of the population.
1693 | - B) It signifies the population’s initial size.
1694 | - C) It controls the growth rate of the population.
1695 | - D) It is irrelevant to changes in population size.
1696 |
1697 | ```{admonition} Answer
1698 | :class: note, dropdown
1699 | C
1700 | ```
1701 |
1702 | **Why is the logistic map classified as a nonlinear system?**
1703 | - A) It depends solely on linear equations to predict future states.
1704 | - B) It features a quadratic term that determines the rate of change.
1705 | - C) It behaves linearly regardless of parameter values.
1706 | - D) It simplifies all interactions to direct proportional relationships.
1707 |
1708 | ```{admonition} Answer
1709 | :class: note, dropdown
1710 | B
1711 | ```
1712 |
1713 | **What happens when the growth rate $r$ in the logistic map is increased beyond a critical threshold?**
1714 | - A) The system remains in a steady state.
1715 | - B) The system transitions from contractive to chaotic dynamics.
1716 | - C) Population growth becomes linear and predictable.
1717 | - D) The logistic map becomes a linear system.
1718 |
1719 | ```{admonition} Answer
1720 | :class: note, dropdown
1721 | B
1722 | ```
1723 |
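A tiny NumPy sketch of iterating the logistic map $x_{n+1} = r\,x_n(1 - x_n)$ for two values of $r$, one below and one above the chaotic threshold:

```python
import numpy as np

def logistic_trajectory(r, x0=0.2, n=50):
    x = np.empty(n)
    x[0] = x0
    for i in range(1, n):
        x[i] = r * x[i - 1] * (1 - x[i - 1])   # quadratic (nonlinear) update rule
    return x

print(logistic_trajectory(2.8)[-5:])   # settles onto a fixed point
print(logistic_trajectory(3.9)[-5:])   # chaotic, never settles
```
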
1724 | **What is the characteristic of a two-point attractor in a dynamical system?**
1725 | - A) The system settles into one of two possible stable states.
1726 | - B) The system’s states alternate randomly between two points.
1727 | - C) The system never reaches any of the two points but orbits around them.
1728 | - D) The system remains unstable and does not converge to any point.
1729 |
1730 | ```{admonition} Answer
1731 | :class: note, dropdown
1732 | A
1733 | ```
1734 |
1735 | **What can period-doubling bifurcations indicate about a system’s dynamics?**
1736 | - A) They signal a transition towards simpler, more predictable behavior.
1737 | - B) They show a system is becoming less sensitive to initial conditions.
1738 | - C) They indicate a system’s route to chaotic behavior as parameters vary.
1739 | - D) They reflect a system’s shift to a lower energy state.
1740 |
1741 | ```{admonition} Answer
1742 | :class: note, dropdown
1743 | C
1744 | ```
1745 |
1746 | **In a system with multiple point attractors, how do different initial conditions affect the outcome?**
1747 | - A) All initial conditions lead to the same attractor point.
1748 | - B) Initial conditions determine which of the several attractor points the system converges to.
1749 | - C) Multiple attractors lead to a chaotic system where outcomes are unpredictable.
1750 | - D) Initial conditions have no influence on the system’s behavior.
1751 |
1752 | ```{admonition} Answer
1753 | :class: note, dropdown
1754 | B
1755 | ```
1756 |
1757 | **What are Lyapunov exponents used for in the analysis of dynamical systems?**
1758 | - A) To measure the rate of separation of infinitesimally close trajectories.
1759 | - B) To calculate the exact future state of chaotic systems.
1760 | - C) To reduce the complexity of modeling chaotic systems.
1761 | - D) To determine the initial conditions of a system.
1762 |
1763 | ```{admonition} Answer
1764 | :class: note, dropdown
1765 | A
1766 | ```
1767 |
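For a one-dimensional map the largest Lyapunov exponent can be estimated as the average of $\ln|f'(x_n)|$ along a trajectory; for the logistic map, $f'(x) = r(1 - 2x)$. A sketch (NumPy only, reusing the logistic map from the earlier snippet):

```python
import numpy as np

def lyapunov_logistic(r, x0=0.2, n=10000, discard=100):
    x = x0
    logs = []
    for i in range(n):
        x = r * x * (1 - x)
        if i >= discard:                          # skip the initial transient
            logs.append(np.log(abs(r * (1 - 2 * x))))
    return np.mean(logs)

print(f"r=2.8: lambda ~ {lyapunov_logistic(2.8):.3f}")   # negative: stable fixed point
print(f"r=3.9: lambda ~ {lyapunov_logistic(3.9):.3f}")   # positive: chaos
```
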
1768 | **What is a return map in the context of dynamical systems?**
1769 | - A) A graphical representation of linear system trajectories.
1770 | - B) A tool for measuring the periodicity of a system.
1771 | - C) A plot showing the relationship between sequential points in a time series.
1772 | - D) A method for calculating Lyapunov exponents.
1773 |
1774 | ```{admonition} Answer
1775 | :class: note, dropdown
1776 | C
1777 | ```
1778 |
1779 | **What is a difference equation in the context of dynamical systems?**
1780 | - A) An equation that describes changes in continuous systems over infinitesimal time increments.
1781 | - B) An equation that models the discrete steps in which systems evolve over time.
1782 | - C) A method for determining the equilibrium state of a continuous system.
1783 | - D) A technique used exclusively in physical systems to measure differences in state.
1784 |
1785 | ```{admonition} Answer
1786 | :class: note, dropdown
1787 | B
1788 | ```
1789 |
1790 | **What are the Lotka-Volterra equations commonly used to model?**
1791 | - A) The interaction between predator and prey populations in an ecological system.
1792 | - B) The growth patterns of a single species in isolation.
1793 | - C) The economic dynamics between competing businesses.
1794 | - D) Chemical reaction rates in closed systems.
1795 |
1796 | ```{admonition} Answer
1797 | :class: note, dropdown
1798 | A
1799 | ```
1800 |
1801 | **What makes the Lotka-Volterra equations a continuous-time dynamical system?**
1802 | - A) They model population changes at discrete intervals only.
1803 | - B) They are based on continuous changes over time, not just at specific points.
1804 | - C) They predict exact population sizes at fixed times.
1805 | - D) The equations are used for systems that do not evolve over time.
1806 |
1807 | ```{admonition} Answer
1808 | :class: note, dropdown
1809 | B
1810 | ```
1811 |
1812 | **How does the Rössler system exemplify a chaotic dynamical system?**
1813 | - A) By exhibiting low sensitivity to initial conditions.
1814 | - B) Through its linear interaction of variables.
1815 | - C) By showing chaotic behavior when parameters reach certain values.
1816 | - D) It is inherently predictable regardless of parameter settings.
1817 |
1818 | ```{admonition} Answer
1819 | :class: note, dropdown
1820 | C
1821 | ```
1822 |
1823 | **What is the implication of a zero Lyapunov exponent in a dynamical system?**
1824 | - A) It signals exponential divergence of system trajectories.
1825 | - B) It indicates neutral stability where trajectories neither converge nor diverge.
1826 | - C) It suggests the system will always return to a stable equilibrium.
1827 | - D) It denotes a complete lack of sensitivity to initial conditions.
1828 |
1829 | ```{admonition} Answer
1830 | :class: note, dropdown
1831 | B
1832 | ```
1833 |
1834 | **What is the phase space of a dynamical system?**
1835 | - A) A graphical representation of all possible system states.
1836 | - B) A specific region where the system's energy is minimized.
1837 | - C) The timeline over which a system's behavior is observed.
1838 | - D) A mathematical model that predicts system failures.
1839 |
1840 | ```{admonition} Answer
1841 | :class: note, dropdown
1842 | A
1843 | ```
1844 |
1845 | **Why are fractal dimensions important in the analysis of chaotic systems?**
1846 | - A) They help in designing the system’s mechanical structure.
1847 | - B) They are crucial for understanding the complexity and scale properties of chaotic attractors.
1848 | - C) Fractal dimensions are used to simplify the mathematical model of the system.
1849 | - D) They determine the thermal properties of chaotic systems.
1850 |
1851 | ```{admonition} Answer
1852 | :class: note, dropdown
1853 | B
1854 | ```
1855 |
1856 | **What is the formula relating the ratio $r$, the number of parts $N$, and the dimensionality $D$ in fractal geometry?**
1857 | - A) $N = r^D$
1858 | - B) $D = \frac{\log N}{\log r}$
1859 | - C) $r = D \times N$
1860 | - D) $D = N \div r$
1861 |
1862 | ```{admonition} Answer
1863 | :class: note, dropdown
1864 | B
1865 | ```
1866 |
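As a worked example of this formula: each segment of the Koch curve is replaced by $N = 4$ copies scaled down by a factor of $r = 3$, so its fractal dimension is

$$
D = \frac{\log N}{\log r} = \frac{\log 4}{\log 3} \approx 1.26,
$$

a non-integer value lying between that of a line ($D = 1$) and a plane ($D = 2$).
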
1867 | **What does a non-integer fractal dimension signify about the structure of a fractal?**
1868 | - A) It represents simple, predictable patterns within the fractal.
1869 | - B) It indicates a higher degree of complexity and fine structure at infinitesimal scales.
1870 | - C) Non-integer dimensions are errors in mathematical calculations.
1871 | - D) It shows that fractals are typically three-dimensional.
1872 |
1873 | ```{admonition} Answer
1874 | :class: note, dropdown
1875 | B
1876 | ```
1877 |
1878 | **What does the dimensionality of an attractor reveal about a dynamical system?**
1879 | - A) The precision of measurements in the system.
1880 | - B) The potential energy levels throughout the system’s operation.
1881 | - C) The complexity and predictability of the system’s dynamics.
1882 | - D) The geographical spread of the system.
1883 |
1884 | ```{admonition} Answer
1885 | :class: note, dropdown
1886 | C
1887 | ```
1888 |
1889 | **What does it imply when we say a dynamical system is observed partially?**
1890 | - A) It implies complete observation of all variables and interactions within the system.
1891 | - B) Observations are limited to a subset of the system's variables, not capturing the entire state.
1892 | - C) It means observations are made continuously without any interruption.
1893 | - D) The system is only observed at its initial and final states, not during its evolution.
1894 |
1895 | ```{admonition} Answer
1896 | :class: note, dropdown
1897 | B
1898 | ```
1899 |
1900 | **What is the primary statement of Takens' Embedding Theorem?**
1901 | - A) It states that a fully observable dynamical system can always be understood from partial observations.
1902 | - B) It suggests that a single observed variable is sufficient to reconstruct a dynamical system’s full state under certain conditions.
1903 | - C) It asserts that dynamical systems cannot be understood without complete data.
1904 | - D) It requires continuous observation of all variables in a system for accurate modeling.
1905 |
1906 | ```{admonition} Answer
1907 | :class: note, dropdown
1908 | B
1909 | ```
1910 |
1911 | **What hyperparameters must be specified to construct time-delay embedding vectors?**
1912 | - A) The embedding dimension and the delay time.
1913 | - B) The system's total energy and mass.
1914 | - C) The variables' initial and final values.
1915 | - D) The linear coefficients of the system equations.
1916 |
1917 | ```{admonition} Answer
1918 | :class: note, dropdown
1919 | A
1920 | ```
1921 |
1922 | **What is a potential consequence of setting $\tau$ too short or too long in Takens' Embedding?**
1923 | - A) A $\tau$ that is too short or too long may cause overlap or excessive separation between data points in the embedding, obscuring the system's true dynamics.
1924 | - B) It can change the fundamental properties of the dynamical system.
1925 | - C) The dimensionality of the attractor will decrease.
1926 | - D) It will automatically adjust to the optimal length over time.
1927 |
1928 | ```{admonition} Answer
1929 | :class: note, dropdown
1930 | A
1931 | ```
1932 |
1933 | **What method is used to ascertain the appropriate $m$ for Takens' Embedding?**
1934 | - A) The method of false nearest neighbors is employed to find the smallest $m$ where points that appear close in the embedding are close in the original space.
1935 | - B) Using a complex algorithm that integrates all known variables of the system.
1936 | - C) Setting $m$ based on the total number of observations available.
1937 | - D) By selecting $m$ randomly to ensure a diverse range of behaviors is captured.
1938 |
1939 | ```{admonition} Answer
1940 | :class: note, dropdown
1941 | A
1942 | ```
1943 |
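A minimal NumPy sketch of building time-delay embedding vectors from a scalar series, given a chosen embedding dimension `m` and delay `tau` (both set to illustrative values here):

```python
import numpy as np

def delay_embedding(x, m, tau):
    """Stack delayed copies of x into m-dimensional embedding vectors."""
    n_vectors = len(x) - (m - 1) * tau
    return np.column_stack([x[i * tau: i * tau + n_vectors] for i in range(m)])

t = np.linspace(0, 20 * np.pi, 2000)
x = np.sin(t)                          # scalar observation of a simple oscillator

E = delay_embedding(x, m=3, tau=25)
print(E.shape)                         # (1950, 3): one 3-D point per admissible time step
```
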
1944 | **How can Takens' Embedding be used in time series forecasting?**
1945 | - A) By predicting the exact future states of a dynamical system.
1946 | - B) Through constructing a phase space that helps infer future states based on past behavior.
1947 | - C) By ensuring all predictions are absolutely deterministic.
1948 | - D) It is used to reduce the dimensionality of the data for easier visualization only.
1949 |
1950 | ```{admonition} Answer
1951 | :class: note, dropdown
1952 | B
1953 | ```
1954 |
1955 |
1956 | ## Chapter 12
1957 |
1958 | **What distinguishes a supervised task like classification from an unsupervised task such as clustering in time series analysis?**
1959 | - A) Supervised tasks use unlabelled data while unsupervised tasks use labelled data.
1960 | - B) Both supervised and unsupervised tasks use labels to guide the learning process.
1961 | - C) Supervised tasks use labels to guide the learning process, while unsupervised tasks do not use any labels.
1962 | - D) Unsupervised tasks require a set decision boundary predefined by the model.
1963 |
1964 | ```{admonition} Answer
1965 | :class: note, dropdown
1966 | C
1967 | ```
1968 |
1969 | **Under which circumstances is it preferable to use F1 score rather than accuracy?**
1970 | - A) When the data set is balanced and model performance is consistent across classes.
1971 | - B) When the data set is imbalanced and there is a need to balance the importance of precision and recall.
1972 | - C) When the classes in the data set are perfectly balanced.
1973 | - D) F1 score should be used only when accuracy is above a certain threshold.
1974 |
1975 | ```{admonition} Answer
1976 | :class: note, dropdown
1977 | B
1978 | ```
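
The effect is easy to see on a toy imbalanced problem (a minimal scikit-learn sketch; the 95/5 split is arbitrary):

```python
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

y_true = np.array([0] * 95 + [1] * 5)      # heavily imbalanced ground truth
y_lazy = np.zeros(100, dtype=int)          # a "classifier" that always predicts the majority class

print(accuracy_score(y_true, y_lazy))              # 0.95 -- looks excellent
print(f1_score(y_true, y_lazy, zero_division=0))   # 0.0  -- reveals that no positive was ever detected
```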
1979 |
1980 | **What is Normalized Mutual Information (NMI) used for in data analysis?**
1981 | - A) To measure the dependency between variables in regression tasks.
1982 | - B) To evaluate the performance of clustering by comparing the clusters to ground truth classes.
1983 | - C) To assess the accuracy of classification models.
1984 | - D) To determine the linearity of relationships in data.
1985 |
1986 | ```{admonition} Answer
1987 | :class: note, dropdown
1988 | B
1989 | ```
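
A minimal scikit-learn sketch of why NMI suits clustering evaluation: it only compares groupings, so renaming the cluster labels does not change the score.

```python
from sklearn.metrics import normalized_mutual_info_score

true_classes   = [0, 0, 0, 1, 1, 1]
cluster_labels = [1, 1, 1, 0, 0, 0]     # same grouping, permuted label names

print(normalized_mutual_info_score(true_classes, cluster_labels))   # 1.0
```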
1990 |
1991 | **Which statement best describes the relationship between similarity and dissimilarity measures in clustering algorithms?**
1992 | - A) Similarity measures are recalculated into dissimilarity measures before use.
1993 | - B) They are often used interchangeably with an inverse relationship; high similarity implies low dissimilarity.
1994 | - C) Dissimilarity measures are derived from similarity measures through complex transformations.
1995 | - D) Only dissimilarity measures are valid in statistical analysis.
1996 |
1997 | ```{admonition} Answer
1998 | :class: note, dropdown
1999 | B
2000 | ```
2001 |
2002 | **Why do different (dis)similarity measures affect classification outcomes?**
2003 | - A) All (dis)similarity measures produce the same results.
2004 | - B) Different measures may interpret the relationships between data points differently, impacting the classification boundaries.
2005 | - C) Only linear measures affect classification; nonlinear measures do not.
2006 | - D) (Dis)similarity measures are unrelated to classification results.
2007 |
2008 | ```{admonition} Answer
2009 | :class: note, dropdown
2010 | B
2011 | ```
2012 |
2013 | **In what scenarios is hierarchical clustering particularly useful?**
2014 | - A) When data is linear and simple.
2015 | - B) When the dataset is extremely large and computational resources are limited.
2016 | - C) When exploring data to find inherent structures and relationships at multiple scales.
2017 | - D) It is only useful for numeric data types.
2018 |
2019 | ```{admonition} Answer
2020 | :class: note, dropdown
2021 | C
2022 | ```
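
As a quick illustration of the "multiple scales" point (a toy SciPy sketch on synthetic 2D data), the linkage matrix stores the whole merge tree, which can then be cut at any desired number of clusters:

```python
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 0.3, (20, 2)), rng.normal(3, 0.3, (20, 2))])   # two well-separated blobs

Z = linkage(X, method="ward")                       # full hierarchy of merges, from n clusters down to 1
labels = fcluster(Z, t=2, criterion="maxclust")     # cut the tree into 2 flat clusters
print(labels)
```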
2023 |
2024 | **Why are standard distances like Euclidean distance often unsuitable for time series data?**
2025 | - A) They ignore the temporal dynamics and patterns specific to time series data.
2026 | - B) They calculate distances too quickly, leading to underfitting.
2027 | - C) They are more computationally intensive than specialized time series distances.
2028 | - D) They only work with categorical data.
2029 |
2030 | ```{admonition} Answer
2031 | :class: note, dropdown
2032 | A
2033 | ```
2034 |
2035 | **What defines a multivariate time series in data analysis?**
2036 | - A) A series that consists of multiple sequences of categorical data points.
2037 | - B) A series that tracks multiple variables or series over time.
2038 | - C) A time series that is derived from a single variable observed at different intervals.
2039 | - D) A series analyzed only through linear regression models.
2040 |
2041 | ```{admonition} Answer
2042 | :class: note, dropdown
2043 | B
2044 | ```
2045 |
2046 | **What is Dynamic Time Warping (DTW) and how does it differ from Euclidean distance in analyzing time series?**
2047 | - A) DTW is a method for measuring similarity between two sequences which may vary in speed, aligning them optimally to minimize their distance; Euclidean distance measures static point-to-point similarity.
2048 | - B) DTW uses a complex algorithm that requires more data than Euclidean distance.
2049 | - C) DTW can only be used with linear data, whereas Euclidean distance works with any data type.
2050 | - D) There is no difference; DTW and Euclidean distance are the same.
2051 |
2052 | ```{admonition} Answer
2053 | :class: note, dropdown
2054 | A
2055 | ```
2056 |
2057 | **What is an "alignment path" in the context of Dynamic Time Warping (DTW)?**
2058 | - A) A sequence of steps required to set up the DTW algorithm.
2059 | - B) The optimal route through a matrix that minimizes the cumulative distance between two time series.
2060 | - C) The maximum difference measured between two time series.
2061 | - D) A statistical method for estimating the time delay between sequences.
2062 |
2063 | ```{admonition} Answer
2064 | :class: note, dropdown
2065 | B
2066 | ```
2067 |
2068 | **How is the optimal alignment path determined in Dynamic Time Warping?**
2069 | - A) By randomly selecting paths until a satisfactory alignment is found.
2070 | - B) Through a greedy algorithm that chooses the shortest immediate path.
2071 | - C) Using dynamic programming to efficiently compute the minimal distance.
2072 | - D) By manual adjustment until the sequences are visually aligned.
2073 |
2074 | ```{admonition} Answer
2075 | :class: note, dropdown
2076 | C
2077 | ```
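
The dynamic-programming recursion can be written in a few lines (a didactic $O(nm)$ sketch for univariate series, not an optimized implementation):

```python
import numpy as np

def dtw_distance(x, y):
    """Cumulative-cost recursion for DTW between two univariate series."""
    n, m = len(x), len(y)
    D = np.full((n + 1, m + 1), np.inf)
    D[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = abs(x[i - 1] - y[j - 1])
            # each cell extends the cheapest of the three admissible predecessor alignments
            D[i, j] = cost + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
    return D[n, m]

a = np.sin(np.linspace(0, 2 * np.pi, 50))
b = np.sin(np.linspace(0, 2 * np.pi, 70))     # same shape, different length and speed
print(dtw_distance(a, b))
```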
2078 |
2079 | **What are the key properties of Dynamic Time Warping (DTW)?**
2080 | - A) It is sensitive to outliers and noise in the data.
2081 | - B) It is invariant to scaling and rotation of the time series.
2082 | - C) It adjusts for shifts and distortions in the time dimension.
2083 | - D) It requires the time series to be of the same length.
2084 |
2085 | ```{admonition} Answer
2086 | :class: note, dropdown
2087 | C
2088 | ```
2089 |
2090 | **How can Dynamic Time Warping (DTW) be combined with classifiers like SVC or k-NN for time series analysis?**
2091 | - A) By using the DTW distance matrix as a feature vector directly in classifiers.
2092 | - B) First computing the DTW distance matrix, then using this matrix to measure similarities in the classifier’s training and testing phases.
2093 | - C) Applying DTW after classification to improve the accuracy of SVC or k-NN.
2094 | - D) DTW cannot be combined with these types of classifiers.
2095 |
2096 | ```{admonition} Answer
2097 | :class: note, dropdown
2098 | B
2099 | ```
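
A possible workflow is sketched below, assuming the `tslearn` package for the pairwise DTW distances (any DTW implementation would do): precompute the distance matrices, then pass them to a classifier that accepts a precomputed metric.

```python
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from tslearn.metrics import cdist_dtw            # assumed available; computes pairwise DTW distances

rng = np.random.default_rng(0)
t = np.linspace(0, 2 * np.pi, 60)
X = np.array([np.sin(t) + 0.2 * rng.normal(size=60) for _ in range(20)]     # class 0
             + [np.cos(t) + 0.2 * rng.normal(size=60) for _ in range(20)])  # class 1
y = np.array([0] * 20 + [1] * 20)
train, test = np.arange(0, 40, 2), np.arange(1, 40, 2)

D_train = cdist_dtw(X[train])             # DTW distances among training series
D_test  = cdist_dtw(X[test], X[train])    # DTW distances from each test series to each training series

knn = KNeighborsClassifier(n_neighbors=1, metric="precomputed")
knn.fit(D_train, y[train])
print((knn.predict(D_test) == y[test]).mean())
```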
2100 |
2101 | **What role does kernel-PCA play when combined with DTW in visualizing time series data?**
2102 | - A) It enhances the computational speed of the DTW calculations.
2103 | - B) It simplifies the time series data into a single variable.
2104 | - C) It projects the DTW (dis)similarity matrix into a lower-dimensional space for easier visualization.
2105 | - D) It directly classifies time series data into predefined categories.
2106 |
2107 | ```{admonition} Answer
2108 | :class: note, dropdown
2109 | C
2110 | ```
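
A sketch of this visualization step, again assuming `tslearn` for the DTW matrix; note that a Gaussian kernel built on DTW distances is only a common heuristic and is not guaranteed to be positive semi-definite.

```python
import numpy as np
from sklearn.decomposition import KernelPCA
from tslearn.metrics import cdist_dtw                  # assumed available

rng = np.random.default_rng(1)
X = np.cumsum(rng.normal(size=(30, 80)), axis=1)       # 30 toy series of length 80

D = cdist_dtw(X)                                       # DTW dissimilarity matrix
K = np.exp(-D ** 2 / (2 * np.median(D[D > 0]) ** 2))   # heuristic similarity (kernel) matrix

emb = KernelPCA(n_components=2, kernel="precomputed").fit_transform(K)
print(emb.shape)    # (30, 2): each series becomes a 2D point that can be scatter-plotted
```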
2111 |
2112 | **What is the fundamental concept behind a Gaussian Mixture Model (GMM) in clustering?**
2113 | - A) A model that uses a single Gaussian distribution to represent all data.
2114 | - B) A non-probabilistic model that assigns each data point to a cluster.
2115 | - C) A probabilistic model that assumes each cluster follows a different Gaussian distribution.
2116 | - D) A model that clusters data based on fixed thresholds of similarity.
2117 |
2118 | ```{admonition} Answer
2119 | :class: note, dropdown
2120 | C
2121 | ```
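
A minimal scikit-learn sketch of the idea: each component is a separate Gaussian, and cluster membership is probabilistic rather than hard.

```python
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(-2, 0.5, (100, 2)), rng.normal(2, 1.0, (100, 2))])   # two Gaussian blobs

gmm = GaussianMixture(n_components=2, covariance_type="full", random_state=0).fit(X)
print(gmm.means_)                # one mean vector per Gaussian component
print(gmm.predict_proba(X[:3]))  # soft (probabilistic) cluster memberships
```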
2122 |
2123 | **What is a primary advantage of using an ensemble approach in TCK?**
2124 | - A) It simplifies the model by reducing the number of parameters.
2125 | - B) It improves clustering robustness and accuracy by integrating diverse model perspectives.
2126 | - C) It reduces computational requirements by using a single model.
2127 | - D) It focuses only on the largest cluster, ignoring smaller ones.
2128 |
2129 | ```{admonition} Answer
2130 | :class: note, dropdown
2131 | B
2132 | ```
2133 |
2134 | **What advantage does embedding a time series into a real-valued vector provide?**
2135 | - A) It allows the time series to be processed by traditional data analysis tools that require fixed-length inputs.
2136 | - B) It enhances the temporal resolution of the time series data.
2137 | - C) It preserves the raw format of time series data without any loss.
2138 | - D) It increases the storage requirements for time series data.
2139 |
2140 | ```{admonition} Answer
2141 | :class: note, dropdown
2142 | A
2143 | ```
2144 |
2145 | **What is the primary purpose of the Reservoir module in the Reservoir Computing framework for time series analysis?**
2146 | - A) To directly predict future values in a time series
2147 | - B) To preprocess data by normalizing and cleaning
2148 | - C) To extract and expand dynamic features from the input time series for use in classification and clustering
2149 | - D) To reduce the dimensionality of the input data
2150 |
2151 | ```{admonition} Answer
2152 | :class: note, dropdown
2153 | C
2154 | ```
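
To make the idea concrete, here is a toy leaky echo-state reservoir with random fixed weights and the spectral radius rescaled below one. It only illustrates how a univariate input is expanded into a high-dimensional sequence of dynamic features; it is not the implementation used in the course library.

```python
import numpy as np

rng = np.random.default_rng(0)
n_in, n_res, leak = 1, 100, 0.3

W_in = rng.uniform(-1, 1, (n_res, n_in))             # fixed random input weights
W = rng.uniform(-1, 1, (n_res, n_res))
W *= 0.9 / np.max(np.abs(np.linalg.eigvals(W)))      # rescale spectral radius below 1 (echo-state heuristic)

def reservoir_states(u):
    """Run the leaky reservoir over a univariate series u and return all states."""
    h = np.zeros(n_res)
    states = []
    for u_t in u:
        h = (1 - leak) * h + leak * np.tanh(W_in @ np.atleast_1d(u_t) + W @ h)
        states.append(h.copy())
    return np.array(states)                          # shape (len(u), n_res): expanded dynamic features

H = reservoir_states(np.sin(0.2 * np.arange(200)))
print(H.shape)
```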
2155 |
2156 | **What advantage does a bidirectional Reservoir offer over a standard Reservoir?**
2157 | - A) It captures temporal dependencies more effectively by integrating past and future context.
2158 | - B) It reduces the computational requirements for processing.
2159 | - C) It operates with fewer parameters and simpler configuration.
2160 | - D) It is easier to implement and maintain.
2161 |
2162 | ```{admonition} Answer
2163 | :class: note, dropdown
2164 | A
2165 | ```
2166 |
2167 | **What characteristic does the Dimensionality Reduction module bring to the Reservoir Computing framework?**
2168 | - A) It decreases the processing speed of the system
2169 | - B) It compresses the high-dimensional data into a more manageable form without significant loss of information
2170 | - C) It increases the number of features for better classification accuracy
2171 | - D) It requires additional external data to function effectively
2172 |
2173 | ```{admonition} Answer
2174 | :class: note, dropdown
2175 | B
2176 | ```
2177 |
2178 | **What is the main difference between using Tensor-PCA and traditional PCA for dimensionality reduction in Reservoir Computing?**
2179 | - A) Tensor-PCA does not support multivariate data.
2180 | - B) Tensor-PCA is better suited for handling the multidimensional data structures typical of reservoir states, unlike traditional PCA which is limited to flat data structures.
2181 | - C) Traditional PCA is faster and less complex computationally than Tensor-PCA.
2182 | - D) Traditional PCA can handle larger datasets more efficiently than Tensor-PCA.
2183 |
2184 | ```{admonition} Answer
2185 | :class: note, dropdown
2186 | B
2187 | ```
2188 |
2189 | **Why does representing time series using the Reservoir model space typically perform better than using just the output model space?**
2190 | - A) Because it includes only the most recent data points, ignoring earlier dynamics.
2191 | - B) It captures a richer and more comprehensive set of dynamic behaviors from the entire reservoir processing.
2192 | - C) The output model space is more computationally intensive, leading to slower performance.
2193 | - D) It uses simpler mathematical models, making it easier to implement.
2194 |
2195 | ```{admonition} Answer
2196 | :class: note, dropdown
2197 | B
2198 | ```
2199 |
2200 | **What is the purpose of the readout module in the Reservoir Computing framework for multivariate time series?**
2201 | - A) To store the incoming multivariate time series data for processing
2202 | - B) To filter noise from the input data before it enters the reservoir
2203 | - C) To map the time series representation to the desired output
2204 | - D) To increase the computational speed of the reservoir processing
2205 |
2206 | ```{admonition} Answer
2207 | :class: note, dropdown
2208 | C
2209 | ```
2210 |
2211 | **What is a disadvantage of using Time Series Cluster Kernel (TCK)?**
2212 | - A) It requires large amounts of memory.
2213 | - B) It is limited to linear time series data.
2214 | - C) It cannot handle multivariate data.
2215 | - D) It's computationally intensive.
2216 |
2217 | ```{admonition} Answer
2218 | :class: note, dropdown
2219 | D
2220 | ```
--------------------------------------------------------------------------------
/notebooks/00/media/slides_nb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/00/media/slides_nb.png
--------------------------------------------------------------------------------
/notebooks/00/media/slides_rise.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/00/media/slides_rise.png
--------------------------------------------------------------------------------
/notebooks/00/media/slides_rise2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/00/media/slides_rise2.png
--------------------------------------------------------------------------------
/notebooks/00/media/slides_rise3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/00/media/slides_rise3.png
--------------------------------------------------------------------------------
/notebooks/00/media/topics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/00/media/topics.png
--------------------------------------------------------------------------------
/notebooks/00/resources.md:
--------------------------------------------------------------------------------
1 | # Resources and acknowledgments
2 |
3 | The following resources served as inspiration and provided very useful content to write some of the sections of the book.
4 |
5 | - Intel course on time series [[Link](https://www.intel.com/content/www/us/en/developer/topic-technology/artificial-intelligence/training/course-time-series-analysis.html)].
6 | - Collection of links for books, articles, frameworks, etc... on time series [[Link](https://github.com/ElizaLo/Time-Series)].
7 | - A “semi-auto” way to determine parameters for SARIMA model [[Link](https://tsanggeorge.medium.com/a-semi-auto-way-to-determine-parameters-for-sarima-model-74cdee853080)].
8 | - Book on Time Series analysis (University of California) [[Link](https://stats.libretexts.org/Bookshelves/Advanced_Statistics/Time_Series_Analysis_(Aue))].
9 | - Lecture on statistical models, ESN, and state-space reconstruction [[Link](https://github.com/FilippoMB/lecture_RNN_phase_space)].
10 | - Time series classification and clustering with Reservoir Computing [[Link](https://github.com/FilippoMB/Time-series-classification-and-clustering-with-Reservoir-Computing)].
11 | - Medium article on Fourier transform [[Link](https://medium.com/the-modern-scientist/the-fourier-transform-and-its-application-in-machine-learning-edecfac4133c)]
12 | - Neptune blog, inspired by the Intel course [[Link](https://neptune.ai/blog/time-series-forecasting#:~:text=Pseudo%2Dadditive%20models%20combine%20the,related%20to%20the%20multiplicative%20model)].
13 | - Cheat-sheet TS models in Python [[Link](https://machinelearningmastery.com/time-series-forecasting-methods-in-python-cheat-sheet/)].
14 | - Time series analysis with Python [[Link](https://github.com/AileenNielsen/TimeSeriesAnalysisWithPython/tree/master)].
15 | - An Introduction to Kalman Filter [[Link](https://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf)].
16 | - Python library to extract static features from time series data [[Link](https://github.com/fraunhoferportugal/tsfel)].
17 | - Basic Concepts in Nonlinear Dynamics and Chaos [[Link](https://www.vanderbilt.edu/AnS/psychology/cogsci/chaos/workshop/Workshop.html)].
18 | - IPython Cookbook, Second Edition (2018) [[Link](https://ipython-books.github.io/121-plotting-the-bifurcation-diagram-of-a-chaotic-dynamical-system/)].
19 | - Introduction to Takens' Embedding [[Link](https://www.kaggle.com/code/tigurius/introduction-to-taken-s-embedding/notebook)].
20 | - An introduction to Dynamic Time Warping [[Link](https://rtavenar.github.io/blog/dtw.html)].
21 | - An intuitive approach to DTW — Dynamic Time Warping [[Link](https://towardsdatascience.com/an-intuitive-approach-to-dtw-dynamic-time-warping-f660ccb77ff4)].
22 |
23 | ## Acknowledgments
24 |
25 | Thanks to:
26 |
27 | - [Simone Scardapane](https://www.sscardapane.it/) for spotting many typos, giving feedback, and suggestions.
28 | - Jonas Berg Hansen for giving feedback on the exercises.
--------------------------------------------------------------------------------
/notebooks/01/media/anomaly.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/anomaly.png
--------------------------------------------------------------------------------
/notebooks/01/media/bar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/bar.png
--------------------------------------------------------------------------------
/notebooks/01/media/bar_unequal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/bar_unequal.png
--------------------------------------------------------------------------------
/notebooks/01/media/co2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/co2.png
--------------------------------------------------------------------------------
/notebooks/01/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/cover.png
--------------------------------------------------------------------------------
/notebooks/01/media/ecommerce.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/ecommerce.png
--------------------------------------------------------------------------------
/notebooks/01/media/economic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/economic.png
--------------------------------------------------------------------------------
/notebooks/01/media/electricity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/electricity.png
--------------------------------------------------------------------------------
/notebooks/01/media/equally_spaced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/equally_spaced.png
--------------------------------------------------------------------------------
/notebooks/01/media/not_equally_spaced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/not_equally_spaced.png
--------------------------------------------------------------------------------
/notebooks/01/media/partial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/partial.png
--------------------------------------------------------------------------------
/notebooks/01/media/passengers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/passengers.png
--------------------------------------------------------------------------------
/notebooks/01/media/passengers_trend.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/passengers_trend.png
--------------------------------------------------------------------------------
/notebooks/01/media/random_var.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/random_var.png
--------------------------------------------------------------------------------
/notebooks/01/media/sunspots.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/sunspots.png
--------------------------------------------------------------------------------
/notebooks/01/media/time_delta.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/time_delta.png
--------------------------------------------------------------------------------
/notebooks/01/media/ts_equal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/ts_equal.png
--------------------------------------------------------------------------------
/notebooks/01/media/ts_unequal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/ts_unequal.png
--------------------------------------------------------------------------------
/notebooks/01/media/water_temps.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/01/media/water_temps.png
--------------------------------------------------------------------------------
/notebooks/02/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/02/media/cover.png
--------------------------------------------------------------------------------
/notebooks/02/media/int.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/02/media/int.png
--------------------------------------------------------------------------------
/notebooks/02/media/nonstationary_chunk.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/02/media/nonstationary_chunk.png
--------------------------------------------------------------------------------
/notebooks/02/media/periodic_mean.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/02/media/periodic_mean.png
--------------------------------------------------------------------------------
/notebooks/02/media/stationary_chunk.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/02/media/stationary_chunk.png
--------------------------------------------------------------------------------
/notebooks/03/media/EqWMA.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/03/media/EqWMA.gif
--------------------------------------------------------------------------------
/notebooks/03/media/ExpWMA.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/03/media/ExpWMA.gif
--------------------------------------------------------------------------------
/notebooks/03/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/03/media/cover.png
--------------------------------------------------------------------------------
/notebooks/03/media/values.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/03/media/values.png
--------------------------------------------------------------------------------
/notebooks/04/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/04/media/cover.png
--------------------------------------------------------------------------------
/notebooks/04/media/timoelliot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/04/media/timoelliot.png
--------------------------------------------------------------------------------
/notebooks/05/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/05/media/cover.png
--------------------------------------------------------------------------------
/notebooks/06/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/06/media/cover.png
--------------------------------------------------------------------------------
/notebooks/06/media/non-stationarity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/06/media/non-stationarity.png
--------------------------------------------------------------------------------
/notebooks/07/media/correct.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/correct.png
--------------------------------------------------------------------------------
/notebooks/07/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/cover.png
--------------------------------------------------------------------------------
/notebooks/07/media/extreme1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/extreme1.png
--------------------------------------------------------------------------------
/notebooks/07/media/extreme2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/extreme2.png
--------------------------------------------------------------------------------
/notebooks/07/media/gps.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/gps.png
--------------------------------------------------------------------------------
/notebooks/07/media/innovation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/innovation.png
--------------------------------------------------------------------------------
/notebooks/07/media/inputs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/inputs.png
--------------------------------------------------------------------------------
/notebooks/07/media/kalman.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/kalman.png
--------------------------------------------------------------------------------
/notebooks/07/media/model_pred.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/model_pred.png
--------------------------------------------------------------------------------
/notebooks/07/media/system.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/system.png
--------------------------------------------------------------------------------
/notebooks/07/media/update.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/07/media/update.png
--------------------------------------------------------------------------------
/notebooks/08/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/08/media/cover.png
--------------------------------------------------------------------------------
/notebooks/08/media/filter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/08/media/filter.png
--------------------------------------------------------------------------------
/notebooks/08/media/ft.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/08/media/ft.png
--------------------------------------------------------------------------------
/notebooks/09/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/09/media/cover.png
--------------------------------------------------------------------------------
/notebooks/10/media/RNN.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/RNN.gif
--------------------------------------------------------------------------------
/notebooks/10/media/Readout.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/Readout.png
--------------------------------------------------------------------------------
/notebooks/10/media/Reservoir.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/Reservoir.gif
--------------------------------------------------------------------------------
/notebooks/10/media/Reservoir_pred1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/Reservoir_pred1.png
--------------------------------------------------------------------------------
/notebooks/10/media/Reservoir_pred2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/Reservoir_pred2.png
--------------------------------------------------------------------------------
/notebooks/10/media/bptt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/bptt.png
--------------------------------------------------------------------------------
/notebooks/10/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/cover.png
--------------------------------------------------------------------------------
/notebooks/10/media/data_split.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/data_split.png
--------------------------------------------------------------------------------
/notebooks/10/media/dynamics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/dynamics.png
--------------------------------------------------------------------------------
/notebooks/10/media/load_vs_temp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/load_vs_temp.png
--------------------------------------------------------------------------------
/notebooks/10/media/mlp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/mlp.png
--------------------------------------------------------------------------------
/notebooks/10/media/mlp_windowed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/mlp_windowed.png
--------------------------------------------------------------------------------
/notebooks/10/media/robot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/robot.png
--------------------------------------------------------------------------------
/notebooks/10/media/sin.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/sin.gif
--------------------------------------------------------------------------------
/notebooks/10/media/sin_pred.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/sin_pred.png
--------------------------------------------------------------------------------
/notebooks/10/media/tanh.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/tanh.png
--------------------------------------------------------------------------------
/notebooks/10/media/windowed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/windowed.png
--------------------------------------------------------------------------------
/notebooks/10/media/word_pred.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/10/media/word_pred.gif
--------------------------------------------------------------------------------
/notebooks/11/media/Great-britain-coastline-paradox.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/Great-britain-coastline-paradox.gif
--------------------------------------------------------------------------------
/notebooks/11/media/Logistic_Map_Bifurcations_Underneath_Mandelbrot_Set.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/Logistic_Map_Bifurcations_Underneath_Mandelbrot_Set.gif
--------------------------------------------------------------------------------
/notebooks/11/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/cover.png
--------------------------------------------------------------------------------
/notebooks/11/media/cube.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/cube.png
--------------------------------------------------------------------------------
/notebooks/11/media/dimensions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/dimensions.png
--------------------------------------------------------------------------------
/notebooks/11/media/koch1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/koch1.png
--------------------------------------------------------------------------------
/notebooks/11/media/koch2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/koch2.png
--------------------------------------------------------------------------------
/notebooks/11/media/koch3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/koch3.png
--------------------------------------------------------------------------------
/notebooks/11/media/line.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/line.png
--------------------------------------------------------------------------------
/notebooks/11/media/logistic-zoom.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/logistic-zoom.gif
--------------------------------------------------------------------------------
/notebooks/11/media/lokta_volt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/lokta_volt.png
--------------------------------------------------------------------------------
/notebooks/11/media/lorenz.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/lorenz.gif
--------------------------------------------------------------------------------
/notebooks/11/media/menger-sr.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/menger-sr.jpeg
--------------------------------------------------------------------------------
/notebooks/11/media/partial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/partial.png
--------------------------------------------------------------------------------
/notebooks/11/media/phase_space.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/phase_space.png
--------------------------------------------------------------------------------
/notebooks/11/media/predict.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/predict.png
--------------------------------------------------------------------------------
/notebooks/11/media/reconstruct.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/reconstruct.png
--------------------------------------------------------------------------------
/notebooks/11/media/rossler_attractor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/rossler_attractor.png
--------------------------------------------------------------------------------
/notebooks/11/media/sinusoid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/sinusoid.png
--------------------------------------------------------------------------------
/notebooks/11/media/square.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/square.png
--------------------------------------------------------------------------------
/notebooks/11/media/takens.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/takens.png
--------------------------------------------------------------------------------
/notebooks/11/media/triangle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/11/media/triangle.png
--------------------------------------------------------------------------------
/notebooks/12/media/DTW_idea_1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/DTW_idea_1.gif
--------------------------------------------------------------------------------
/notebooks/12/media/DTW_idea_2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/DTW_idea_2.gif
--------------------------------------------------------------------------------
/notebooks/12/media/RC_classifier.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/RC_classifier.png
--------------------------------------------------------------------------------
/notebooks/12/media/UWaveGestureLibrary.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/UWaveGestureLibrary.jpg
--------------------------------------------------------------------------------
/notebooks/12/media/bidir.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/bidir.png
--------------------------------------------------------------------------------
/notebooks/12/media/conditions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/conditions.png
--------------------------------------------------------------------------------
/notebooks/12/media/cost.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/cost.gif
--------------------------------------------------------------------------------
/notebooks/12/media/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/cover.png
--------------------------------------------------------------------------------
/notebooks/12/media/dim_red.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/dim_red.png
--------------------------------------------------------------------------------
/notebooks/12/media/dist_matrix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/dist_matrix.png
--------------------------------------------------------------------------------
/notebooks/12/media/ensemble.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/ensemble.png
--------------------------------------------------------------------------------
/notebooks/12/media/last_state.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/last_state.png
--------------------------------------------------------------------------------
/notebooks/12/media/map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/map.png
--------------------------------------------------------------------------------
/notebooks/12/media/matrix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/matrix.png
--------------------------------------------------------------------------------
/notebooks/12/media/mts_data.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/mts_data.png
--------------------------------------------------------------------------------
/notebooks/12/media/output_ms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/output_ms.png
--------------------------------------------------------------------------------
/notebooks/12/media/path.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/path.gif
--------------------------------------------------------------------------------
/notebooks/12/media/precision-recall.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/precision-recall.png
--------------------------------------------------------------------------------
/notebooks/12/media/recursion.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/recursion.gif
--------------------------------------------------------------------------------
/notebooks/12/media/redundant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/redundant.png
--------------------------------------------------------------------------------
/notebooks/12/media/reservoir_ms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/reservoir_ms.png
--------------------------------------------------------------------------------
/notebooks/12/media/soft_clust.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/soft_clust.png
--------------------------------------------------------------------------------
/notebooks/12/media/start_end.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/start_end.png
--------------------------------------------------------------------------------
/notebooks/12/media/tck_scheme.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/tck_scheme.png
--------------------------------------------------------------------------------
/notebooks/12/media/unidir.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/unidir.png
--------------------------------------------------------------------------------
/notebooks/12/media/warping.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/warping.gif
--------------------------------------------------------------------------------
/notebooks/12/media/warping_constrained.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/warping_constrained.gif
--------------------------------------------------------------------------------
/notebooks/12/media/warping_fix.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/notebooks/12/media/warping_fix.gif
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name="tsa_course",
5 | version="0.3.1",
6 | packages=find_packages(),
7 | python_requires='>=3.10',
8 | install_requires=[
9 | 'numpy>1.19.5',
10 | 'matplotlib',
11 | 'scipy',
12 | 'tqdm'
13 | ],
14 | author="Filippo Maria Bianchi",
15 | author_email="filippombianchi@gmail.com",
16 | description="A collection of scripts and functions used in the course 'Time Series Analysis with Python'",
17 | long_description=open('README.md').read(),
18 | long_description_content_type='text/markdown',
19 | project_urls={
20 | "Documentation": "https://filippomb.github.io/python-time-series-handbook",
21 | "Source Code": "https://github.com/FilippoMB/python-time-series-handbook",
22 | },
23 | classifiers=[
24 | "Programming Language :: Python :: 3",
25 | "License :: OSI Approved :: MIT License",
26 | "Operating System :: OS Independent",
27 | ],
28 | )
--------------------------------------------------------------------------------
/tsa_course/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoMB/python-time-series-handbook/cbb826d20d4ccf1c6bdc260e66c35251a61eb043/tsa_course/__init__.py
--------------------------------------------------------------------------------
/tsa_course/lecture1.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.fft import fft
3 |
4 | def fft_analysis(signal):
5 | """
6 | Perform a Fourier analysis on a time series.
7 |
8 | Parameters
9 | ----------
10 | signal : array-like
11 | The time series to analyze.
12 |
13 | Returns
14 | -------
15 | dominant_period : float
16 | The dominant period of the time series.
17 | positive_frequencies : array-like
18 | The positive frequencies.
19 | magnitudes : array-like
20 | The magnitudes of the positive frequencies.
21 | """
22 |
23 | # Linear detrending
24 | slope, intercept = np.polyfit(np.arange(len(signal)), signal, 1)
25 | trend = np.arange(len(signal)) * slope + intercept
26 | detrended = signal - trend
27 |
28 | fft_values = fft(detrended)
29 | frequencies = np.fft.fftfreq(len(fft_values))
30 |
31 |     # Keep only the positive frequencies
32 | positive_frequencies = frequencies[frequencies > 0]
33 | magnitudes = np.abs(fft_values)[frequencies > 0]
34 |
35 | # Identify dominant frequency
36 | dominant_frequency = positive_frequencies[np.argmax(magnitudes)]
37 |
38 | # Convert frequency to period (e.g., days, weeks, months, etc.)
39 | dominant_period = 1 / dominant_frequency
40 |
41 | return dominant_period, positive_frequencies, magnitudes
--------------------------------------------------------------------------------
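A minimal usage sketch for fft_analysis (an editorial example, not a file in the repository; the synthetic series below is purely illustrative): the function removes a linear trend, takes the FFT, and returns the dominant period together with the positive frequencies and their magnitudes.

    import numpy as np
    from tsa_course.lecture1 import fft_analysis

    # synthetic series: a 12-sample cycle plus a linear trend and noise
    t = np.arange(600)
    signal = np.sin(2 * np.pi * t / 12) + 0.01 * t + 0.1 * np.random.randn(600)

    dominant_period, freqs, mags = fft_analysis(signal)
    print(dominant_period)  # expected to be close to 12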
/tsa_course/lecture11.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | from scipy.integrate import solve_ivp
4 | from scipy.signal import find_peaks
5 | from tqdm.notebook import tqdm
6 |
7 | def _runge_kutta_4th_order(func, initial_value, start_time, end_time, params, stimulus=None):
8 | """
9 |     Compute one integration step of an ODE using the classical four-stage, fourth-order Runge-Kutta method.
10 |
11 | Parameters:
12 | ----------
13 | func : function
14 | ODE function. Must take arguments like func(t, x, p) where x and t are
15 | the state and time *now*, and p is a tuple of parameters. If there are
16 | no model parameters, p should be set to the empty tuple.
17 | initial_value : array_like
18 | Initial value for calculation.
19 | start_time : float
20 | Initial time for calculation.
21 | end_time : float
22 | Final time for calculation.
23 | params : tuple, optional
24 | Tuple of model parameters for func.
25 | stimulus : function or array_like, optional
26 | Stimulus to be applied to the system. If stimulus is a function, it will be evaluated at
27 | start_time, (start_time+end_time)/2, and end_time. If stimulus is an array, it should contain the
28 | stimulus values at start_time, (start_time+end_time)/2, and end_time.
29 |
30 | Returns:
31 | -------
32 |     step : array_like
33 |         The Runge-Kutta increment over [start_time, end_time], to be added to initial_value.
34 |
35 | Notes:
36 | ------
37 |     This function uses the classical four-stage, fourth-order Runge-Kutta method to advance the solution of the ODE by one step.
38 |
39 | Example usage:
40 | --------------
41 | >>> def f(t, x, p):
42 | ... return x * p[0] + p[1]
43 | >>> initial_value = 1.0
44 | >>> start_time = 0.0
45 | >>> end_time = 1.0
46 | >>> params = (2.0, 1.0)
47 |     >>> step = _runge_kutta_4th_order(f, initial_value, start_time, end_time, params)
48 | """
49 | midpoint_time = (start_time + end_time) / 2.0
50 | time_interval = end_time - start_time
51 |
52 | if stimulus is None:
53 | params_at_start = params_at_mid = params_at_end = params
54 | else:
55 | try:
56 |             # if stimulus is callable, evaluate it at the start, mid, and end times
57 |             stimulus_at_start = stimulus(start_time)
58 |             stimulus_at_mid, stimulus_at_end = stimulus(midpoint_time), stimulus(end_time)
59 |         except TypeError:
60 |             # otherwise assume stimulus is an array of values at those three times
61 |             stimulus_at_start, stimulus_at_mid, stimulus_at_end = stimulus
62 | params_at_start = (params, stimulus_at_start)
63 | params_at_mid = (params, stimulus_at_mid)
64 | params_at_end = (params, stimulus_at_end)
65 |
66 | K1 = func(start_time, initial_value, params_at_start)
67 | K2 = func(midpoint_time, initial_value + time_interval * K1 / 2.0, params_at_mid)
68 | K3 = func(midpoint_time, initial_value + time_interval * K2 / 2.0, params_at_mid)
69 | K4 = func(end_time, initial_value + time_interval * K3, params_at_end)
70 |
71 | step = time_interval * (K1 / 2.0 + K2 + K3 + K4 / 2.0) / 3.0
72 |
73 | return step
74 |
75 |
76 | def _variational_equation(t, Phi, x, func_jac, p=()):
77 | """
78 | Compute the time derivative of the variational matrix for a set of differential equations.
79 |
80 | Parameters:
81 | ----------
82 | t : array_like
83 | Array of times at which to evaluate the variational equation.
84 | Phi : array_like
85 | Array representing the variational matrix.
86 | x : array_like
87 | Array representing the system state.
88 | func_jac : function
89 | Jacobian of the ODE function.
90 | p : tuple, optional
91 | Tuple of model parameters for the ODE function.
92 |
93 | Returns:
94 | -------
95 | dPhi_dt_flat : array_like
96 | Array representing the time derivative of the variational matrix.
97 |
98 | Notes:
99 | ------
100 | The variational equation calculates the time derivative of the variational matrix using the Jacobian of the ODE function.
101 |
102 | The variational matrix represents the sensitivity of the system state to initial conditions.
103 |
104 | The output is a flattened array representing the time derivative of the variational matrix, which can be used for numerical integration.
105 |
106 | Example usage:
107 | --------------
108 | >>> t = np.linspace(0, 10, 100)
109 | >>> Phi = np.eye(num_dimensions, dtype=np.float64).flatten()
110 | >>> x = np.array([1.0, 2.0, 3.0])
111 | >>> dPhi_dt = _variational_equation(t, Phi, x, fjac, p)
112 | """
113 | num_dimensions = len(x)
114 | Phi_matrix = np.reshape(Phi, (num_dimensions, num_dimensions))
115 | dPhi_dt = np.dot(func_jac(t, x, p), Phi_matrix)
116 | dPhi_dt_flat = dPhi_dt.flatten()
117 | return dPhi_dt_flat
118 |
119 |
120 | def _combined_state_equations(t, S, num_dimensions, func, func_jac, p=()):
121 | """
122 | Propagates the combined state and variational matrix for a set of differential equations.
123 |
124 | Parameters:
125 | ----------
126 | t : array_like
127 | Array of times over which to propagate the combined state.
128 | S : array_like
129 | Array representing the combined state, consisting of the system state and the variational matrix.
130 | num_dimensions : int
131 | Number of dimensions of the system state.
132 | func : function
133 | ODE function. Must take arguments like f(t, x, p) where x and t are the state and time *now*, and p is a tuple of parameters.
134 | func_jac : function
135 | Jacobian of f.
136 | p : tuple, optional
137 | Tuple of model parameters for f.
138 |
139 | Returns:
140 | -------
141 | S_dot : array_like
142 | Array representing the time derivative of the combined state.
143 |
144 | Notes:
145 | ------
146 | The combined state is represented as a flattened array, where the first num_dimensions elements represent the system state, and the remaining elements represent the variational matrix.
147 |
148 | The variational equation is used to calculate the time derivative of the variational matrix.
149 |
150 | The combined state and variational matrix are propagated using the provided ODE function and Jacobian.
151 |
152 | The output is the time derivative of the combined state, which can be used for numerical integration.
153 |
154 | Example usage:
155 | --------------
156 |
157 | >>> t = np.linspace(0, 10, 100)
158 | >>> S = np.zeros(num_dimensions + num_dimensions**2)
159 | >>> S_dot = _combined_state_equations(t, S, num_dimensions, f, fjac, p)
160 | """
161 | x = S[:num_dimensions]
162 | Phi = S[num_dimensions:]
163 | S_dot = np.append(func(t, x, p), _variational_equation(t, Phi, x, func_jac, p))
164 | return S_dot
165 |
166 |
167 | def computeLE(func, func_jac, x0, t, p=(), ttrans=None):
168 | """
169 | Computes the global Lyapunov exponents for a set of ODEs using the method described
170 | in Sandri (1996), through the use of the variational matrix.
171 |
172 | Parameters:
173 | ----------
174 | func : function
175 | ODE function. Must take arguments like func(t, x, p) where x and t are
176 | the state and time *now*, and p is a tuple of parameters. If there are
177 | no model parameters, p should be set to the empty tuple.
178 | func_jac : function
179 | Jacobian of func.
180 | x0 : array_like
181 | Initial position for calculation. Integration of transients will begin
182 | from this point.
183 | t : array_like
184 | Array of times over which to calculate LE.
185 | p : tuple, optional
186 | Tuple of model parameters for f.
187 | ttrans : array_like, optional
188 | Times over which to integrate transient behavior.
189 | If not specified, assumes trajectory is on the attractor.
190 |
191 | Returns:
192 | -------
193 | LEs : array_like
194 | Array of global Lyapunov exponents.
195 | LE_history : array_like
196 | Array of Lyapunov exponents over time.
197 | """
198 |
199 | # Change the function signature to match the required format
200 | func_ = lambda t, x, p: func(t, x, *p)
201 | func_jac_ = lambda t, x, p: func_jac(t, x, *p)
202 |
203 | # Initialize variables
204 | num_dimensions = len(x0)
205 | num_time_steps = len(t)
206 | if ttrans is not None:
207 | num_time_steps += len(ttrans) - 1
208 |
209 | # integrate transient behavior
210 | Phi0 = np.eye(num_dimensions, dtype=np.float64).flatten()
211 | if ttrans is not None:
212 | xi = x0
213 | for i, (t1, t2) in enumerate(zip(ttrans[:-1], ttrans[1:])):
214 | xip1 = xi + _runge_kutta_4th_order(func_, xi, t1, t2, p)
215 | xi = xip1
216 | x0 = xi
217 |
218 | # start LE calculation
219 | LE = np.zeros((num_time_steps - 1, num_dimensions), dtype=np.float64)
220 | combined_state_solution = np.zeros((num_time_steps, num_dimensions*(num_dimensions+1)), dtype=np.float64)
221 | combined_state_solution[0] = np.append(x0, Phi0)
222 |
223 | for i, (t1, t2) in enumerate(zip(t[:-1], t[1:])):
224 | combined_state_temp = combined_state_solution[i] + _runge_kutta_4th_order(
225 | lambda t, S, p: _combined_state_equations(t, S, num_dimensions, func_, func_jac_, p),
226 | combined_state_solution[i], t1, t2, p)
227 | # perform QR decomposition on Phi
228 | Phi_matrix = np.reshape(combined_state_temp[num_dimensions:], (num_dimensions, num_dimensions))
229 | Q, R = np.linalg.qr(Phi_matrix)
230 | combined_state_solution[i+1] = np.append(combined_state_temp[:num_dimensions], Q.flatten())
231 | LE[i] = np.abs(np.diag(R))
232 |
233 | # compute LEs
234 | LE_history = np.cumsum(np.log(LE + 1e-10), axis=0) / np.tile(t[1:], (num_dimensions, 1)).T
235 |
236 | LEs = LE_history[-1, :]
237 | return LEs, LE_history
238 |
239 |
240 | def plot_bifurcation_diagram(func, func_jac, x0, time_vector, parameters, p_idx, max_time=None):
241 | """
242 | Computes and plots the bifurcation diagram for a set of ordinary differential equations (ODEs).
243 |
244 | Parameters:
245 | ----------
246 | func : function
247 | ODE function. Must take arguments like func(t, x, p) where x and t are
248 | the state and time *now*, and p is a tuple of parameters. If there are
249 | no model parameters, p should be set to the empty tuple.
250 | func_jac : function
251 | Jacobian of func.
252 | x0 : array_like
253 | Initial conditions.
254 | time_vector : array_like
255 | Time vector for the integration.
256 | parameters : array_like
257 | Range of parameter values to explore.
258 | p_idx : int
259 |         Index of the parameter to vary in the bifurcation diagram.
260 |     max_time : int, optional. Number of leading entries of time_vector to use when computing the Lyapunov exponents (all of them if None).
261 | Notes:
262 | ------
263 | The ODE function should be defined as a callable that takes arguments for the current state and time, and returns the derivative of the state with respect to time.
264 |
265 | The Jacobian function should be defined as a callable that takes arguments for the current state and time, and returns the Jacobian matrix of the ODE function.
266 |
267 | The initial conditions should be specified as an array-like object.
268 |
269 | The time vector should be an array-like object representing the time points at which to evaluate the ODEs.
270 |
271 | The range of parameter values should be specified as an array-like object.
272 |
273 | The index of the parameter to vary in the bifurcation diagram should be an integer.
274 |
275 | The function will compute the solution of the ODEs for each parameter value in the range, and plot the bifurcation diagram showing the local maxima and minima of the state variables, as well as the maximum Lyapunov exponents as a function of the parameter value.
276 | """
277 | maxima_x = []
278 | minima_x = []
279 | px_max = []
280 | px_min = []
281 | maxima_y = []
282 | minima_y = []
283 | py_max = []
284 | py_min = []
285 | maxima_z = []
286 | minima_z = []
287 | pz_max = []
288 | pz_min = []
289 | le_list = []
290 |
291 | for _p in tqdm(parameters):
292 |
293 | solution = solve_ivp(func, [time_vector[0], time_vector[-1]], x0, args=_p, t_eval=time_vector)
294 |
295 | local_max_x, _ = find_peaks(solution.y[0])
296 | local_min_x, _ = find_peaks(-1*solution.y[0])
297 | local_max_y, _ = find_peaks(solution.y[1])
298 | local_min_y, _ = find_peaks(-1*solution.y[1])
299 | local_max_z, _ = find_peaks(solution.y[2])
300 | local_min_z, _ = find_peaks(-1*solution.y[2])
301 |
302 | maxima_x.extend(solution.y[0, local_max_x])
303 | minima_x.extend(solution.y[0, local_min_x])
304 | px_max.extend([_p[p_idx]] * len(local_max_x))
305 | px_min.extend([_p[p_idx]] * len(local_min_x))
306 | maxima_y.extend(solution.y[1, local_max_y])
307 | minima_y.extend(solution.y[1, local_min_y])
308 | py_max.extend([_p[p_idx]] * len(local_max_y))
309 | py_min.extend([_p[p_idx]] * len(local_min_y))
310 | maxima_z.extend(solution.y[2, local_max_z])
311 | minima_z.extend(solution.y[2, local_min_z])
312 | pz_max.extend([_p[p_idx]] * len(local_max_z))
313 | pz_min.extend([_p[p_idx]] * len(local_min_z))
314 |
315 | LE_time = time_vector if max_time is None else time_vector[:max_time]
316 | LEs, _ = computeLE(func, func_jac, x0, LE_time, p=_p)
317 | le_list.append(LEs.max())
318 |
319 | x0 = solution.y[:,-1]
320 |
321 | mle = np.array(le_list)
322 | pos_idx = np.where(mle > 0)[0]
323 | neg_idx = np.where(mle < 0)[0]
324 | _, axes = plt.subplots(4, 1, figsize=(15, 15))
325 | axes[0].plot(px_max, maxima_x, 'ko', markersize=0.2, alpha=0.3, label="Local maxima")
326 | axes[0].plot(px_min, minima_x, 'o', color='tab:blue', markersize=0.2, alpha=0.3, label="Local minima")
327 | axes[0].legend(loc='upper left', markerscale=15)
328 | axes[0].set_ylabel("x-values")
329 | axes[1].plot(py_max, maxima_y, 'ko', markersize=0.2, alpha=0.3, label="Local maxima")
330 | axes[1].plot(py_min, minima_y, 'o', color='tab:blue', markersize=0.2, alpha=0.3, label="Local minima")
331 | axes[1].legend(loc='upper left', markerscale=15)
332 | axes[1].set_ylabel("y-values")
333 | axes[2].plot(pz_max, maxima_z, 'ko', markersize=0.2, alpha=0.3, label="Local maxima")
334 | axes[2].plot(pz_min, minima_z, 'o', color='tab:blue', markersize=0.2, alpha=0.3, label="Local minima")
335 | axes[2].legend(loc='upper left', markerscale=15)
336 | axes[2].set_ylabel("z-values")
337 | axes[3].plot(parameters[:,p_idx][pos_idx], mle[pos_idx], 'o', color='tab:red', markersize=2.5, alpha=0.5)
338 | axes[3].plot(parameters[:,p_idx][neg_idx], mle[neg_idx], 'ko', markersize=2.5, alpha=0.5)
339 | axes[3].set_ylabel("Maximum Lyapunov Exponent")
340 | axes[3].set_xlabel("Parameter Value")
341 | axes[3].axhline(0, color='k', lw=.5, alpha=.5)
--------------------------------------------------------------------------------
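A minimal usage sketch for computeLE (an editorial example, not a file in the repository), assuming the Lorenz system with its classical parameters; the ODE and Jacobian are written in the func(t, x, *p) form that computeLE expects after it unpacks the parameter tuple p.

    import numpy as np
    from tsa_course.lecture11 import computeLE

    def lorenz(t, x, sigma, rho, beta):
        # Lorenz equations: dx/dt, dy/dt, dz/dt
        return np.array([sigma * (x[1] - x[0]),
                         x[0] * (rho - x[2]) - x[1],
                         x[0] * x[1] - beta * x[2]])

    def lorenz_jac(t, x, sigma, rho, beta):
        # Jacobian of the Lorenz system
        return np.array([[-sigma, sigma, 0.0],
                         [rho - x[2], -1.0, -x[0]],
                         [x[1], x[0], -beta]])

    t = np.linspace(0.0, 50.0, 5000)        # grid used to estimate the exponents
    ttrans = np.linspace(0.0, 20.0, 2000)   # transient discarded beforehand
    LEs, LE_history = computeLE(lorenz, lorenz_jac, x0=np.array([1.0, 1.0, 1.0]),
                                t=t, p=(10.0, 28.0, 8.0 / 3.0), ttrans=ttrans)
    print(LEs)  # the largest exponent should come out positive (chaotic regime)

plot_bifurcation_diagram takes the same func/func_jac pair plus a 2-D array of parameter tuples and the index of the swept parameter; it assumes a three-dimensional system, since it plots the extrema of all of x, y, and z.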
/tsa_course/lecture2.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | def run_sequence_plot(x, y, title, xlabel="Time", ylabel="Values", ax=None):
4 | """
5 | Plot the time series data
6 |
7 | Parameters
8 | ----------
9 | x : array-like
10 | The time values.
11 | y : array-like
12 | The values of the time series.
13 | title : str
14 | The title of the plot.
15 | xlabel : str
16 | The label for the x-axis.
17 | ylabel : str
18 | The label for the y-axis.
19 |     ax : matplotlib axes, optional
20 |         The axes to plot on. If None, a new figure and axes are created.
21 |
22 | Returns
23 | -------
24 | ax : matplotlib axes
25 | The axes object with the plot.
26 | """
27 | if ax is None:
28 | _, ax = plt.subplots(1,1, figsize=(10, 3.5))
29 | ax.plot(x, y, 'k-')
30 | ax.set_title(title)
31 | ax.set_xlabel(xlabel)
32 | ax.set_ylabel(ylabel)
33 | ax.grid(alpha=0.3)
34 | return ax
--------------------------------------------------------------------------------
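A minimal usage sketch for run_sequence_plot (an editorial example, not a file in the repository; the random-walk series is illustrative):

    import numpy as np
    import matplotlib.pyplot as plt
    from tsa_course.lecture2 import run_sequence_plot

    time = np.arange(100)
    values = np.cumsum(np.random.randn(100))  # a random walk

    ax = run_sequence_plot(time, values, title="Random walk")
    plt.show()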
/tsa_course/lecture8.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 | def fourierPrediction(y, n_predict, n_harm = 5):
5 | """
6 | Predict the future values of a time series using the Fourier series.
7 |
8 | Parameters
9 | ----------
10 | y : array-like
11 | The time series to predict.
12 | n_predict : int
13 | The number of future values to predict.
14 | n_harm : int
15 | The number of harmonics to use in the Fourier series.
16 |
17 | Returns
18 | -------
19 | out : array-like
20 | The predicted values of the time series.
21 | """
22 | n = y.size # length of the time series
23 | t = np.arange(0, n) # time vector
24 |     p = np.polyfit(t, y, 1) # find linear trend in y
25 |     y_notrend = y - p[0] * t - p[1] # detrended y
26 |     y_freqdom = np.fft.fft(y_notrend) # detrended y in frequency domain
27 | f = np.fft.fftfreq(n) # frequencies
28 |
29 |     # Sort frequency indexes by descending magnitude
30 | indexes = np.argsort(np.absolute(y_freqdom))[::-1]
31 |
32 | t = np.arange(0, n + n_predict)
33 | restored_sig = np.zeros(t.size)
34 | for i in indexes[:1 + n_harm * 2]:
35 | amp = np.absolute(y_freqdom[i]) / n # amplitude
36 | phase = np.angle(y_freqdom[i]) # phase
37 | restored_sig += amp * np.cos(2 * np.pi * f[i] * t + phase)
38 |
39 | out = restored_sig + p[0] * t + p[1] # add back the trend
40 | return out
41 |
42 |
43 | def annotated_sin_plot():
44 | """
45 | Plot a sine wave with a phase shift and annotate it.
46 | """
47 | A = 1
48 | f = 1
49 | T = 1 / f
50 | omega = 2 * np.pi * f
51 | phi = 0.5
52 | t = np.linspace(0, T, 1000)
53 | y = A * np.sin(omega * t)
54 | y_phi = A * np.sin(omega * t + phi)
55 | plt.figure(figsize=(8, 5))
56 | plt.plot(t, y)
57 | arrow_idx = len(t) // 2 - 20
58 | t_arrow = t[arrow_idx]
59 | y_arrow = y[arrow_idx]
60 | plt.plot(t, y_phi, color='tab:red', linestyle='--')
61 | plt.annotate('', xy=(t_arrow-phi/(2*np.pi), y_arrow), xytext=(t_arrow, y_arrow),
62 | arrowprops=dict(arrowstyle="<->", color="k", lw=1.5))
63 |     plt.text(t_arrow-phi/(3*np.pi), y_arrow+0.1, r'$\phi$', va='center', color="k")
64 | plt.xlim(-0.1, T+0.1)
65 | plt.ylim(-A-0.2, A+0.2)
66 | xticks = [0, 1/4, 1/2, 3/4, 1]
67 | xtick_labels = ['0', r'$\pi/2$', r'$\pi$', r'$3\pi/2$', r'$2\pi$']
68 | plt.xticks(xticks, xtick_labels)
69 | plt.xlabel('Radians')
70 | ax2 = plt.gca().twiny() # Create a twin Axes sharing the yaxis
71 | ax2.set_xlim(plt.xlim()) # Ensure the limits are the same
72 | ax2.set_xticks(xticks) # Use the same x-ticks as ax1
73 |     ax2.set_xticklabels(['0', '90', '180', '270', '360']) # But with degree labels
74 | ax2.set_yticks([]) # Hide the y-axis ticks
75 | plt.xlim(-0.1, T+0.1)
76 | plt.xlabel('Degrees')
77 | plt.text(0.11, -0.1, 'time ($t$)', ha='right')
78 | plt.text(-0.03, A+0.02, 'A', ha='right')
79 | plt.text(-0.03, 0+0.02, '0', ha='right')
80 | plt.text(-0.03, -A+0.02, '-A', ha='right')
81 | plt.text(T+0.05, 0, r'$T = 1/f$', va='bottom', ha='right')
82 | plt.text(T / 2 - 0.38, -A + 0.5, 'f = frequency\nT = period\nA = amplitude', ha='center', va='top')
83 | plt.ylabel('Amplitude')
84 | plt.axhline(A, color='gray', linestyle='--', linewidth=1)
85 | plt.axhline(-A, color='gray', linestyle='--', linewidth=1)
86 | plt.axhline(0, color='gray', linestyle='--', linewidth=1)
87 | plt.grid(True)
88 | plt.tight_layout()
89 | plt.show()
--------------------------------------------------------------------------------
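A minimal usage sketch for fourierPrediction (an editorial example, not a file in the repository; the synthetic series and forecast horizon are illustrative): the function detrends the series, reconstructs it from its strongest Fourier components, extrapolates them n_predict steps ahead, and adds the linear trend back.

    import numpy as np
    import matplotlib.pyplot as plt
    from tsa_course.lecture8 import fourierPrediction

    n, horizon = 300, 50
    t = np.arange(n + horizon)
    series = 0.05 * t + np.sin(2 * np.pi * t / 25) + 0.1 * np.random.randn(n + horizon)

    # fit on the first n points, forecast the next `horizon` steps
    fit_and_forecast = fourierPrediction(series[:n], n_predict=horizon, n_harm=10)

    plt.plot(t, series, label="observed")
    plt.plot(t, fit_and_forecast, "--", label="Fourier fit + forecast")
    plt.axvline(n, color="gray", lw=0.5)
    plt.legend()
    plt.show()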