├── py.typed
├── arch
│   ├── py.typed
│   ├── _build
│   │   ├── __init__.py
│   │   └── git_version.py
│   ├── compat
│   │   ├── __init__.py
│   │   ├── matplotlib.py
│   │   ├── statsmodels.py
│   │   ├── pandas.py
│   │   └── numba.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── vix
│   │   │   ├── vix.csv.gz
│   │   │   └── __init__.py
│   │   ├── wti
│   │   │   ├── wti.csv.gz
│   │   │   └── __init__.py
│   │   ├── crude
│   │   │   ├── crude.csv.gz
│   │   │   └── __init__.py
│   │   ├── sp500
│   │   │   ├── sp500.csv.gz
│   │   │   └── __init__.py
│   │   ├── binary
│   │   │   ├── binary.csv.gz
│   │   │   └── __init__.py
│   │   ├── default
│   │   │   ├── default.csv.gz
│   │   │   └── __init__.py
│   │   ├── nasdaq
│   │   │   ├── nasdaq.csv.gz
│   │   │   └── __init__.py
│   │   ├── core_cpi
│   │   │   ├── core-cpi.csv.gz
│   │   │   └── __init__.py
│   │   ├── frenchdata
│   │   │   ├── frenchdata.csv.gz
│   │   │   └── __init__.py
│   │   └── utility.py
│   ├── tests
│   │   ├── __init__.py
│   │   ├── bootstrap
│   │   │   ├── __init__.py
│   │   │   └── test_block_length.py
│   │   ├── covariance
│   │   │   └── __init__.py
│   │   ├── unitroot
│   │   │   ├── __init__.py
│   │   │   ├── data
│   │   │   │   └── __init__.py
│   │   │   └── cointegration_data.py
│   │   ├── univariate
│   │   │   ├── __init__.py
│   │   │   ├── test_base.py
│   │   │   ├── test_rescale.py
│   │   │   └── test_moment.py
│   │   ├── utility
│   │   │   ├── __init__.py
│   │   │   ├── test_utility.py
│   │   │   ├── test_io.py
│   │   │   ├── test_cov.py
│   │   │   └── test_timeseries.py
│   │   ├── test_data.py
│   │   ├── test_tester.py
│   │   ├── test_compat.py
│   │   ├── test_build.py
│   │   ├── test_examples.py
│   │   └── test_vendor.py
│   ├── vendor
│   │   └── __init__.py
│   ├── __future__
│   │   ├── __init__.py
│   │   └── reindexing.py
│   ├── covariance
│   │   └── __init__.py
│   ├── unitroot
│   │   ├── critical_values
│   │   │   ├── __init__.py
│   │   │   ├── simulation
│   │   │   │   ├── requirements.txt
│   │   │   │   ├── eg_setup.bat
│   │   │   │   ├── eg_setup.ps1
│   │   │   │   ├── eg_setup.sh
│   │   │   │   ├── phillips_ouliaris.py
│   │   │   │   ├── kpss_simulation_process.py
│   │   │   │   ├── adf_z_simlation_process.py
│   │   │   │   ├── adf_simulation.py
│   │   │   │   ├── kpss_critical_values_simulation.py
│   │   │   │   ├── adf_z_critical_values_simulation.py
│   │   │   │   ├── adf_z_critical_values_simulation_joblib.py
│   │   │   │   ├── dfgls_simulation_process.py
│   │   │   │   ├── adf_z_critical_values_simulation_large_cluster.py
│   │   │   │   ├── shared.py
│   │   │   │   └── dfgls_critical_values_simulation.py
│   │   │   ├── dfgls.py
│   │   │   ├── kpss.py
│   │   │   └── zivot_andrews.py
│   │   └── __init__.py
│   ├── bootstrap
│   │   ├── _samplers.pyi
│   │   ├── meson.build
│   │   ├── __init__.py
│   │   ├── _samplers_python.py
│   │   └── _samplers.pyx
│   ├── univariate
│   │   ├── meson.build
│   │   ├── __init__.py
│   │   └── recursions.pyi
│   ├── __init__.py
│   ├── utility
│   │   ├── io.py
│   │   ├── __init__.py
│   │   ├── cov.py
│   │   ├── testing.py
│   │   └── exceptions.py
│   ├── conftest.py
│   ├── _typing.py
│   └── meson.build
├── doc
│   ├── source
│   │   ├── _static
│   │   │   ├── .gitignore
│   │   │   ├── images
│   │   │   │   ├── color-logo-256.png
│   │   │   │   ├── favicon-16x16.png
│   │   │   │   ├── favicon-32x32.png
│   │   │   │   ├── mstile-150x150.png
│   │   │   │   ├── apple-touch-icon.png
│   │   │   │   ├── android-chrome-192x192.png
│   │   │   │   ├── android-chrome-512x512.png
│   │   │   │   ├── browserconfig.xml
│   │   │   │   ├── site.webmanifest
│   │   │   │   └── safari-pinned-tab.svg
│   │   │   └── css
│   │   │       └── small_fixes.css
│   │   ├── images
│   │   │   ├── hero.png
│   │   │   ├── favicon.ico
│   │   │   ├── favicon.png
│   │   │   ├── color-logo.png
│   │   │   ├── hero.py
│   │   │   ├── favicon.svg
│   │   │   ├── logo.svg
│   │   │   └── favicon.py
│   │   ├── univariate
│   │   │   ├── background.rst
│   │   │   ├── utility.rst
│   │   │   ├── results.rst
│   │   │   ├── distribution.rst
│   │   │   ├── univariate.rst
│   │   │   ├── mean.rst
│   │   │   ├── volatility.rst
│   │   │   └── introduction.rst
│   │   ├── bootstrap
│   │   │   ├── bootstrap_histogram.png
│   │   │   ├── iid-bootstraps.rst
│   │   │   ├── timeseries-bootstraps.rst
│   │   │   ├── bootstrap.rst
│   │   │   ├── background.rst
│   │   │   ├── parameter-covariance-estimation.rst
│   │   │   └── low-level-interface.rst
│   │   ├── _templates
│   │   │   ├── autosummary
│   │   │   │   ├── method.rst
│   │   │   │   ├── attribute.rst
│   │   │   │   ├── minimal_module.rst
│   │   │   │   ├── member.rst
│   │   │   │   └── class.rst
│   │   │   └── layout.html
│   │   ├── changes.rst
│   │   ├── changes
│   │   │   ├── 8.0.rst
│   │   │   ├── 2.0.rst
│   │   │   ├── 1.0.rst
│   │   │   ├── 7.0.rst
│   │   │   ├── 3.0.rst
│   │   │   ├── 6.0.rst
│   │   │   └── 5.0.rst
│   │   ├── unitroot
│   │   │   ├── tests.rst
│   │   │   ├── cointegration.rst
│   │   │   ├── unitroot.rst
│   │   │   └── introduction.rst
│   │   ├── names_wordlist.txt
│   │   ├── multiple-comparison
│   │   │   ├── multiple-comparisons.rst
│   │   │   ├── background.rst
│   │   │   └── multiple-comparison-reference.rst
│   │   ├── types.rst
│   │   ├── covariance
│   │   │   └── covariance.rst
│   │   ├── index.rst
│   │   ├── api.rst
│   │   └── spelling_wordlist.txt
│   ├── requirements.txt
│   ├── Makefile
│   └── make.bat
├── .codebeatignore
├── .gitattributes
├── .codacy.yml
├── .github
│   ├── codeql.yml
│   ├── dependabot.yml
│   └── workflows
│       ├── codeql.yml
│       ├── generate-documentation.yml
│       └── cron-test.yml
├── codecov.yml
├── ci
│   ├── azure
│   │   ├── update_path.sh
│   │   ├── install-posix.sh
│   │   └── azure_template_windows.yml
│   ├── install-statsmodels-main.sh
│   ├── performance.py
│   └── github-actions
│       └── push-docs-gh-pages.sh
├── lgtm.yml
├── .flake8
├── requirements.txt
├── MANIFEST.in
├── .gitignore
├── .pep8speaks.yml
├── meson.options
├── meson.build
├── .readthedocs.yaml
├── azure-pipelines.yml
├── requirements-dev.txt
├── .coveragerc
└── LICENSE.md

/py.typed:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/py.typed:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/_build/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/compat/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/data/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/tests/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/vendor/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/__future__/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/covariance/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/__future__/reindexing.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/tests/bootstrap/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/tests/covariance/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/tests/unitroot/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/tests/univariate/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/doc/source/_static/.gitignore:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.codebeatignore:
--------------------------------------------------------------------------------
1 | arch/_version.py
2 | 
--------------------------------------------------------------------------------
/arch/tests/unitroot/data/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/arch/unitroot/critical_values/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | arch/_version.py export-subst
2 | 
--------------------------------------------------------------------------------
/.codacy.yml:
--------------------------------------------------------------------------------
1 | exclude_paths:
2 |   - 'arch/_version.py'
3 | 
--------------------------------------------------------------------------------
/arch/tests/utility/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = "Kevin"
2 | 
--------------------------------------------------------------------------------
/.github/codeql.yml:
--------------------------------------------------------------------------------
1 | paths-ignore:
2 |   - arch/_version.py
3 | 
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | codecov:
2 |   token: 50f271d5-306a-4195-8236-96c03712c180
3 | 
--------------------------------------------------------------------------------
/arch/unitroot/critical_values/simulation/requirements.txt:
--------------------------------------------------------------------------------
1 | joblib
2 | colorama
3 | psutil
--------------------------------------------------------------------------------
/arch/data/vix/vix.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/arch/data/vix/vix.csv.gz
--------------------------------------------------------------------------------
/arch/data/wti/wti.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/arch/data/wti/wti.csv.gz
--------------------------------------------------------------------------------
/arch/unitroot/critical_values/simulation/eg_setup.bat:
--------------------------------------------------------------------------------
1 | set MKL_NUM_THREADS=1
2 | set OMP_NUM_THREADS=1
--------------------------------------------------------------------------------
/arch/unitroot/critical_values/simulation/eg_setup.ps1:
--------------------------------------------------------------------------------
1 | $env:MKL_NUM_THREADS=1
2 | $env:OMP_NUM_THREADS=1
--------------------------------------------------------------------------------
/ci/azure/update_path.sh:
--------------------------------------------------------------------------------
1 | if [[ ${USE_CONDA} == "true" ]]; then
2 |   source activate arch-test
3 | fi
--------------------------------------------------------------------------------
/doc/source/images/hero.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/images/hero.png
--------------------------------------------------------------------------------
/lgtm.yml:
--------------------------------------------------------------------------------
1 | extraction:
2 |   python:
3 |     index:
4 |       exclude:
5 |         - arch/_version.py
6 | 
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | # Leave at 99 for now
3 | max-line-length = 99
4 | ignore = E203,W503,BLK100
5 | 
--------------------------------------------------------------------------------
/arch/data/crude/crude.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/arch/data/crude/crude.csv.gz
--------------------------------------------------------------------------------
/arch/data/sp500/sp500.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/arch/data/sp500/sp500.csv.gz
--------------------------------------------------------------------------------
/doc/source/images/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/images/favicon.ico
--------------------------------------------------------------------------------
/doc/source/images/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/images/favicon.png
--------------------------------------------------------------------------------
/arch/data/binary/binary.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/arch/data/binary/binary.csv.gz
--------------------------------------------------------------------------------
/arch/data/default/default.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/arch/data/default/default.csv.gz
--------------------------------------------------------------------------------
/arch/data/nasdaq/nasdaq.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/arch/data/nasdaq/nasdaq.csv.gz
--------------------------------------------------------------------------------
/doc/source/images/color-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/images/color-logo.png
--------------------------------------------------------------------------------
/doc/source/univariate/background.rst:
--------------------------------------------------------------------------------
1 | Theoretical Background
2 | ----------------------
3 | 
4 | *To be completed*
--------------------------------------------------------------------------------
/arch/data/core_cpi/core-cpi.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/arch/data/core_cpi/core-cpi.csv.gz
--------------------------------------------------------------------------------
/arch/data/frenchdata/frenchdata.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/arch/data/frenchdata/frenchdata.csv.gz
--------------------------------------------------------------------------------
/doc/source/_static/images/color-logo-256.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/_static/images/color-logo-256.png
--------------------------------------------------------------------------------
/doc/source/_static/images/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/_static/images/favicon-16x16.png
--------------------------------------------------------------------------------
/doc/source/_static/images/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/_static/images/favicon-32x32.png
--------------------------------------------------------------------------------
/doc/source/_static/images/mstile-150x150.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/_static/images/mstile-150x150.png
--------------------------------------------------------------------------------
/doc/source/bootstrap/bootstrap_histogram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/bootstrap/bootstrap_histogram.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Keep synced with pyproject.toml
2 | numpy>=1.22.3
3 | scipy>=1.8
4 | pandas>=1.4
5 | statsmodels>=0.12
6 | packaging
--------------------------------------------------------------------------------
/doc/source/_static/images/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/_static/images/apple-touch-icon.png
--------------------------------------------------------------------------------
/doc/source/_static/images/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/_static/images/android-chrome-192x192.png
--------------------------------------------------------------------------------
/doc/source/_static/images/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bashtage/arch/HEAD/doc/source/_static/images/android-chrome-512x512.png
--------------------------------------------------------------------------------
/arch/unitroot/critical_values/simulation/eg_setup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | export MKL_NUM_THREADS=1
4 | export OMP_NUM_THREADS=1
5 | export VML_NUM_THREADS=1
6 | 
--------------------------------------------------------------------------------
/doc/source/_templates/autosummary/method.rst:
--------------------------------------------------------------------------------
1 | :orphan:
2 | 
3 | {{ fullname | escape | underline}}
4 | 
5 | .. currentmodule:: {{ module }}
6 | 
7 | .. auto{{ objtype }}:: {{ objname }}
8 | 
9 | 
--------------------------------------------------------------------------------
/doc/source/_templates/autosummary/attribute.rst:
--------------------------------------------------------------------------------
1 | :orphan:
2 | 
3 | {{ fullname | escape | underline}}
4 | 
5 | .. currentmodule:: {{ module }}
6 | 
7 | .. auto{{ objtype }}:: {{ objname }}
8 | 
9 | 
--------------------------------------------------------------------------------
/doc/source/_templates/autosummary/minimal_module.rst:
--------------------------------------------------------------------------------
1 | {{ fullname | escape | underline}}
2 | 
3 | .. automodule:: {{ fullname }}
4 | 
5 | {% block docstring %}
6 | {% endblock %}
7 | 
8 | 
9 | 
--------------------------------------------------------------------------------
/doc/source/_templates/autosummary/member.rst:
--------------------------------------------------------------------------------
1 | :orphan:
2 | 
3 | {{ fullname | escape | underline}}
4 | 
5 | .. currentmodule:: {{ module }}
6 | 
7 | .. auto{{ objtype }}:: {{ objname }}
8 | 
9 | 
10 | 
--------------------------------------------------------------------------------
/arch/bootstrap/_samplers.pyi:
--------------------------------------------------------------------------------
1 | from arch._typing import Float64Array, Int64Array1D
2 | 
3 | def stationary_bootstrap_sample(
4 |     indices: Int64Array1D, u: Float64Array, p: float
5 | ) -> Int64Array1D: ...
6 | 
--------------------------------------------------------------------------------
/arch/compat/matplotlib.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import matplotlib.pyplot as plt  # noqa: F401
3 | 
4 |     HAS_MATPLOTLIB = True
5 | except ImportError:
6 |     HAS_MATPLOTLIB = False
7 | 
8 | __all__ = ["HAS_MATPLOTLIB"]
9 | 
--------------------------------------------------------------------------------
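The compat modules above use a guarded import so that optional dependencies fail softly. A minimal sketch (not part of the repository) of how downstream code can branch on such a flag; `plot_histogram` is a hypothetical consumer:

```python
import numpy as np

from arch.compat.matplotlib import HAS_MATPLOTLIB


def plot_histogram(values: np.ndarray):
    # Degrade gracefully when the optional dependency is absent.
    if not HAS_MATPLOTLIB:
        raise ImportError("matplotlib is required for plotting")
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.hist(values, bins=30)
    return fig
```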
/arch/tests/utility/test_utility.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from arch import utility
4 | 
5 | 
6 | @pytest.mark.slow
7 | @pytest.mark.parametrize("arg", [["--collect-only"], "--collect-only"])
8 | def test_runner(arg):
9 |     utility.test(arg, exit=False)
10 | 
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |   # Maintain dependencies for GitHub Actions
4 |   - package-ecosystem: "github-actions"
5 |     directory: "/"
6 |     schedule:
7 |       # Check for updates to GitHub Actions every week
8 |       interval: "weekly"
9 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md
2 | include LICENSE.md
3 | include pyproject.toml
4 | include requirements.txt
5 | include setup.cfg
6 | include arch/bootstrap/_samplers.pyx
7 | include arch/univariate/recursions.pyx
8 | include arch/_version.py
9 | recursive-include arch/tests *.csv
10 | recursive-include arch/data *.csv.gz
11 | 
--------------------------------------------------------------------------------
/ci/install-statsmodels-main.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | python -m pip uninstall statsmodels -y
4 | export REPO_DIR=$PWD
5 | mkdir statsmodels-clone
6 | cd statsmodels-clone
7 | git clone --branch=main --depth=10000 https://github.com/statsmodels/statsmodels.git
8 | cd statsmodels
9 | pip install . -v
10 | cd ${REPO_DIR}
11 | 
--------------------------------------------------------------------------------
/arch/bootstrap/meson.build:
--------------------------------------------------------------------------------
1 | if not get_option('no-binary')
2 |     py.extension_module(
3 |         '_samplers',
4 |         '_samplers.pyx',
5 |         install: true,
6 |         include_directories: [inc_np],
7 |         subdir: 'arch/bootstrap',
8 |         c_args: cython_c_args,
9 |         cython_args: cython_args,
10 |     )
11 | endif
12 | 
--------------------------------------------------------------------------------
/doc/source/_static/images/browserconfig.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <browserconfig>
3 |     <msapplication>
4 |         <tile>
5 |             <square150x150logo src="/arch/_static/images/mstile-150x150.png"/>
6 |             <TileColor>#2b5797</TileColor>
7 |         </tile>
8 |     </msapplication>
9 | </browserconfig>
10 | 
--------------------------------------------------------------------------------
/arch/univariate/meson.build:
--------------------------------------------------------------------------------
1 | if not get_option('no-binary')
2 |     py.extension_module(
3 |         'recursions',
4 |         'recursions.pyx',
5 |         install: true,
6 |         include_directories: [inc_np],
7 |         subdir: 'arch/univariate',
8 |         cython_args: cython_args,
9 |         c_args: cython_c_args,
10 |     )
11 | endif
12 | 
--------------------------------------------------------------------------------
/doc/source/changes.rst:
--------------------------------------------------------------------------------
1 | Change Logs
2 | -----------
3 | 
4 | .. include:: changes/8.0.rst
5 | .. include:: changes/7.0.rst
6 | 
7 | =============
8 | Past Releases
9 | =============
10 | 
11 | .. toctree::
12 |    :maxdepth: 1
13 | 
14 |    changes/6.0
15 |    changes/5.0
16 |    changes/4.0
17 |    changes/3.0
18 |    changes/2.0
19 |    changes/1.0
20 | 
--------------------------------------------------------------------------------
/arch/tests/univariate/test_base.py:
--------------------------------------------------------------------------------
1 | from arch.univariate.base import format_float_fixed
2 | 
3 | 
4 | def test_format_float_fixed():
5 |     out = format_float_fixed(0.0)
6 |     assert out == "0.0000"
7 |     out = format_float_fixed(1.23e-9)
8 |     assert out == "1.2300e-09"
9 |     out = format_float_fixed(123456789.0)
10 |     assert out == "1.2346e+08"
11 | 
--------------------------------------------------------------------------------
/doc/source/univariate/utility.rst:
--------------------------------------------------------------------------------
1 | Utilities
2 | =========
3 | Utilities that do not fit well on other pages.
4 | 
5 | .. module:: arch.utility.testing
6 |    :synopsis: Utilities that do not fit well in other modules
7 | .. currentmodule:: arch.utility.testing
8 | 
9 | Test Results
10 | ------------
11 | 
12 | .. autoclass:: WaldTestStatistic
13 |    :members:
14 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | arch/**/*.pyd
3 | arch/**/*.so
4 | arch/_version.py
5 | arch/bootstrap/*.c
6 | arch.egg-info/
7 | build/
8 | dist
9 | .benchmarks/
10 | .cache/
11 | doc/**/*.ipynb
12 | .pytest_cache
13 | __pycache__/
14 | **/__pycache__/
15 | **/.ipynb_checkpoints/
16 | **/.hypothesis/
17 | dist/
18 | doc/build/
19 | doc/source/**/generated/
20 | arch/univariate/recursions.c
21 | 
--------------------------------------------------------------------------------
/arch/__init__.py:
--------------------------------------------------------------------------------
1 | from ._version import version as __version__, version_tuple
2 | from .univariate.mean import arch_model
3 | from .utility import test
4 | 
5 | 
6 | def doc() -> None:
7 |     import webbrowser  # noqa: PLC0415
8 | 
9 |     webbrowser.open("https://bashtage.github.io/arch/")
10 | 
11 | 
12 | __all__ = ["__version__", "arch_model", "doc", "test", "version_tuple"]
13 | 
--------------------------------------------------------------------------------
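`arch/__init__.py` re-exports `arch_model` as the package's primary entry point. A minimal usage sketch (not part of the repository), with simulated percent returns standing in for real data:

```python
import numpy as np
import pandas as pd

from arch import arch_model

# Simulated stand-in for a series of percent returns.
rng = np.random.default_rng(0)
returns = pd.Series(rng.standard_normal(1000))

# A GARCH(1,1) with a constant mean is the default specification.
am = arch_model(returns)
res = am.fit(disp="off")
print(res.summary())
```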
/arch/data/vix/__init__.py:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 | 
3 | from arch.data.utility import load_file
4 | 
5 | 
6 | def load() -> DataFrame:
7 |     """
8 |     Load the VIX Index data used in the examples
9 | 
10 |     Returns
11 |     -------
12 |     data : DataFrame
13 |         Data set containing historical VIX
14 |     """
15 |     return load_file(__file__, "vix.csv.gz")
16 | 
--------------------------------------------------------------------------------
/arch/compat/statsmodels.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 | 
3 | from numpy import recarray
4 | from pandas import DataFrame
5 | 
6 | 
7 | def dataset_loader(dataset: Any) -> recarray | DataFrame:
8 |     """Load a dataset using the new syntax if possible"""
9 |     try:
10 |         return dataset.load(as_pandas=True).data
11 |     except TypeError:
12 |         return dataset.load().data
13 | 
--------------------------------------------------------------------------------
/doc/source/changes/8.0.rst:
--------------------------------------------------------------------------------
1 | =========
2 | Version 8
3 | =========
4 | 
5 | Release 8.0
6 | ===========
7 | - Moved to the Meson build system from setuptools.
8 | - The minimum required Python is now 3.10.
9 | - Improved compatibility with future NumPy and pandas, including pandas 3.
10 | - Many small documentation fixes.
11 | - Improved typing accuracy across the codebase.
12 | 
13 | 
14 | 
--------------------------------------------------------------------------------
/doc/source/_static/css/small_fixes.css:
--------------------------------------------------------------------------------
1 | div.deprecated {
2 |     background-color: rgba(255, 0, 0, 0.2);
3 |     padding: 0.1em 0.5em;
4 |     border: #c00 solid 2px;
5 | }
6 | 
7 | span.versionmodified.deprecated {
8 |     color: #c00;
9 |     font-weight: bold;
10 | }
11 | 
12 | span.classifier:before {
13 |     font-style: normal;
14 |     margin: 0 0.5em;
15 |     content: ":";
16 |     display: inline-block;
17 | }
--------------------------------------------------------------------------------
/arch/data/default/__init__.py:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 | 
3 | from arch.data.utility import load_file
4 | 
5 | 
6 | def load() -> DataFrame:
7 |     """
8 |     Load the AAA and BAA rates used in the examples
9 | 
10 |     Returns
11 |     -------
12 |     data : DataFrame
13 |         Data set containing the rates on AAA and BAA rated bonds.
14 |     """
15 |     return load_file(__file__, "default.csv.gz")
16 | 
--------------------------------------------------------------------------------
/arch/data/sp500/__init__.py:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 | 
3 | from arch.data.utility import load_file
4 | 
5 | 
6 | def load() -> DataFrame:
7 |     """
8 |     Load the S&P 500 data used in the examples
9 | 
10 |     Returns
11 |     -------
12 |     data : DataFrame
13 |         Data set containing OHLC, adjusted close and the trading volume.
14 |     """
15 |     return load_file(__file__, "sp500.csv.gz")
16 | 
--------------------------------------------------------------------------------
/arch/data/nasdaq/__init__.py:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 | 
3 | from arch.data.utility import load_file
4 | 
5 | 
6 | def load() -> DataFrame:
7 |     """
8 |     Load the NASDAQ Composite data used in the examples
9 | 
10 |     Returns
11 |     -------
12 |     data : DataFrame
13 |         Data set containing OHLC, adjusted close and the trading volume.
14 |     """
15 |     return load_file(__file__, "nasdaq.csv.gz")
16 | 
--------------------------------------------------------------------------------
/arch/data/binary/__init__.py:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 | 
3 | from arch.data.utility import load_file
4 | 
5 | 
6 | def load() -> DataFrame:
7 |     """
8 |     Load the graduate school admissions data used in the examples
9 | 
10 |     Returns
11 |     -------
12 |     data : DataFrame
13 |         Dataset containing GRE, GPA and class rank, and admission decision
14 |     """
15 |     return load_file(__file__, "binary.csv.gz")
16 | 
--------------------------------------------------------------------------------
/doc/source/changes/2.0.rst:
--------------------------------------------------------------------------------
1 | =========
2 | Version 2
3 | =========
4 | 
5 | Version 2.2
6 | ===========
7 | - Added multiple comparison procedures
8 | - Typographical and other small changes
9 | 
10 | Version 2.1
11 | ===========
12 | - Add unit root tests:
13 |   * Augmented Dickey-Fuller
14 |   * Dickey-Fuller GLS
15 |   * Phillips-Perron
16 |   * KPSS
17 |   * Variance Ratio
18 | - Removed deprecated locations for ARCH modeling functions
19 | 
20 | 
21 | 
--------------------------------------------------------------------------------
/arch/data/crude/__init__.py:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 | 
3 | from arch.data.utility import load_file
4 | 
5 | 
6 | def load() -> DataFrame:
7 |     """
8 |     Load the crude oil price data used in the examples
9 | 
10 |     Returns
11 |     -------
12 |     data : DataFrame
13 |         Data set containing the price of crude oil
14 | 
15 |     Notes
16 |     -----
17 |     From the FRED database
18 |     """
19 |     return load_file(__file__, "crude.csv.gz")
20 | 
--------------------------------------------------------------------------------
/arch/data/core_cpi/__init__.py:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 | 
3 | from arch.data.utility import load_file
4 | 
5 | 
6 | def load() -> DataFrame:
7 |     """
8 |     Load the Core CPI data used in the examples
9 | 
10 |     Returns
11 |     -------
12 |     data : DataFrame
13 |         Data set containing the CPI less Food and Energy
14 | 
15 |     Notes
16 |     -----
17 |     From the FRED database
18 |     """
19 |     return load_file(__file__, "core-cpi.csv.gz")
20 | 
--------------------------------------------------------------------------------
/arch/data/wti/__init__.py:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 | 
3 | from arch.data.utility import load_file
4 | 
5 | 
6 | def load() -> DataFrame:
7 |     """
8 |     Load the West Texas Intermediate crude oil price data used in the examples
9 | 
10 |     Returns
11 |     -------
12 |     data : DataFrame
13 |         Data set containing the price of WTI
14 | 
15 |     Notes
16 |     -----
17 |     From the FRED database
18 |     """
19 |     return load_file(__file__, "wti.csv.gz")
20 | 
--------------------------------------------------------------------------------
/.pep8speaks.yml:
--------------------------------------------------------------------------------
1 | scanner:
2 |     diff_only: False
3 |     linter: flake8  # Other option is pycodestyle
4 | 
5 | pycodestyle:  # Same as scanner.linter value. Other option is flake8
6 |     max-line-length: 99  # Default is 79 in PEP 8
7 |     ignore:  # Errors and warnings to ignore
8 |         - E203  # Whitespace before ':'
9 |         - W503  # Line break occurred before a binary operator (W503)
10 | 
11 | no_blank_comment: False  # If True, no comment is made on PR without any errors.
12 | 
--------------------------------------------------------------------------------
/doc/source/unitroot/tests.rst:
--------------------------------------------------------------------------------
1 | The Unit Root Tests
2 | ===================
3 | 
4 | .. module:: arch.unitroot
5 |    :synopsis: Unit root tests
6 | 
7 | .. currentmodule:: arch.unitroot
8 | 
9 | .. autosummary::
10 |    :toctree: generated/
11 | 
12 |    ADF
13 |    DFGLS
14 |    PhillipsPerron
15 |    ZivotAndrews
16 |    VarianceRatio
17 |    KPSS
18 | 
19 | Automatic Bandwidth Selection
20 | -----------------------------
21 | 
22 | .. autosummary::
23 |    :toctree: generated/
24 | 
25 |    auto_bandwidth
26 | 
--------------------------------------------------------------------------------
/arch/unitroot/__init__.py:
--------------------------------------------------------------------------------
1 | from arch.unitroot.cointegration import EngleGrangerTestResults, engle_granger
2 | from arch.unitroot.unitroot import (
3 |     ADF,
4 |     DFGLS,
5 |     KPSS,
6 |     PhillipsPerron,
7 |     VarianceRatio,
8 |     ZivotAndrews,
9 |     auto_bandwidth,
10 | )
11 | 
12 | __all__ = [
13 |     "ADF",
14 |     "DFGLS",
15 |     "KPSS",
16 |     "EngleGrangerTestResults",
17 |     "PhillipsPerron",
18 |     "VarianceRatio",
19 |     "ZivotAndrews",
20 |     "auto_bandwidth",
21 |     "engle_granger",
22 | ]
23 | 
--------------------------------------------------------------------------------
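The unit root tests re-exported above share a common interface. A minimal sketch (not part of the repository) applying ADF to a simulated random walk, where the unit-root null should not be rejected:

```python
import numpy as np

from arch.unitroot import ADF

# A random walk, so the series contains a unit root by construction.
rng = np.random.default_rng(42)
y = rng.standard_normal(500).cumsum()

adf = ADF(y)
print(adf.stat, adf.pvalue)
print(adf.summary())
```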
/doc/requirements.txt:
--------------------------------------------------------------------------------
1 | wheel
2 | meson-python
3 | ninja
4 | setuptools_scm[toml]>=9.2,<10
5 | cython>=3.1.0
6 | numpy >=2
7 | scipy >=1.15.0
8 | ipython >=8.0.1
9 | matplotlib >=3.9
10 | pandas >=2.3.0
11 | statsmodels >=0.14.5
12 | jinja2
13 | sphinx
14 | seaborn
15 | numpydoc>=1.0.0
16 | nbsphinx
17 | sphinx-immaterial
18 | jupyter
19 | notebook
20 | sphinx-autodoc-typehints
21 | fonttools>=4.43.0  # not directly required, pinned by Snyk to avoid a vulnerability
22 | jupyterlab>=4.4.8  # not directly required, pinned by Snyk to avoid a vulnerability
23 | 
--------------------------------------------------------------------------------
/doc/source/changes/1.0.rst:
--------------------------------------------------------------------------------
1 | =========
2 | Version 1
3 | =========
4 | 
5 | Version 1.1
6 | ===========
7 | 
8 | - Refactored to move the univariate routines to `arch.univariate` and added
9 |   deprecation warnings in the old locations
10 | - Enable `numba` jit compilation in the python recursions
11 | - Added a bootstrap framework, which will be used in future versions.
12 |   The bootstrap framework is general purpose and can be used via high-level
13 |   functions such as `conf_int` or `cov`, or as a low level iterator using
14 |   `bootstrap`
15 | 
--------------------------------------------------------------------------------
/meson.options:
--------------------------------------------------------------------------------
1 | # Use these options by adding to the pip command:
2 | #   -Csetup-args="-Dcython-coverage=true"
3 | # then run:
4 | #   pytest --cov=arch arch
5 | #   coverage html
6 | option('cython-coverage', type: 'boolean', value: false,
7 |        description: 'Compile and build cython modules with coverage support')
8 | # Use these options by adding to the pip command:
9 | #   -Csetup-args="-Dno-binary=true"
10 | option('no-binary', type: 'boolean', value: false,
11 |        description: 'Do not build any Cython extensions')
12 | 
--------------------------------------------------------------------------------
/arch/compat/pandas.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING
2 | 
3 | from packaging.version import parse
4 | import pandas as pd
5 | 
6 | if TYPE_CHECKING:
7 |     from pandas.api.types import is_datetime64_any_dtype
8 | else:
9 |     try:
10 |         from pandas.api.types import is_datetime64_any_dtype
11 |     except ImportError:
12 |         from pandas.core.common import is_datetime64_any_dtype
13 | 
14 | PD_LT_22 = parse(pd.__version__) < parse("2.1.99")
15 | MONTH_END = "M" if PD_LT_22 else "ME"
16 | 
17 | __all__ = ["MONTH_END", "is_datetime64_any_dtype"]
18 | 
--------------------------------------------------------------------------------
/ci/performance.py:
--------------------------------------------------------------------------------
1 | """
2 | Script to run performance tests to show speed.
3 | """
4 | 
5 | import sys
6 | 
7 | from arch.tests.univariate.test_recursions import MISSING_NUMBA, TestRecursions
8 | 
9 | if __name__ == "__main__":
10 |     if MISSING_NUMBA:
11 |         print("numba not available -- skipping performance tests")
12 |         sys.exit(0)
13 | 
14 |     t = TestRecursions()
15 |     t.setup_class()
16 | 
17 |     t.test_garch_performance()
18 |     t.test_harch_performance()
19 |     t.test_egarch_performance()
20 |     t.test_midas_performance()
21 |     t.test_figarch_performance()
22 | 
--------------------------------------------------------------------------------
/doc/source/_static/images/site.webmanifest:
--------------------------------------------------------------------------------
1 | {
2 |     "name": "ARCH",
3 |     "short_name": "ARCH",
4 |     "icons": [
5 |         {
6 |             "src": "/arch/_static/images/android-chrome-192x192.png",
7 |             "sizes": "192x192",
8 |             "type": "image/png"
9 |         },
10 |         {
11 |             "src": "/arch/_static/images/android-chrome-512x512.png",
12 |             "sizes": "512x512",
13 |             "type": "image/png"
14 |         }
15 |     ],
16 |     "theme_color": "#ffffff",
17 |     "background_color": "#ffffff",
18 |     "display": "standalone"
19 | }
20 | 
--------------------------------------------------------------------------------
/arch/tests/test_data.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | 
4 | import pandas as pd
5 | import pytest
6 | 
7 | CURR_DIR = os.path.split(os.path.abspath(__file__))[0]
8 | FILES = glob.glob(os.path.join(CURR_DIR, "..", "data", "*"))
9 | DATASETS = [os.path.split(f)[-1] for f in FILES if (".py" not in f and "__" not in f)]
10 | 
11 | 
12 | @pytest.fixture(params=DATASETS)
13 | def dataset(request):
14 |     return request.param
15 | 
16 | 
17 | def test_dataset(dataset):
18 |     mod = __import__(f"arch.data.{dataset}", fromlist=[dataset])
19 |     data = mod.load()
20 |     assert isinstance(data, pd.DataFrame)
21 | 
--------------------------------------------------------------------------------
/arch/data/frenchdata/__init__.py:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 | 
3 | from arch.data.utility import load_file
4 | 
5 | 
6 | def load() -> DataFrame:
7 |     """
8 |     Load the Fama-French factor data used in the examples
9 | 
10 |     Returns
11 |     -------
12 |     data : DataFrame
13 |         Data set containing excess market, size and value factors and the
14 |         risk-free rate
15 | 
16 |     Notes
17 |     -----
18 |     Provided by Ken French,
19 |     http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html
20 |     """
21 |     return load_file(__file__, "frenchdata.csv.gz")
22 | 
--------------------------------------------------------------------------------
/doc/source/univariate/results.rst:
--------------------------------------------------------------------------------
1 | Model Results
2 | =============
3 | All models return the same type of object, a results class
4 | (:class:`~arch.univariate.base.ARCHModelResult`). When using the ``fix`` method, an
5 | :class:`~arch.univariate.base.ARCHModelFixedResult` is produced that lacks some properties
6 | of an :class:`~arch.univariate.base.ARCHModelResult` that are not relevant when parameters
7 | are not estimated.
8 | 
9 | .. module:: arch.univariate.base
10 |    :noindex:
11 | 
12 | .. currentmodule:: arch.univariate.base
13 | 
14 | .. autosummary::
15 |    :toctree: generated/
16 | 
17 |    ARCHModelResult
18 |    ARCHModelFixedResult
--------------------------------------------------------------------------------
/meson.build:
--------------------------------------------------------------------------------
1 | project(
2 |     'arch',
3 |     'c', 'cython',
4 |     license: 'NCSA',
5 |     meson_version: '>= 1.9.0',
6 |     default_options: [],
7 |     version: run_command(['arch/_build/git_version.py'], check: true).stdout().strip(),
8 | )
9 | 
10 | cc = meson.get_compiler('c')
11 | cy = meson.get_compiler('cython')
12 | cython = find_program(cy.cmd_array()[0])
13 | 
14 | if not cy.version().version_compare('>=3.0.10')
15 |     error('arch requires Cython >= 3.0.10')
16 | endif
17 | 
18 | py = import('python').find_installation(pure: false)
19 | fs = import('fs')
20 | 
21 | py.install_sources('pyproject.toml', subdir: 'arch')
22 | subdir('arch')
23 | 
--------------------------------------------------------------------------------
/doc/source/names_wordlist.txt:
--------------------------------------------------------------------------------
1 | Akaike
2 | Biometrika
3 | Bithell
4 | Bollerslev
5 | Chernick
6 | Cushing
7 | Davison
8 | Dimitris
9 | Econometrica
10 | Econometrics
11 | Efron
12 | Engle
13 | Fama
14 | Foroni
15 | GJR
16 | Granger
17 | Halbert
18 | Hannan
19 | Hanning
20 | Hobijn
21 | Khrapov
22 | Kwiatkowski
23 | Lunde
24 | Marcellino
25 | Massimiliano
26 | Nason
27 | Neerlandica
28 | Newey
29 | Norges
30 | Ooms
31 | Parameterless
32 | Perron
33 | Politis
34 | Reisz
35 | Rothenberg
36 | Saikkonen
37 | Schwarz
38 | Schwert
39 | Sheppard
40 | Stanislav
41 | Stata
42 | Tibshirani
43 | Tukey
44 | Varanelli
45 | Wooldridge
46 | Zenodo
47 | Zivot
48 | 
--------------------------------------------------------------------------------
/doc/source/multiple-comparison/multiple-comparisons.rst:
--------------------------------------------------------------------------------
1 | Multiple Comparison Procedures
2 | ------------------------------
3 | 
4 | This module contains a set of bootstrap-based multiple comparison procedures.
5 | These are designed to allow multiple models to be compared while controlling
6 | the `Familywise Error Rate <https://en.wikipedia.org/wiki/Family-wise_error_rate>`_,
7 | which is similar to the size of a test.
8 | 
9 | .. module:: arch.bootstrap
10 | 
11 | .. toctree::
12 |    :maxdepth: 1
13 | 
14 |    Examples
15 |    Module Reference
16 |    Background and References
17 | 
18 | 
19 | 
20 | 
21 | 
22 | 
23 | 
--------------------------------------------------------------------------------
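One of the procedures documented above, the test for superior predictive ability (SPA), can be sketched as follows (not part of the repository; the chi-squared draws are stand-ins for real model losses):

```python
import numpy as np

from arch.bootstrap import SPA

rng = np.random.default_rng(7)
# Losses for a benchmark model and for 10 competing models.
benchmark = rng.chisquare(2, size=500)
models = rng.chisquare(2, size=(500, 10))

spa = SPA(benchmark, models)
spa.compute()
print(spa.pvalues)  # lower, consistent and upper p-values
```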
/doc/source/univariate/distribution.rst:
--------------------------------------------------------------------------------
1 | .. _distributions:
2 | 
3 | Distributions
4 | =============
5 | A distribution is the final component of an ARCH Model.
6 | 
7 | .. module:: arch.univariate
8 |    :noindex:
9 | .. currentmodule:: arch.univariate
10 | 
11 | .. autosummary::
12 |    :toctree: generated/
13 | 
14 |    Normal
15 |    StudentsT
16 |    SkewStudent
17 |    GeneralizedError
18 | 
19 | Writing New Distributions
20 | -------------------------
21 | All distributions must inherit from :class:`Distribution` and provide all public
22 | methods.
23 | 
24 | .. currentmodule:: arch.univariate.distribution
25 | 
26 | .. autosummary::
27 |    :toctree: generated/
28 | 
29 |    Distribution
30 | 
31 | 
--------------------------------------------------------------------------------
/arch/tests/utility/test_io.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | 
4 | from arch.utility.io import pval_format, str_format
5 | 
6 | CASES = (
7 |     (np.nan, "        "),
8 |     (1, "1.0000"),
9 |     (1.234567890e10, "1.235e+10"),
10 |     (12345678900, "1.235e+10"),
11 |     (123, "123.00"),
12 |     (0.000006789, "6.789e-06"),
13 | )
14 | 
15 | 
16 | @pytest.mark.parametrize("case", CASES)
17 | def test_str_format(case):
18 |     assert str_format(case[0]) == case[1]
19 | 
20 | 
21 | PVAL_CASES = ((np.nan, "        "), (1e-37, "0.0000"), (0.999999, "1.0000"))
22 | 
23 | 
24 | @pytest.mark.parametrize("case", PVAL_CASES)
25 | def test_pval_format(case):
26 |     assert pval_format(case[0]) == case[1]
27 | 
--------------------------------------------------------------------------------
/doc/source/multiple-comparison/background.rst:
--------------------------------------------------------------------------------
1 | .. bibliography multiple-comparison Bibliography
2 | 
3 | References
4 | ----------
5 | 
6 | Articles used in the creation of this module include
7 | 
8 | .. [Hansen] Hansen, P. R. (2005). A test for superior predictive ability. `Journal of Business & Economic Statistics`, 23(4).
9 | 
10 | .. [HansenLundeNason] Hansen, P. R., Lunde, A., & Nason, J. M. (2011). The model confidence set. `Econometrica`, 79(2), 453-497.
11 | 
12 | .. [RomanoWolf] Romano, J. P., & Wolf, M. (2005). Stepwise multiple testing as formalized data snooping. `Econometrica`, 73(4), 1237-1282.
13 | 
14 | .. [White] White, H. (2000). A reality check for data snooping. `Econometrica`, 68(5), 1097-1126.
15 | 
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 | 
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS    ?= -j auto
7 | SPHINXBUILD   ?= sphinx-build
8 | SOURCEDIR     = source
9 | BUILDDIR      = build
10 | 
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 | 
15 | .PHONY: help Makefile
16 | 
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 | 
--------------------------------------------------------------------------------
/arch/utility/io.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | __all__ = ["pval_format", "str_format"]
4 | 
5 | 
6 | def str_format(v: float) -> str:
7 |     """Preferred basic formatter"""
8 |     if np.isnan(v):
9 |         return "        "
10 |     av = abs(v)
11 |     digits = 0
12 |     if av != 0:
13 |         digits = int(np.ceil(np.log10(av)))
14 |     if digits > 4 or digits <= -4:
15 |         return f"{v:8.4g}"
16 | 
17 |     if digits > 0:
18 |         d = int(5 - digits)
19 |     else:
20 |         d = 4
21 | 
22 |     format_str = "{0:" + f"0.{d}f" + "}"
23 |     return format_str.format(v)
24 | 
25 | 
26 | def pval_format(v: float) -> str:
27 |     """Preferred formatting for x in [0,1]"""
28 |     if np.isnan(v):
29 |         return "        "
30 |     return f"{v:4.4f}"
31 | 
--------------------------------------------------------------------------------
/doc/source/changes/7.0.rst:
--------------------------------------------------------------------------------
1 | =========
2 | Version 7
3 | =========
4 | 
5 | Release 7.2
6 | ===========
7 | - Fixed a number of bugs that affect :class:`~arch.univariate.base.ARCHModelFixedResult`.
8 | - Improved documentation of the correct format of exogenous variables when forecasting.
9 | 
10 | Release 7.1
11 | ===========
12 | - Improve documentation
13 | - Improve typing of a small number of functions
14 | 
15 | Release 7.0
16 | ===========
17 | - Full compatibility with NumPy 2
18 | - Improved compatibility with future changes in pandas 3.
19 | - Increases in related minimum requirements.
20 | 
21 | .. note::
22 | 
23 |    In order to use NumPy 2, the environment must consist of packages
24 |    that have been built against NumPy 2.0.0rc1 or later.
--------------------------------------------------------------------------------
/arch/tests/test_tester.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | import arch
4 | 
5 | 
6 | def test_runner():
7 |     status = arch.test(location="tests/utility/test_cov.py", exit=False)
8 |     assert status == 0
9 | 
10 | 
11 | def test_runner_exception():
12 |     with pytest.raises(RuntimeError):
13 |         arch.test(location="tests/utility/unknown_location.py")
14 | 
15 | 
16 | def test_extra_args():
17 |     status = arch.test(
18 |         "--tb=short",
19 |         append=False,
20 |         location="tests/utility/test_cov.py",
21 |         exit=False,
22 |     )
23 | 
24 |     assert status == 0
25 | 
26 |     status = arch.test(
27 |         ["-r", "a"],
28 |         append=True,
29 |         location="tests/utility/test_cov.py",
30 |         exit=False,
31 |     )
32 | 
33 |     assert status == 0
34 | 
--------------------------------------------------------------------------------
/doc/source/unitroot/cointegration.rst:
--------------------------------------------------------------------------------
1 | Cointegration Tests
2 | ===================
3 | 
4 | .. module:: arch.unitroot.cointegration
5 |    :synopsis: Cointegration tests and estimation
6 | 
7 | .. currentmodule:: arch.unitroot.cointegration
8 | 
9 | .. autosummary::
10 |    :toctree: generated/
11 | 
12 |    engle_granger
13 |    phillips_ouliaris
14 | 
15 | Cointegrating Vector Estimation
16 | ===============================
17 | .. autosummary::
18 |    :toctree: generated/
19 | 
20 |    DynamicOLS
21 |    FullyModifiedOLS
22 |    CanonicalCointegratingReg
23 | 
24 | Results Classes
25 | ---------------
26 | 
27 | .. autosummary::
28 |    :toctree: generated/
29 | 
30 |    CointegrationAnalysisResults
31 |    DynamicOLSResults
32 |    EngleGrangerTestResults
33 |    PhillipsOuliarisTestResults
--------------------------------------------------------------------------------
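A minimal sketch (not part of the repository) of the `engle_granger` test listed above, on simulated series constructed to be cointegrated:

```python
import numpy as np

from arch.unitroot.cointegration import engle_granger

rng = np.random.default_rng(3)
# x is a random walk and y cointegrates with it by construction.
x = rng.standard_normal(500).cumsum()
y = 0.5 * x + rng.standard_normal(500)

result = engle_granger(y, x)
print(result.stat, result.pvalue)
```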
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 | 
5 | # Required
6 | version: 2
7 | 
8 | # Set the version of Python and other tools you might need
9 | build:
10 |   os: ubuntu-22.04
11 |   tools:
12 |     python: "3.12"
13 | 
14 | # Build documentation in the docs/ directory with Sphinx
15 | sphinx:
16 |   configuration: doc/source/conf.py
17 | 
18 | # If using Sphinx, optionally build your docs in additional formats such as PDF
19 | formats:
20 |   - pdf
21 | 
22 | # Optionally declare the Python requirements required to build your docs
23 | 
24 | python:
25 |   install:
26 |     - requirements: requirements.txt
27 |     - requirements: doc/requirements.txt
28 |     - method: pip
29 |       path: .
30 | 
--------------------------------------------------------------------------------
/azure-pipelines.yml:
--------------------------------------------------------------------------------
1 | # https://docs.microsoft.com/en-us/azure/devops/pipelines/process/templates?view=azure-devops#passing-parameters
2 | 
3 | trigger:
4 |   - main
5 | 
6 | variables:
7 |   MKL_NUM_THREADS: 1
8 |   NUMEXPR_NUM_THREADS: 1
9 |   OMP_NUM_THREADS: 1
10 |   VML_NUM_THREADS: 1
11 |   OPENBLAS_NUM_THREADS: 1
12 |   JUPYTER_PLATFORM_DIRS: 1
13 |   PYTHONHASHSEED: 12345678  # Ensure tests are correctly gathered by xdist
14 |   TEST_INSTALL: false
15 |   MPLBACKEND: agg
16 |   PYTEST_PATTERN: "(not slow)"
17 |   test.install: false
18 |   pip.pre: false
19 | 
20 | jobs:
21 |   - template: ci/azure/azure_template_posix.yml
22 |     parameters:
23 |       name: Linux
24 |       vmImage: ubuntu-latest
25 | 
26 |   - template: ci/azure/azure_template_windows.yml
27 |     parameters:
28 |       name: Windows
29 |       vmImage: windows-latest
30 | 
--------------------------------------------------------------------------------
/arch/data/utility.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | import pandas as pd
4 | 
5 | 
6 | def load_file(file_base: str, filename: str) -> pd.DataFrame:
7 |     """
8 |     Load data from a csv.gz file.
9 | 
10 |     Parameters
11 |     ----------
12 |     file_base : str
13 |         The directory to use as the root.
14 |     filename : str
15 |         Name of csv.gz to load
16 | 
17 |     Returns
18 |     -------
19 |     DataFrame
20 |         Dataframe containing the loaded data.
21 |     """
22 |     curr_dir = os.path.split(os.path.abspath(file_base))[0]
23 |     data = pd.read_csv(os.path.join(curr_dir, filename))
24 |     if "Date" in data:
25 |         data.Date = pd.to_datetime(data.Date)
26 |         data = data.set_index("Date")
27 |     for col in data:
28 |         data[col] = pd.to_numeric(data[col], errors="coerce")
29 |     return data
30 | 
--------------------------------------------------------------------------------
/doc/source/types.rst:
--------------------------------------------------------------------------------
1 | Common Type Definitions
2 | =======================
3 | 
4 | .. py:currentmodule:: arch.typing
5 | 
6 | Array Types
7 | -----------
8 | 
9 | .. autoclass:: ArrayLike1D
10 | .. autoclass:: ArrayLike2D
11 | .. autoclass:: ArrayLike
12 | .. autoclass:: NDArray
13 | .. autoclass:: Float64Array
14 | .. autoclass:: Int64Array
15 | .. autoclass:: Int32Array
16 | .. autoclass:: IntArray
17 | .. autoclass:: BoolArray
18 | .. autoclass:: AnyArray
19 | .. autoclass:: Uint32Array
20 | .. autoclass:: FloatOrArray
21 | .. autoclass:: NDArrayOrFrame
22 | .. autoclass:: AnyPandas
23 | 
24 | Other Types
25 | -----------
26 | 
27 | .. autoclass:: RandomStateState
28 | .. autoclass:: RNGType
29 | .. autoclass:: BootstrapIndexT
30 | .. autoclass:: DateLike
31 | .. autoclass:: Label
32 | .. autoclass:: UnitRootTrend
33 | .. autoclass:: ForecastingMethod
--------------------------------------------------------------------------------
/arch/tests/test_compat.py:
--------------------------------------------------------------------------------
1 | from arch.compat.numba import PerformanceWarning
2 | 
3 | import numpy as np
4 | import pytest
5 | 
6 | from arch.univariate.recursions_python import arch_recursion
7 | 
8 | try:
9 |     import numba  # noqa: F401
10 | 
11 |     HAS_NUMBA = True
12 | except ImportError:
13 |     HAS_NUMBA = False
14 | 
15 | 
16 | @pytest.mark.skipif(HAS_NUMBA, reason="Can only test when numba is not available.")
17 | def test_performance_warning():
18 |     parameters = np.array([1, 0.1])
19 |     nobs = 100
20 |     resids = np.ones(nobs)
21 |     sigma2 = resids.copy()
22 |     p = 1
23 |     backcast = 1.0
24 |     var_bounds = np.empty((nobs, 2))
25 |     var_bounds[:, 0] = 0.0
26 |     var_bounds[:, 1] = 1.0e14
27 |     with pytest.warns(PerformanceWarning, match=r"numba is not available"):
28 |         arch_recursion(parameters, resids, sigma2, p, nobs, backcast, var_bounds)
--------------------------------------------------------------------------------
/doc/source/bootstrap/iid-bootstraps.rst:
--------------------------------------------------------------------------------
1 | .. module:: arch.bootstrap
2 |    :noindex:
3 | .. currentmodule:: arch.bootstrap
4 | 
5 | Independent, Identically Distributed Data (i.i.d.)
6 | --------------------------------------------------
7 | :class:`~arch.bootstrap.IIDBootstrap` is the standard bootstrap that is appropriate for data that is
8 | either i.i.d. or at least not serially dependent.
9 | 
10 | .. autosummary::
11 |    :toctree: generated/
12 | 
13 |    IIDBootstrap
14 | 
15 | Independent Samples
16 | -------------------
17 | :class:`~arch.bootstrap.IndependentSamplesBootstrap` is a bootstrap that is appropriate for
18 | data that is totally independent, and where each variable may have a different sample size. This
19 | type of data arises naturally in experimental settings, e.g., website A/B testing.
20 | 
21 | .. autosummary::
22 |    :toctree: generated/
23 | 
24 |    IndependentSamplesBootstrap
--------------------------------------------------------------------------------
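A short sketch (not part of the repository) of :class:`IIDBootstrap` as described above, computing a bootstrap confidence interval for the mean of simulated data; the `seed` keyword is assumed to be available as in recent releases:

```python
import numpy as np

from arch.bootstrap import IIDBootstrap

rng = np.random.default_rng(11)
x = rng.standard_normal(1000)

bs = IIDBootstrap(x, seed=23)
# 95% confidence interval for the mean using the percentile method.
ci = bs.conf_int(np.mean, reps=1000, method="percentile")
print(ci)
```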
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | # versioning
2 | meson-python>=0.18.0
3 | meson
4 | ninja
5 | setuptools_scm[toml]>=8.0.3,<9
6 | packaging
7 | 
8 | # Performance
9 | cython>=3.0.10
10 | numba>=0.49,!=0.50.*;python_version<"3.12"
11 | 
12 | # Graphics
13 | matplotlib>=3
14 | seaborn
15 | 
16 | # Tests
17 | pytest>=8.4.1,<9
18 | pytest-xdist
19 | pytest-cov
20 | pytest-randomly
21 | 
22 | # formatting
23 | black[jupyter]~=25.1.0
24 | isort~=5.12
25 | colorama
26 | flake8
27 | flake8-bugbear
28 | mypy
29 | ruff>=0.8.6
30 | pyupgrade>=3.4.0
31 | jupyterlab-code-formatter
32 | 
33 | # Documentation
34 | ipython>=7
35 | sphinx>=7
36 | sphinx_immaterial
37 | nbsphinx
38 | jupyter
39 | notebook
40 | nbconvert
41 | sphinx-autodoc-typehints
42 | pickleshare
43 | jupyterlab>=4.4.8  # not directly required, pinned by Snyk to avoid a vulnerability
44 | zipp>=3.19.1  # not directly required, pinned by Snyk to avoid a vulnerability
45 | 
--------------------------------------------------------------------------------
/doc/source/changes/3.0.rst:
--------------------------------------------------------------------------------
1 | =========
2 | Version 3
3 | =========
4 | 
5 | - Added forecast code for mean forecasting
6 | - Added volatility hedgehog plot
7 | - Added ``fix`` to arch models which allows for user-specified parameters
8 |   instead of estimated parameters.
9 | - Added Hansen's Skew T distribution to distribution (Stanislav Khrapov)
10 | - Updated IPython notebooks to latest IPython version
11 | - Bug and typo fixes to IPython notebooks
12 | - Changed MCS to give a p-value of 1.0 to the best model. Previously it was NaN
13 | - Moved ``hold_back`` and ``last_obs`` from model initialization to the
14 |   ``fit`` method to simplify estimating a model over alternative samples
15 |   (e.g., rolling window estimation)
16 | - Redefined ``hold_back`` to only accept integers so that it simply defines
17 |   the number of observations held back. This number is now held out of the
18 |   sample irrespective of the value of ``first_obs``.
19 | 
20 | 
--------------------------------------------------------------------------------
/doc/source/covariance/covariance.rst:
--------------------------------------------------------------------------------
1 | Long-run Covariance Estimation
2 | ==============================
3 | 
4 | 
5 | Long-run Covariance Estimators
6 | ------------------------------
7 | 
8 | .. module:: arch.covariance.kernel
9 |    :synopsis: Kernel-based long-run covariance estimation
10 | 
11 | .. currentmodule:: arch.covariance.kernel
12 | 
13 | .. autosummary::
14 |    :toctree: generated/
15 | 
16 |    Andrews
17 |    Bartlett
18 |    Gallant
19 |    NeweyWest
20 |    Parzen
21 |    ParzenCauchy
22 |    ParzenGeometric
23 |    ParzenRiesz
24 |    QuadraticSpectral
25 |    TukeyHamming
26 |    TukeyHanning
27 |    TukeyParzen
28 | 
29 | Results
30 | -------
31 | 
32 | .. autosummary::
33 |    :toctree: generated/
34 | 
35 |    CovarianceEstimate
36 | 
37 | 
38 | Base Class
39 | ----------
40 | All long-run covariance estimators inherit from :class:`~arch.covariance.kernel.CovarianceEstimator`.
41 | 
42 | .. autosummary::
43 |    :toctree: generated/
44 | 
45 |    CovarianceEstimator
--------------------------------------------------------------------------------
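A minimal sketch (not part of the repository) of the kernel estimators listed above, using Bartlett with automatic bandwidth selection on simulated data:

```python
import numpy as np

from arch.covariance.kernel import Bartlett

rng = np.random.default_rng(5)
x = rng.standard_normal((500, 2))

# The bandwidth is selected automatically when not specified.
cov_est = Bartlett(x)
print(cov_est.cov.long_run)  # long-run covariance estimate
```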
autosummary:: 43 | :toctree: generated/ 44 | 45 | CovarianceEstimator 46 | -------------------------------------------------------------------------------- /arch/bootstrap/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tools for implementing statistical bootstraps 3 | """ 4 | 5 | from arch.bootstrap import _samplers_python 6 | from arch.bootstrap.base import ( 7 | CircularBlockBootstrap, 8 | IIDBootstrap, 9 | IndependentSamplesBootstrap, 10 | MovingBlockBootstrap, 11 | StationaryBootstrap, 12 | optimal_block_length, 13 | ) 14 | from arch.bootstrap.multiple_comparison import MCS, SPA, RealityCheck, StepM 15 | 16 | COMPILED_SAMPLERS = True 17 | try: 18 | from arch.bootstrap import _samplers 19 | except ImportError: 20 | COMPILED_SAMPLERS = False 21 | 22 | 23 | __all__ = [ 24 | "MCS", 25 | "SPA", 26 | "CircularBlockBootstrap", 27 | "IIDBootstrap", 28 | "IndependentSamplesBootstrap", 29 | "MovingBlockBootstrap", 30 | "RealityCheck", 31 | "StationaryBootstrap", 32 | "StepM", 33 | "_samplers_python", 34 | "optimal_block_length", 35 | ] 36 | 37 | if COMPILED_SAMPLERS: 38 | __all__ += ["_samplers"] 39 | -------------------------------------------------------------------------------- /doc/source/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autoclass:: {{ objname }} 6 | :exclude-members: {% for item in methods %}{%- if not item.startswith('_') or item in ['__call__'] %}{{ item }},{% endif %}{%- endfor %} 7 | 8 | {% block methods %} 9 | {% if methods %} 10 | .. rubric:: Methods 11 | 12 | .. autosummary:: 13 | :toctree: generated/ 14 | 15 | {% for item in methods %} 16 | {%- if not item.startswith('_') or item in ['__call__'] %} ~{{ name }}.{{ item }} 17 | {% endif %} 18 | {%- endfor %} 19 | {% endif %} 20 | {% endblock %} 21 | {% block attributes %} 22 | {% if attributes %} 23 | .. rubric:: Properties 24 | 25 | .. 
autosummary::
26 |       :toctree: generated/
27 | 
28 |    {% for item in attributes %}
29 |    {%- if not item.startswith('_') or item in ['__call__'] %}   ~{{ name }}.{{ item }}
30 |    {% endif %}
31 |    {%- endfor %}
32 |    {% endif %}
33 | {% endblock %}
34 | 
--------------------------------------------------------------------------------
/arch/unitroot/critical_values/simulation/phillips_ouliaris.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | 
 3 | import numpy as np
 4 | 
 5 | FILE_TYPES = ("z", "p")
 6 | TRENDS = ("n", "c", "ct", "ctt")
 7 | 
 8 | if os.name == "posix":
 9 |     ROOT = "/mnt/c/Users/kevin/Dropbox/phillips-ouliaris"
10 | else:
11 |     ROOT = r"c:\Users\kevin\Dropbox\phillips-ouliaris"
12 | 
13 | # percentiles to save
14 | PERCENTILES = (
15 |     list(np.arange(1, 10, 1))
16 |     + list(np.arange(10, 990, 5))
17 |     + list(np.arange(990, 1000, 1))
18 | )
19 | PERCENTILES = PERCENTILES[::-1]
20 | QUANTILES = np.array(PERCENTILES) / 1000.0
21 | 
22 | SAMPLE_SIZES = (
23 |     25,
24 |     30,
25 |     35,
26 |     40,
27 |     45,
28 |     50,
29 |     60,
30 |     70,
31 |     80,
32 |     90,
33 |     100,
34 |     120,
35 |     140,
36 |     160,
37 |     180,
38 |     200,
39 |     250,
40 |     300,
41 |     350,
42 |     400,
43 |     450,
44 |     500,
45 |     600,
46 |     700,
47 |     800,
48 |     900,
49 |     1000,
50 |     1200,
51 |     1400,
52 |     2000,
53 | )
54 | 
--------------------------------------------------------------------------------
/doc/source/bootstrap/timeseries-bootstraps.rst:
--------------------------------------------------------------------------------
 1 | .. module:: arch.bootstrap
 2 |    :noindex:
 3 | .. currentmodule:: arch.bootstrap
 4 | 
 5 | Time-series Bootstraps
 6 | ----------------------
 7 | Bootstraps for time-series data come in a variety of forms. The three contained
 8 | in this package are the stationary bootstrap
 9 | (:class:`~arch.bootstrap.StationaryBootstrap`), which uses blocks with
10 | exponentially distributed lengths, the circular block bootstrap
11 | (:class:`~arch.bootstrap.CircularBlockBootstrap`), which uses
12 | fixed-length blocks, and the moving block bootstrap, which also uses
13 | fixed-length blocks (:class:`~arch.bootstrap.MovingBlockBootstrap`). The moving
14 | block bootstrap does *not* wrap around and so observations near the start or
15 | end of the series will be systematically under-sampled. It is not recommended
16 | for this reason.
17 | 
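A sketch combining data-driven block-length selection with the stationary bootstrap (the AR(1)
series is invented for illustration; ``optimal_block_length`` returns both stationary and
circular block lengths, as exercised in ``test_block_length.py`` elsewhere in this repo):

.. code-block:: python

   import numpy as np
   import pandas as pd

   from arch.bootstrap import StationaryBootstrap, optimal_block_length

   rs = np.random.RandomState(0)
   e = rs.standard_normal(1000)
   for i in range(1, e.shape[0]):
       e[i] += 0.3 * e[i - 1]
   s = pd.Series(e, name="x")

   # Data-driven average block length for the stationary bootstrap
   block_length = optimal_block_length(s).loc["x", "stationary"]
   bs = StationaryBootstrap(block_length, s)
   ci = bs.conf_int(np.mean, reps=1000)

18 | .. 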
autosummary::
19 |    :toctree: generated/
20 | 
21 |    StationaryBootstrap
22 |    CircularBlockBootstrap
23 |    MovingBlockBootstrap
24 |    optimal_block_length
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
 1 | [run]
 2 | source = arch
 3 | branch = True
 4 | omit =
 5 |     */_version.py
 6 |     */compat/*
 7 | plugins = Cython.Coverage
 8 | 
 9 | [report]
10 | # Regexes for lines to exclude from consideration
11 | exclude_lines =
12 |     # Have to re-enable the standard pragma
13 |     pragma: no cover
14 | 
15 |     # Don't complain if tests don't hit defensive assertion code:
16 |     raise NotImplementedError
17 |     except NotImplementedError
18 |     # Ignore pass
19 |     pass
20 |     # Ignore failure messages
21 |     pytest.xfail
22 |     # Ignore ImportError protection
23 |     except ImportError
24 |     # Ignore type checking code
25 |     if TYPE_CHECKING
26 |     elif TYPE_CHECKING
27 |     # Cython function declarations
28 |     cdef
29 |     # Cython functions with void
30 |     cdef void
31 |     # Numba jit decorators
32 |     @jit
33 | 
34 | 
35 | include = */arch/*
36 | omit =
37 |     */_version.py
38 |     */compat/*
39 |     *recursions.py
40 |     *samplers.py
41 | ignore_errors = True
42 | 
--------------------------------------------------------------------------------
/doc/source/bootstrap/bootstrap.rst:
--------------------------------------------------------------------------------
 1 | Bootstrapping
 2 | -------------
 3 | 
 4 | .. module:: arch.bootstrap
 5 |    :synopsis: Bootstrap methods for simulation and parameter inference
 6 | .. currentmodule:: arch.bootstrap
 7 | 
 8 | The bootstrap module provides both high- and low-level interfaces for
 9 | bootstrapping data contained in NumPy arrays or pandas Series or DataFrames.
10 | 
11 | All bootstraps have the same interfaces and only differ in their name, setup
12 | parameters and the (internally generated) sampling scheme.
13 | 
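For instance, every bootstrap exposes the same iteration and function-application methods.
A sketch (the statistic and replication count are arbitrary choices for illustration):

.. code-block:: python

   import numpy as np

   from arch.bootstrap import IIDBootstrap

   x = np.random.RandomState(42).standard_normal(250)
   bs = IIDBootstrap(x)

   # Low-level: iterate over resampled data
   means = []
   for pos_data, kw_data in bs.bootstrap(500):
       means.append(pos_data[0].mean())

   # High-level: apply a statistic to each resample
   results = bs.apply(np.mean, 500)

14 | .. 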
toctree:: 15 | :maxdepth: 1 16 | 17 | Examples 18 | Confidence Interval Construction 19 | Parameter Covariance Estimation 20 | Low-level Interface 21 | Semiparametric and Parametric Bootstraps 22 | Bootstraps for IID data 23 | Bootstraps for Time-series Data 24 | Background and References 25 | 26 | 27 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | schedule: 9 | - cron: "28 19 * * 4" 10 | 11 | jobs: 12 | analyze: 13 | name: Analyze 14 | runs-on: ubuntu-latest 15 | permissions: 16 | actions: read 17 | contents: read 18 | security-events: write 19 | 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | language: [ python ] 24 | 25 | steps: 26 | - name: Checkout 27 | uses: actions/checkout@v6 28 | 29 | - name: Initialize CodeQL 30 | uses: github/codeql-action/init@v4 31 | with: 32 | languages: ${{ matrix.language }} 33 | config-file: ./.github/codeql.yml 34 | queries: +security-and-quality 35 | 36 | - name: Autobuild 37 | uses: github/codeql-action/autobuild@v4 38 | 39 | - name: Perform CodeQL Analysis 40 | uses: github/codeql-action/analyze@v4 41 | with: 42 | category: "/language:${{ matrix.language }}" 43 | -------------------------------------------------------------------------------- /arch/tests/bootstrap/test_block_length.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_allclose 3 | import pandas as pd 4 | 5 | from arch.bootstrap.base import optimal_block_length 6 | 7 | 8 | def test_block_length(): 9 | rs = np.random.RandomState(0) 10 | e = rs.standard_normal(10000 + 100) 11 | y = e 12 | for i in range(1, len(e)): 13 | y[i] = 0.3 * y[i - 1] + e[i] 14 | s = pd.Series(y[100:], name="x") 15 | bl = optimal_block_length(s) 16 | sb, cb = bl.loc["x"] 17 | assert_allclose(sb, 13.635665, rtol=1e-4) 18 | assert_allclose(cb, 15.60894, rtol=1e-4) 19 | 20 | df = pd.DataFrame([s, s]).T 21 | df.columns = ["x", "y"] 22 | bl = optimal_block_length(df) 23 | for idx in ("x", "y"): 24 | sb, cb = bl.loc[idx] 25 | assert_allclose(sb, 13.635665, rtol=1e-4) 26 | assert_allclose(cb, 15.60894, rtol=1e-4) 27 | 28 | assert tuple(bl.columns) == ("stationary", "circular") 29 | assert tuple(bl.index) == ("x", "y") 30 | 31 | bl = optimal_block_length(np.asarray(df)) 32 | assert tuple(bl.index) == (0, 1) 33 | -------------------------------------------------------------------------------- /doc/source/_static/images/safari-pinned-tab.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 7 | 8 | Created by potrace 1.11, written by Peter Selinger 2001-2013 9 | 10 | 12 | 14 | 16 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /doc/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | set SPHINXPROJ=arch 13 | set SPHINXOPTS= 14 | set PYDEVD_DISABLE_FILE_VALIDATION=1 15 | 16 | if "%1" == "" goto help 17 | 18 | %SPHINXBUILD% >NUL 2>NUL 19 | if errorlevel 9009 ( 20 | echo. 21 | echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx
22 | 	echo.installed, then set the SPHINXBUILD environment variable to point
23 | 	echo.to the full path of the 'sphinx-build' executable. Alternatively you
24 | 	echo.may add the Sphinx directory to PATH.
25 | 	echo.
26 | 	echo.If you don't have Sphinx installed, grab it from
27 | 	echo.https://sphinx-doc.org/
28 | 	set PYDEVD_DISABLE_FILE_VALIDATION=
29 | 	exit /b 1
30 | )
31 | 
32 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
33 | set PYDEVD_DISABLE_FILE_VALIDATION=
34 | goto end
35 | 
36 | :help
37 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
38 | set PYDEVD_DISABLE_FILE_VALIDATION=
39 | 
40 | :end
41 | popd
42 | 
--------------------------------------------------------------------------------
/doc/source/univariate/univariate.rst:
--------------------------------------------------------------------------------
 1 | Univariate Volatility Models
 2 | ----------------------------
 3 | 
 4 | ``arch.univariate`` provides both high-level (:func:`~arch.univariate.arch_model`) and low-level
 5 | methods (see :ref:`mean-models`) to specify models. All models can be used to produce forecasts
 6 | either analytically (when tractable) or using simulation-based methods (Monte Carlo or
 7 | residual bootstrap).
 8 | 
 9 | .. module:: arch.univariate
10 | 
11 | .. toctree::
12 |    :maxdepth: 1
13 | 
14 |    Introduction
15 |    Examples
16 |    Forecasting
17 |    Forecasting Examples
18 |    Forecasting Scenarios
19 |    Forecasting with Exogenous Variables
20 |    Mean Models
21 |    Volatility Processes
22 |    Using the Fixed Variance Process
23 |    Distributions
24 |    Results
25 |    Utilities
26 |    Background and References
27 | 
--------------------------------------------------------------------------------
/arch/bootstrap/_samplers_python.py:
--------------------------------------------------------------------------------
 1 | from arch.compat.numba import jit
 2 | 
 3 | from arch._typing import Float64Array, Int64Array
 4 | 
 5 | 
 6 | def stationary_bootstrap_sample_python(
 7 |     indices: Int64Array, u: Float64Array, p: float
 8 | ) -> Int64Array:
 9 |     """
10 |     Generate indices for sampling from the stationary bootstrap.
11 | 
12 |     Parameters
13 |     ----------
14 |     indices : ndarray
15 |         Single-dimensional array containing draws from randint with the same
16 |         size as the data in the range of [0, nobs).
17 |     u : ndarray
18 |         Single-dimensional array of standard uniforms.
19 |     p : float
20 |         Probability that a new block is started in the stationary bootstrap.
21 |         The multiplicative reciprocal of the window length.
22 | 
23 |     Returns
24 |     -------
25 |     ndarray
26 |         Indices for an iteration of the stationary bootstrap.
27 |     """
28 |     num_items = indices.shape[0]
29 |     for i in range(1, num_items):
30 |         if u[i] > p:
31 |             indices[i] = indices[i - 1] + 1
32 |             if indices[i] == num_items:
33 |                 indices[i] = 0
34 | 
35 |     return indices
36 | 
37 | 
38 | stationary_bootstrap_sample = jit(stationary_bootstrap_sample_python)
39 | 
--------------------------------------------------------------------------------
/arch/bootstrap/_samplers.pyx:
--------------------------------------------------------------------------------
 1 | #!python
 2 | 
 3 | 
 4 | import numpy as np
 5 | 
 6 | cimport numpy as np
 7 | 
 8 | np.import_array()
 9 | 
10 | 
11 | def stationary_bootstrap_sample(np.int64_t[:] indices,
12 |                                 double[:] u,
13 |                                 double p):
14 |     """
15 |     Generate indices for sampling from the stationary bootstrap.
16 | 
17 |     Parameters
18 |     ----------
19 |     indices : ndarray
20 |         Single-dimensional array containing draws from randint with the same
21 |         size as the data in the range of [0, nobs).
22 |     u : ndarray
23 |         Single-dimensional array of standard uniforms.
24 |     p : float
25 |         Probability that a new block is started in the stationary bootstrap.
26 |         The multiplicative reciprocal of the window length.
27 | 
28 |     Returns
29 |     -------
30 |     ndarray
31 |         Indices for an iteration of the stationary bootstrap.
32 |     """
33 |     cdef Py_ssize_t num_items, i
34 |     num_items = indices.shape[0]
35 | 
36 |     for i in range(1, num_items):
37 |         if u[i] > p:
38 |             indices[i] = indices[i - 1] + 1
39 |             if indices[i] == num_items:
40 |                 indices[i] = 0
41 | 
42 |     return np.asarray(indices)
43 | 
--------------------------------------------------------------------------------
/doc/source/_templates/layout.html:
--------------------------------------------------------------------------------
 1 | {# Import the theme's layout. #}
 2 | {% extends "!layout.html" %}
 3 | 
 4 | {%- block hero %}
 5 | {% if pagename == 'index' %}
 6 | {# TODO: Should use md-hero--expand only when needed #}
 7 | 
 8 | 
 9 | Univariate volatility modeling, bootstrapping, multiple comparison procedures and unit
10 | root tests.
11 | 
12 | 
13 | {% endif %}
14 | {% endblock %}
15 | 
16 | {%- block htmltitle %}
17 | {{ super() }}
18 | 
19 | 
20 | 
21 | 
22 | 
23 | 
24 | 
25 | 
26 | {% endblock %}
--------------------------------------------------------------------------------
/arch/tests/test_build.py:
--------------------------------------------------------------------------------
 1 | import tempfile
 2 | 
 3 | import pytest
 4 | 
 5 | from arch import __version__, version_tuple
 6 | 
 7 | try:
 8 |     from arch._build.git_version import get_version, write_version_file
 9 | 
10 |     HAS_SETUPTOOLS_SCM = True
11 | except ImportError:
12 |     HAS_SETUPTOOLS_SCM = False
13 | 
14 | 
15 | @pytest.mark.skipif(not HAS_SETUPTOOLS_SCM, reason="setuptools_scm is not installed")
16 | def test_get_version():
17 |     try:
18 |         version, version_fields = get_version()
19 | 
20 |         assert isinstance(version, str)
21 |         assert isinstance(version_fields, tuple)
22 |         assert all(isinstance(v, (int, str)) for v in version_fields)
23 |     except LookupError:
24 |         pytest.skip("No git repository found")
25 | 
26 | 
27 | @pytest.mark.skipif(not HAS_SETUPTOOLS_SCM, reason="setuptools_scm is not installed")
28 | def test_write_version_file():
29 |     with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
30 | 
31 |         write_version_file(tmpfile.name, __version__, version_tuple)
32 |     with open(tmpfile.name, "r") as f:
33 |         content = f.read()
34 | 
35 |     assert f"__version__ = version = '{__version__}'" in content
36 |     assert f"__version_tuple__ = version_tuple = {version_tuple}" in content
37 | 
--------------------------------------------------------------------------------
/doc/source/univariate/mean.rst:
--------------------------------------------------------------------------------
 1 | .. _mean-models:
 2 | 
 3 | Mean Models
 4 | ===========
 5 | All ARCH models start by specifying a mean model.
 6 | 
 7 | .. module:: arch.univariate
 8 |    :noindex:
 9 | 
10 | .. currentmodule:: arch.univariate
11 | 
12 | .. autosummary::
13 |    :toctree: generated/
14 | 
15 |    ZeroMean
16 |    ConstantMean
17 |    ARX
18 |    HARX
19 |    LS
20 | 
21 | (G)ARCH-in-mean Models
22 | ----------------------
23 | 
24 | (G)ARCH-in-mean models allow the conditional variance (or a transformation of it)
25 | to enter the conditional mean.
26 | 
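A sketch of fitting a GARCH-in-mean model with the conditional volatility entering the mean
(this mirrors the ``arch.data.sp500`` usage elsewhere in this repo; ``form="vol"`` is assumed
to be one of the supported transformations — check :class:`ARCHInMean` before relying on it):

.. code-block:: python

   from arch.univariate import ARCHInMean, GARCH
   import arch.data.sp500

   market = arch.data.sp500.load()["Adj Close"]
   returns = 100 * market.pct_change().dropna()

   # Conditional volatility enters the mean equation
   aim = ARCHInMean(returns, volatility=GARCH(), form="vol")
   res = aim.fit(disp="off")

27 | .. 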
autosummary::
28 |    :toctree: generated/
29 | 
30 |    ARCHInMean
31 | 
32 | Special Requirements
33 | ~~~~~~~~~~~~~~~~~~~~
34 | Not all volatility processes support use in ARCH-in-mean (AIM) models.
35 | Specifically, the property ``updateable`` must be ``True``.
36 | 
37 | .. ipython::
38 | 
39 |    In [1]: from arch.univariate import GARCH, EGARCH
40 | 
41 |    In [2]: GARCH().updateable
42 | 
43 |    In [3]: EGARCH().updateable
44 | 
45 | Writing New Mean Models
46 | -----------------------
47 | .. currentmodule:: arch.univariate.base
48 | 
49 | All mean models must inherit from :class:`ARCHModel` and provide all public
50 | methods. There are two optional private methods that should be provided if
51 | applicable.
52 | 
53 | .. autosummary::
54 |    :toctree: generated/
55 | 
56 |    ARCHModel
57 | 
--------------------------------------------------------------------------------
/arch/univariate/__init__.py:
--------------------------------------------------------------------------------
 1 | import types
 2 | 
 3 | from arch.univariate import recursions_python
 4 | from arch.univariate.distribution import (
 5 |     Distribution,
 6 |     GeneralizedError,
 7 |     Normal,
 8 |     SkewStudent,
 9 |     StudentsT,
10 | )
11 | from arch.univariate.mean import (
12 |     ARX,
13 |     HARX,
14 |     LS,
15 |     ARCHInMean,
16 |     ConstantMean,
17 |     ZeroMean,
18 |     arch_model,
19 | )
20 | from arch.univariate.volatility import (
21 |     APARCH,
22 |     ARCH,
23 |     EGARCH,
24 |     FIGARCH,
25 |     GARCH,
26 |     HARCH,
27 |     ConstantVariance,
28 |     EWMAVariance,
29 |     FixedVariance,
30 |     MIDASHyperbolic,
31 |     RiskMetrics2006,
32 | )
33 | 
34 | recursions: types.ModuleType
35 | try:
36 |     from arch.univariate import recursions
37 | except ImportError:
38 |     recursions = recursions_python
39 | 
40 | __all__ = [
41 |     "APARCH",
42 |     "ARCH",
43 |     "ARX",
44 |     "EGARCH",
45 |     "FIGARCH",
46 |     "GARCH",
47 |     "HARCH",
48 |     "HARX",
49 |     "LS",
50 |     "ARCHInMean",
51 |     "ConstantMean",
52 |     "ConstantVariance",
53 |     "Distribution",
54 |     "EWMAVariance",
55 |     "FixedVariance",
56 |     "GeneralizedError",
57 |     "MIDASHyperbolic",
58 |     "Normal",
59 |     "RiskMetrics2006",
60 |     "SkewStudent",
61 |     "StudentsT",
62 |     "ZeroMean",
63 |     "arch_model",
64 |     "recursions",
65 |     "recursions_python",
66 | ]
67 | 
--------------------------------------------------------------------------------
/.github/workflows/generate-documentation.yml:
--------------------------------------------------------------------------------
 1 | name: Generate Documentation
 2 | 
 3 | on:
 4 |   release:
 5 |     types: [published]
 6 |   push:
 7 |     branches:
 8 |       - main
 9 | 
10 | jobs:
11 |   build:
12 |     runs-on: ubuntu-latest
13 |     defaults:
14 |       run:
15 |         shell: bash
16 |     strategy:
17 |       fail-fast: false
18 |       matrix:
19 |         python-version: ["3.13"]
20 |     steps:
21 |       - name: Checkout
22 |         uses: actions/checkout@v6
23 |         with:
24 |           fetch-depth: 0
25 | 
26 |       - name: Install pandoc
27 |         uses: r-lib/actions/setup-pandoc@v2
28 | 
29 |       - name: Set up Python ${{ matrix.python-version }}
30 |         uses: actions/setup-python@v6
31 |         with:
32 |           python-version: ${{ matrix.python-version }}
33 | 
34 |       - name: Install dependencies
35 |         run: |
36 |           python -m pip install --upgrade pip
37 |           python -m pip install -r requirements.txt
38 |           python -m pip install -r requirements-dev.txt
39 | 
40 |       - name: Install arch
41 |         run: |
42 |           python -m pip list
43 |           python -m pip install -e . 
--no-build-isolation -vv
44 | 
45 |       - name: Build documentation
46 |         run: |
47 |           pushd doc
48 |           make html
49 |           make html
50 |           popd
51 | 
52 |       - name: Deploy documentation
53 |         env:
54 |           GIT_TAG: ${{ github.event.release.tag_name }}
55 |         run: source ci/github-actions/push-docs-gh-pages.sh
56 | 
--------------------------------------------------------------------------------
/.github/workflows/cron-test.yml:
--------------------------------------------------------------------------------
 1 | name: Build wheels
 2 | 
 3 | on:
 4 |   push:
 5 |     branches: [ main ]
 6 |   pull_request:
 7 |     branches: [ main ]
 8 |   schedule:
 9 |     - cron: '37 0 * * 2'  # 2/weekly
10 | 
11 | jobs:
12 |   build_wheels:
13 |     name: ${{ matrix.os }}, Python ${{ matrix.python }}
14 |     runs-on: ${{ matrix.os }}
15 |     defaults:
16 |       run:
17 |         shell: bash
18 |     strategy:
19 |       fail-fast: false
20 |       matrix:
21 |         python: [cp314]
22 |         os: [ubuntu-latest, windows-latest]
23 |     env:
24 |       BUILD_COMMIT: "main"
25 |       MKL_NUM_THREADS: 1
26 |       OMP_NUM_THREADS: 1
27 |       OPENBLAS_NUM_THREADS: 1
28 | 
29 |     steps:
30 |       - uses: actions/checkout@v6
31 |         with:
32 |           submodules: recursive
33 |           fetch-depth: 0
34 | 
35 |       - uses: actions/setup-python@v6
36 |         with:
37 |           python-version: '3.x'
38 | 
39 |       - uses: pypa/cibuildwheel@v3.3.0
40 |         env:
41 |           CIBW_BUILD: ${{ matrix.python }}-*
42 |           CIBW_BEFORE_BUILD: python -m pip install pip --upgrade
43 |           CIBW_TEST_COMMAND: python -c "import arch; arch.test(['--skip-slow','-n','auto'])"
44 |           CIBW_TEST_REQUIRES: pytest pytest-xdist pytest-cov matplotlib numpy scipy pandas seaborn
45 |           CIBW_SKIP: "*-musllinux_* *-win32"
46 |           CIBW_ARCHS_LINUX: "x86_64"
47 |           CIBW_BUILD_VERBOSITY: 2
48 |         with:
49 |           output-dir: wheelhouse
50 | 
--------------------------------------------------------------------------------
/arch/unitroot/critical_values/dfgls.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Contains values used to approximate the critical value and p-value from DFGLS
 3 | statistics
 4 | 
 5 | These have been computed by simulation using the methodology of MacKinnon
 6 | (1994, 2010). See dfgls_critical_values_simulation for implementation.
 7 | """
 8 | 
 9 | from numpy import array
10 | 
11 | dfgls_cv_approx = {
12 |     "c": array(
13 |         [
14 |             [-2.56781793e00, -2.05575392e01, 1.82727674e02, -1.77866664e03],
15 |             [-1.94363325e00, -2.17272746e01, 2.60815068e02, -2.26914916e03],
16 |             [-1.61998241e00, -2.32734708e01, 3.06474378e02, -2.57483557e03],
17 |         ]
18 |     ),
19 |     "ct": array(
20 |         [
21 |             [-3.40689134, -21.69971242, 27.26295939, -816.84404772],
22 |             [-2.84677178, -19.69109364, 84.7664136, -799.40722401],
23 |             [-2.55890707, -19.42621991, 116.53759752, -840.31342847],
24 |         ]
25 |     ),
26 | }
27 | 
28 | dfgls_tau_max = {"c": 13.365361509140614, "ct": 8.73743383728356}
29 | 
30 | dfgls_tau_min = {"c": -17.561302895074206, "ct": -13.681153542634465}
31 | 
32 | dfgls_tau_star = {"c": -0.4795076091714674, "ct": -2.1960404365401298}
33 | 
34 | dfgls_large_p = {
35 |     "c": [0.50612497, 0.98305664, -0.05648525, 0.00140875],
36 |     "ct": [2.60561421, 1.67850224, 0.0373599, -0.01017936],
37 | }
38 | 
39 | dfgls_small_p = {
40 |     "c": [0.67422739, 1.25475826, 0.03572509],
41 |     "ct": [2.38767685, 1.57454737, 0.05754439],
42 | }
43 | 
--------------------------------------------------------------------------------
/doc/source/bootstrap/background.rst:
--------------------------------------------------------------------------------
 1 | .. 
bibliography bootstrap Bibliography
 2 | 
 3 | References
 4 | ----------
 5 | 
 6 | The bootstrap is a large area with a number of high-quality books. Leading
 7 | references include
 8 | 
 9 | .. rubric:: References
10 | 
11 | .. [Chernick] Chernick, M. R. (2011). *Bootstrap methods: A guide for
12 |    practitioners and researchers* (Vol. 619). John Wiley & Sons.
13 | 
14 | .. [Davidson] Davison, A. C. (1997). *Bootstrap methods and their*
15 |    *application* (Vol. 1). Cambridge University Press.
16 | 
17 | .. [EfronTibshirani] Efron, B., & Tibshirani, R. J. (1994). *An*
18 |    *introduction to the bootstrap* (Vol. 57). CRC Press.
19 | 
20 | .. [PolitisRomanoWolf] Politis, D. N., Romano, J. P., & Wolf, M. (1999).
21 |    *Subsampling*. Springer.
22 | 
23 | Articles used in the creation of this module include
24 | 
25 | .. [CarpenterBithell] Carpenter, J., & Bithell, J. (2000). "Bootstrap
26 |    confidence intervals: when, which, what? A practical guide for medical
27 |    statisticians." *Statistics in Medicine*, 19(9), 1141-1164.
28 | 
29 | .. [DavidsonMacKinnon] Davidson, R., & MacKinnon, J. G. (2006). "Bootstrap
30 |    methods in econometrics." *Palgrave Handbook of Econometrics*, 1, 812-38.
31 | 
32 | .. [DiCiccioEfron] DiCiccio, T. J., & Efron, B. (1996). "Bootstrap confidence
33 |    intervals." *Statistical Science*, 189-212.
34 | 
35 | .. [Efron] Efron, B. (1987). "Better bootstrap confidence intervals."
36 |    *Journal of the American Statistical Association*, 82(397), 171-185.
37 | 
--------------------------------------------------------------------------------
/doc/source/changes/6.0.rst:
--------------------------------------------------------------------------------
 1 | =========
 2 | Version 6
 3 | =========
 4 | 
 5 | Release 6.3
 6 | ===========
 7 | - Performance enhancement for long-run covariance estimators when numba is installed (:issue:`687`)
 8 | - Python 3.12 support
 9 | - Compatibility with NumPy 2
10 | - Fixes for future changes in pandas
11 | 
12 | Release 6.2
13 | ===========
14 | - Fixed a bug that affected forecasting from :class:`~arch.univariate.volatility.FIGARCH` models (:issue:`606`).
15 | - Added a performance warning when testing for unit roots in large series using a lag-length
16 |   search with no maximum lag specified.
17 | - Changed the default value of ``reindex`` to ``False`` so that forecasts
18 |   will not match the input by default. Set ``reindex`` to ``True`` if this is
19 |   required.
20 | - Made ``from __future__ import reindex`` a no-op.
21 | - Updated notebooks to reflect best practices
22 | 
23 | Release 6.1
24 | ===========
25 | - Pushed back the adoption of Cython 3 until a later date
26 | - Fixed a bug that occurred when:
27 | 
28 |   - Using an AR, HAR or other model with lagged dependent variables; and
29 |   - ``rescale=True`` or with data that was automatically rescaled.
30 | 
31 | Release 6.0
32 | ===========
33 | - Minimum supported Python is 3.9
34 | - Bumped minimum NumPy, SciPy, pandas, statsmodels and Cython
35 | - Removed dependence on property-cached
36 | - Added compatibility with Cython 3
37 | 
--------------------------------------------------------------------------------
/ci/github-actions/push-docs-gh-pages.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | 
 3 | export GIT_REPO_DIR=${PWD}
 4 | echo "Set git email and name"
 5 | git config user.email "kevin.k.sheppard@gmail.com"
 6 | git config user.name "Kevin Sheppard"
 7 | git config advice.addIgnoredFile false
 8 | echo "Checkout pages"
 9 | git checkout gh-pages
10 | echo "Remove devel"
11 | rm -rf devel
12 | echo "Make a new devel"
13 | mkdir devel
14 | echo "Checking for tag"
15 | if [[ -n "${GIT_TAG}" ]]; then
16 |   echo "Tag ${GIT_TAG} is defined"
17 |   echo "Copy docs to root"
18 |   echo cp -r ${PWD}/doc/build/html/* ${PWD}/
19 |   cp -r ${PWD}/doc/build/html/* ${PWD}
20 | else
21 |   echo "Tag is ${GIT_TAG}. Not updating main documents"
22 | fi
23 | echo "Copy docs to devel"
24 | echo cp -r ${PWD}/doc/build/html/* ${PWD}/devel/
25 | cp -r ${PWD}/doc/build/html/* ${PWD}/devel/
26 | echo "Clean up docs"
27 | cd ${GIT_REPO_DIR}/doc
28 | make clean && git clean -xfd
29 | echo "Add files"
30 | cd ${GIT_REPO_DIR}
31 | git add .
32 | # Ensure key files are added
33 | git add devel/**/* || true
34 | git add **/*.html || true
35 | git add **/*.ipynb || true
36 | git add **/*.txt || true
37 | git add _images/* || true
38 | git add _sources/**/* || true
39 | git add _modules/**/* || true
40 | git add _static/**/* || true
41 | echo "Change remote"
42 | git remote set-url origin https://bashtage:"${GITHUB_TOKEN}"@github.com/bashtage/arch.git
43 | echo "Github Actions doc build after commit ${GITHUB_SHA::8}"
44 | git commit -a -m "Github Actions doc build after commit ${GITHUB_SHA::8}"
45 | echo "Push"
46 | git push -f
47 | echo Show status using git status
48 | git status
--------------------------------------------------------------------------------
/doc/source/images/hero.py:
--------------------------------------------------------------------------------
 1 | import warnings
 2 | 
 3 | import matplotlib.font_manager
 4 | from matplotlib.pyplot import figure
 5 | import numpy as np
 6 | import seaborn as sns
 7 | 
 8 | from arch import arch_model
 9 | import arch.data.sp500
10 | 
11 | warnings.simplefilter("ignore")
12 | sns.set_style("whitegrid")
13 | sns.mpl.rcParams["figure.figsize"] = (12, 3)
14 | 
15 | data = arch.data.sp500.load()
16 | market = data["Adj Close"]
17 | returns = 100 * market.pct_change().dropna()
18 | 
19 | am = arch_model(returns)
20 | res = am.fit(update_freq=5)
21 | 
22 | prop = matplotlib.font_manager.FontProperties("Roboto")
23 | 
24 | 
25 | def _set_tight_x(axis, index):
26 |     try:
27 |         axis.set_xlim(index[0], index[-1])
28 |     except ValueError:
29 |         pass
30 | 
31 | 
32 | fig = figure()
33 | ax = fig.add_subplot(1, 1, 1)
34 | vol = res.conditional_volatility
35 | title = "S&P 500 Annualized Conditional Volatility"
36 | scales = {"D": 252, "W": 52, "M": 12}
37 | vol = vol * np.sqrt(scales["D"])
38 | 
39 | ax.plot(res._index.values, vol)
40 | _set_tight_x(ax, res._index)
41 | ax.set_title(title)
42 | sns.despine(ax=ax)
43 | title = ax.get_children()[7]
44 | title.set_fontproperties(prop)
45 | title.set_fontsize(26)
46 | title.set_fontweight("ultralight")
47 | title.set_fontstretch("ultra-condensed")
48 | title.set_color("#757575")
49 | 
ax.xaxis.label.set_color("#757575")
50 | ax.yaxis.label.set_color("#757575")
51 | ax.tick_params(axis="x", colors="#757575")
52 | ax.tick_params(axis="y", colors="#757575")
53 | fig.tight_layout(pad=1.0)
54 | fig.savefig("hero.svg", transparent=True)
55 | fig.savefig("hero.png", transparent=True)
56 | 
--------------------------------------------------------------------------------
/arch/utility/__init__.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import sys
 3 | 
 4 | PKG = os.path.dirname(os.path.dirname(__file__))
 5 | 
 6 | 
 7 | def test(
 8 |     extra_args: str | list[str] | None = None,
 9 |     exit: bool = True,
10 |     append: bool = True,
11 |     location: str = "",
12 | ) -> int:
13 |     """
14 |     Test runner that allows testing of installed package.
15 | 
16 |     Exits with the test status code upon completion.
17 | 
18 |     Parameters
19 |     ----------
20 |     extra_args : {str, list[str]}, default None
21 |         Extra arguments to pass to pytest. Default options are --tb=auto
22 |         and --disable-pytest-warnings. Providing extra_args either appends
23 |         to or overwrites the defaults, depending on ``append``.
24 |     exit : bool, default True
25 |         Call ``sys.exit`` with the pytest status code once testing completes.
26 |     append : bool, default True
27 |         Append ``extra_args`` to the default options rather than replacing them.
28 |     location : str, default ""
29 |         Location of a sub-package to test, relative to the package root.
30 |     """
31 | 
32 |     try:
33 |         import pytest  # noqa: PLC0415
34 |     except ImportError as exc:
35 |         raise ImportError("Need pytest to run tests") from exc
36 | 
37 |     cmd = ["--tb=auto", "--disable-pytest-warnings"]
38 |     if extra_args:
39 |         if not isinstance(extra_args, list):
40 |             pytest_args = [extra_args]
41 |         else:
42 |             pytest_args = extra_args
43 |         if append:
44 |             cmd += pytest_args[:]
45 |         else:
46 |             cmd = pytest_args
47 |     pkg_loc = PKG
48 |     if location:
49 |         pkg_loc = os.path.abspath(os.path.join(PKG, location))
50 |     if not os.path.exists(pkg_loc):
51 |         raise RuntimeError(f"{pkg_loc} was not found. Unable to run tests")
52 |     cmd = [pkg_loc] + cmd
53 |     cmd_str = " ".join(cmd)
54 |     print(f"running: pytest {cmd_str}")
55 |     status = pytest.main(cmd)
56 |     if exit:  # pragma: no cover
57 |         sys.exit(status)
58 |     return status
59 | 
60 | 
61 | __all__ = ["test"]
62 | 
--------------------------------------------------------------------------------
/doc/source/univariate/volatility.rst:
--------------------------------------------------------------------------------
 1 | .. _volatility-processes:
 2 | 
 3 | Volatility Processes
 4 | ====================
 5 | A volatility process is added to a mean model to capture time-varying
 6 | volatility.
 7 | 
 8 | .. module:: arch.univariate
 9 |    :noindex:
10 | 
11 | .. currentmodule:: arch.univariate
12 | 
13 | .. autosummary::
14 |    :toctree: generated/
15 | 
16 |    ConstantVariance
17 |    GARCH
18 |    FIGARCH
19 |    EGARCH
20 |    HARCH
21 |    MIDASHyperbolic
22 |    ARCH
23 |    APARCH
24 | 
25 | Parameterless Variance Processes
26 | --------------------------------
27 | Some volatility processes use fixed parameters and so have no parameters that
28 | are estimable.
29 | 
30 | .. autosummary::
31 |    :toctree: generated/
32 | 
33 |    EWMAVariance
34 |    RiskMetrics2006
35 | 
36 | FixedVariance
37 | -------------
38 | The ``FixedVariance`` class is a special-purpose volatility process that allows
39 | the so-called zig-zag algorithm to be used. See the example for usage.
40 | 
41 | .. autosummary::
42 |    :toctree: generated/
43 | 
44 |    FixedVariance
45 | 
46 | Writing New Volatility Processes
47 | --------------------------------
48 | All volatility processes must inherit from :class:`~arch.univariate.volatility.VolatilityProcess` and provide
49 | all public methods. A minimal sketch appears below.
50 | 
51 | .. currentmodule:: arch.univariate.volatility
52 | 
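A skeleton for a custom process (a sketch only: the method names below — ``compute_variance``,
``simulate``, ``starting_values``, ``backcast``, ``bounds``, ``constraints`` and
``parameter_names`` — are assumed from this release's base class and should be verified against
:class:`VolatilityProcess` before use):

.. code-block:: python

   import numpy as np

   from arch.univariate.volatility import VolatilityProcess


   class NaiveVariance(VolatilityProcess):
       """Toy process that fixes the variance at the backcast value."""

       def __init__(self):
           super().__init__()
           self._name = "NaiveVariance"

       def parameter_names(self):
           return []  # no estimable parameters

       def starting_values(self, resids):
           return np.empty(0)

       def backcast(self, resids):
           return float(np.mean(resids**2))

       def bounds(self, resids):
           return []

       def constraints(self):
           return np.empty((0, 0)), np.empty(0)

       def compute_variance(self, parameters, resids, sigma2, backcast, var_bounds):
           sigma2[:] = backcast  # constant conditional variance
           return sigma2

       def simulate(self, parameters, nobs, rng, burn=500, initial_value=None):
           sigma2 = np.full(nobs + burn, 1.0 if initial_value is None else initial_value)
           data = np.sqrt(sigma2) * rng(nobs + burn)
           return data[burn:], sigma2[burn:]

53 | .. 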
autosummary:: 54 | :toctree: generated/ 55 | 56 | VolatilityProcess 57 | 58 | They may optionally expose a 59 | :class:`~arch.univariate.recursions.VolatilityUpdater` class 60 | that can be used in :class:`~arch.univariate.ARCHInMean` estimation. 61 | 62 | .. currentmodule:: arch.univariate.recursions 63 | 64 | .. autosummary:: 65 | :toctree: generated/ 66 | 67 | VolatilityUpdater 68 | -------------------------------------------------------------------------------- /ci/azure/install-posix.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ ${USE_CONDA} == "true" ]]; then 4 | conda config --set always_yes true 5 | conda update --all --quiet 6 | conda create -n arch-test python=${PYTHON_VERSION} -y 7 | conda activate arch-test 8 | conda init 9 | echo ${PATH} 10 | source activate arch-test 11 | echo ${PATH} 12 | which python 13 | CMD="conda install numpy" 14 | else 15 | CMD="python -m pip install numpy" 16 | fi 17 | 18 | python -m pip install --upgrade pip "setuptools>=61" wheel 19 | python -m pip install cython "pytest>=8.4.1,<9" pytest-xdist "coverage[toml]" pytest-cov ipython jupyter notebook nbconvert "property_cached>=1.6.3" black isort flake8 nbconvert setuptools_scm colorama "meson-python>=0.18.0" meson ninja 20 | 21 | 22 | if [[ -n ${NUMPY} ]]; then CMD="$CMD~=${NUMPY}"; fi; 23 | CMD="$CMD scipy" 24 | if [[ -n ${SCIPY} ]]; then CMD="$CMD~=${SCIPY}"; fi; 25 | CMD="$CMD pandas" 26 | if [[ -n ${PANDAS} ]]; then CMD="$CMD~=${PANDAS}"; fi; 27 | CMD="$CMD statsmodels" 28 | if [[ -n ${STATSMODELS} ]]; then CMD="$CMD~=${STATSMODELS}"; fi 29 | if [[ -n ${MATPLOTLIB} ]]; then CMD="$CMD matplotlib~=${MATPLOTLIB} seaborn"; fi 30 | if [[ ${USE_NUMBA} = true ]]; then 31 | CMD="${CMD} numba"; 32 | if [[ -n ${NUMBA} ]]; then 33 | CMD="${CMD}~=${NUMBA}" 34 | fi; 35 | fi; 36 | CMD="$CMD $EXTRA" 37 | echo $CMD 38 | eval $CMD 39 | 40 | if [ "${PIP_PRE}" = true ]; then 41 | python -m pip install matplotlib cython formulaic meson-python --upgrade 42 | python -m pip uninstall -y numpy pandas scipy matplotlib statsmodels 43 | python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy pandas scipy matplotlib statsmodels --upgrade --use-deprecated=legacy-resolver --only-binary=:all: 44 | fi 45 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # License 2 | 3 | **Copyright (c) 2017 Kevin Sheppard. All rights reserved.** 4 | 5 | Developed by: Kevin Sheppard (, 6 | ) 7 | [https://www.kevinsheppard.com](https://www.kevinsheppard.com) 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a copy of 10 | this software and associated documentation files (the "Software"), to deal with 11 | the Software without restriction, including without limitation the rights to 12 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 13 | of the Software, and to permit persons to whom the Software is furnished to do 14 | so, subject to the following conditions: 15 | 16 | Redistributions of source code must retain the above copyright notice, this 17 | list of conditions and the following disclaimers. 18 | 19 | Redistributions in binary form must reproduce the above copyright notice, this 20 | list of conditions and the following disclaimers in the documentation and/or 21 | other materials provided with the distribution. 
22 | 
23 | Neither the names of Kevin Sheppard, nor the names of its contributors may be
24 | used to endorse or promote products derived from this Software without specific
25 | prior written permission.
26 | 
27 | **THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
30 | CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
31 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
32 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
33 | THE SOFTWARE.**
34 | 
--------------------------------------------------------------------------------
/arch/compat/numba.py:
--------------------------------------------------------------------------------
 1 | from collections.abc import Callable
 2 | import functools
 3 | import os
 4 | from typing import Any
 5 | import warnings
 6 | 
 7 | from arch.utility.exceptions import PerformanceWarning
 8 | 
 9 | DISABLE_NUMBA = os.environ.get("ARCH_DISABLE_NUMBA", "") in ("1", "true", "True")
10 | 
11 | performance_warning: str = """\
12 | numba is not available, and this function is being executed without JIT
13 | compilation. Either installing numba or reinstalling arch after installing
14 | Cython is strongly recommended."""
15 | 
16 | HAS_NUMBA = False
17 | try:
18 |     if DISABLE_NUMBA:
19 |         raise ImportError
20 |     from numba import jit
21 | 
22 |     HAS_NUMBA = True
23 |     jit = functools.partial(jit, nopython=True)
24 | 
25 | except ImportError:
26 | 
27 |     def jit(
28 |         function_or_signature: Callable[..., Any] | None = None,
29 |         *args: Any,
30 |         **kwargs: Any,
31 |     ) -> Any:
32 |         if function_or_signature is not None and callable(function_or_signature):
33 |             # Used directly, e.g., f_jit = jit(f)
34 |             @functools.wraps(function_or_signature)
35 |             def wrapper(*args: Any, **kwargs: Any) -> Callable[..., Any]:
36 |                 warnings.warn(performance_warning, PerformanceWarning, stacklevel=2)
37 |                 return function_or_signature(*args, **kwargs)
38 | 
39 |             return wrapper
40 | 
41 |         # Used as a decorator, e.g., @jit
42 |         def wrap(func: Callable[..., Any]) -> Callable[..., Any]:
43 |             @functools.wraps(func)
44 |             def wrapper(*args: Any, **kwargs: Any) -> Callable[..., Any]:
45 |                 warnings.warn(performance_warning, PerformanceWarning, stacklevel=2)
46 |                 return func(*args, **kwargs)
47 | 
48 |             return wrapper
49 | 
50 |         return wrap
51 | 
52 | 
53 | __all__ = ["DISABLE_NUMBA", "HAS_NUMBA", "PerformanceWarning", "jit"]
54 | 
--------------------------------------------------------------------------------
/doc/source/index.rst:
--------------------------------------------------------------------------------
 1 | :hero: Univariate volatility modeling, bootstrapping, multiple comparison procedures and unit root tests.
 2 | 
 3 | .. image:: images/color-logo.svg
 4 |    :width: 33.3%
 5 |    :alt: arch logo
 6 | 
 7 | .. note::
 8 | 
 9 |    `Stable documentation `_ for the latest release
10 |    is located at `doc `_.
11 |    Documentation for `recent developments `_
12 |    is located at `devel `_.
13 | 
14 | Introduction
15 | ============
16 | 
17 | The ARCH toolbox contains routines for:
18 | 
19 | - Univariate volatility models;
20 | - Bootstrapping;
21 | - Multiple comparison procedures;
22 | - Unit root tests;
23 | - Cointegration testing and estimation; and
24 | - Long-run covariance estimation.
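
For example, fitting the workhorse GARCH(1,1) model takes only a few lines (a sketch
mirroring ``doc/source/images/hero.py`` in this repo, using the bundled S&P 500 data):

.. code-block:: python

   from arch import arch_model
   import arch.data.sp500

   market = arch.data.sp500.load()["Adj Close"]
   returns = 100 * market.pct_change().dropna()

   # GARCH(1,1) with a constant mean and normal errors (the defaults)
   res = arch_model(returns).fit(update_freq=5)
   print(res.summary())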
25 | 
26 | Future plans are to continue to expand this toolbox to include additional
27 | routines relevant for the analysis of financial data.
28 | 
29 | .. toctree::
30 |    :maxdepth: 2
31 |    :hidden:
32 | 
33 |    Univariate Volatility Models
34 |    Bootstrapping
35 |    Multiple Comparison Problems
36 |    Unit Root Tests and Cointegration Analysis
37 |    Long-run Covariance Estimation
38 |    API Reference
39 |    Common Type Definitions
40 |    Change Log
41 | 
42 | Citation
43 | ========
44 | This package should be cited using Zenodo. For example, for the 4.18 release please
45 | cite [arch]_ as:
46 | 
47 | .. [arch] Kevin Sheppard (2021, March 3). bashtage/arch: Release 4.18 (Version v4.18).
48 |    Zenodo. https://doi.org/10.5281/zenodo.593254
49 | 
50 | .. image:: https://zenodo.org/badge/doi/10.5281/zenodo.593254.svg
51 |    :target: https://doi.org/10.5281/zenodo.593254
52 | 
53 | Index
54 | =====
55 | 
56 | * :ref:`genindex`
57 | * :ref:`modindex`
58 | 
--------------------------------------------------------------------------------
/ci/azure/azure_template_windows.yml:
--------------------------------------------------------------------------------
 1 | # Python package
 2 | # Create and test a Python package on multiple Python versions.
 3 | # Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
 4 | # https://docs.microsoft.com/azure/devops/pipelines/languages/python
 5 | 
 6 | parameters:
 7 |   # defaults for any parameters that are not specified
 8 |   name: ''
 9 |   vmImage: ''
10 |   use.numba: 'true'
11 | 
12 | 
13 | jobs:
14 | 
15 | - job: ${{ parameters.name }}Test
16 |   pool:
17 |     vmImage: ${{ parameters.vmImage }}
18 |   strategy:
19 |     matrix:
20 |       python310_win_latest:
21 |         python.version: '3.10'
22 |       python311_win_latest:
23 |         python.version: '3.11'
24 |       python312_win_latest:
25 |         python.version: '3.12'
26 |       python313_win_latest:
27 |         python.version: '3.13'
28 |     maxParallel: 10
29 | 
30 |   steps:
31 |     - task: UsePythonVersion@0
32 |       inputs:
33 |         versionSpec: '$(python.version)'
34 |         architecture: 'x64'
35 |       displayName: 'Use Python $(python.version)'
36 | 
37 |     - script: |
38 |         python -m venv arch-test
39 |         call arch-test\Scripts\activate.bat
40 |         python -m pip install --upgrade pip wheel
41 |         python -m pip install -r requirements-dev.txt -U
42 |         python -m pip install -r requirements.txt -U
43 |         python -m pip list
44 |       displayName: 'Create Venv and Install dependencies'
45 | 
46 |     - script: |
47 |         call arch-test\Scripts\activate.bat
48 |         python -m pip install -e . 
-vv --no-build-isolation 49 | displayName: 'Build Cython Extensions' 50 | 51 | - script: | 52 | call arch-test\Scripts\activate.bat 53 | pytest arch --junitxml=junit/test-results.xml -n 2 --durations=25 54 | displayName: 'pytest' 55 | 56 | - task: PublishTestResults@2 57 | inputs: 58 | testResultsFiles: '**/test-results.xml' 59 | testRunTitle: 'Python $(python.version)' 60 | condition: succeededOrFailed() 61 | -------------------------------------------------------------------------------- /arch/utility/cov.py: -------------------------------------------------------------------------------- 1 | from numpy import asarray, squeeze 2 | 3 | from arch._typing import Float64Array 4 | 5 | 6 | def cov_nw( 7 | y: Float64Array, lags: int = 0, demean: bool = True, axis: int = 0, ddof: int = 0 8 | ) -> Float64Array | float: 9 | """ 10 | Computes Newey-West covariance for 1-d and 2-d arrays 11 | 12 | Parameters 13 | ---------- 14 | y : ndarray 15 | Values to use when computing the Newey-West covariance estimator, either 16 | 1-d or 2-d. When y is 2d, default behavior is to treat columns as variables 17 | and rows as observations. 18 | lags : int 19 | Number of lags to include in the Newey-West covariance estimator 20 | demean : bool 21 | Indicates whether to subtract the mean. Default is True 22 | axis : int 23 | The axis to use when y is 2d 24 | ddof : int 25 | Degree of freedom correction for compatibility with simple covariance 26 | estimators. Default is 0. 27 | 28 | Returns 29 | ------- 30 | ndarray 31 | The estimated covariance 32 | """ 33 | z = y 34 | is_1d = False 35 | if axis > z.ndim: 36 | raise ValueError("axis must be less than the dimension of y") 37 | if z.ndim == 1: 38 | is_1d = True 39 | z = asarray(z)[:, None] 40 | if axis == 1: 41 | z = z.T 42 | n = z.shape[0] 43 | if ddof > n: 44 | raise ValueError( 45 | "ddof must be strictly smaller than the number of observations" 46 | ) 47 | if lags > n: 48 | error = "lags must be weakly smaller than the number of observations" 49 | raise ValueError(error) 50 | 51 | if demean: 52 | z = z - z.mean(0) 53 | cov = z.T.dot(z) 54 | for j in range(1, lags + 1): 55 | w = 1 - j / (lags + 1) 56 | gamma = z[j:].T.dot(z[:-j]) 57 | cov += w * (gamma + gamma.T) 58 | cov = cov / (n - ddof) 59 | if is_1d: 60 | return float(squeeze(cov)) 61 | return asarray(cov) 62 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/simulation/kpss_simulation_process.py: -------------------------------------------------------------------------------- 1 | from io import StringIO 2 | 3 | import numpy as np 4 | import pandas as pd 5 | 6 | sio = StringIO() 7 | sio.write("from numpy import asarray\n\n") 8 | sio.write("kpss_critical_values = {}\n") 9 | 10 | c = pd.read_hdf("kpss_critical_values.h5", "c") 11 | ct = pd.read_hdf("kpss_critical_values.h5", "ct") 12 | 13 | data = {"c": c, "ct": ct} 14 | for k in ("c", "ct"): 15 | v = data[k] 16 | n = v.shape[0] 17 | selected = np.zeros((n, 1), dtype=bool) 18 | selected[0] = True 19 | selected[-1] = True 20 | selected[v.index == 10.0] = True 21 | selected[v.index == 5.0] = True 22 | selected[v.index == 2.5] = True 23 | selected[v.index == 1.0] = True 24 | max_diff = 1.0 25 | while max_diff > 0.05: 26 | xp = np.squeeze(np.asarray(v[selected].values)) 27 | yp = np.asarray(v[selected].index, dtype=float) 28 | x = np.squeeze(np.asarray(v.values)) 29 | y = np.asarray(v.index, dtype=float) 30 | yi = np.interp(x, xp, yp) 31 | abs_diff = np.abs(y - yi) 32 | max_diff = np.max(abs_diff) 33 
|         if max_diff > 0.05:
34 |             selected[np.where(abs_diff == max_diff)] = True
35 |     selected[np.asarray(v.index, dtype=float) <= 10.0] = True
36 | 
37 |     quantiles = list(np.squeeze(v[selected].index.values))
38 |     critical_values = list(np.squeeze(np.asarray(v[selected].values)))
39 |     # Fix for first CV
40 |     critical_values[0] = 0.0
41 |     sio.write(k + " = (")
42 |     count = 0
43 |     for c, q in zip(critical_values, quantiles, strict=False):
44 |         sio.write("(" + f"{q:0.3f}" + ", " + f"{c:0.4f}" + ")")
45 |         count += 1
46 |         if count % 4 == 0:
47 |             sio.write(",\n " + " " * len(k))
48 |         else:
49 |             sio.write(", ")
50 |     sio.write(")\n")
51 |     sio.write("kpss_critical_values['" + k + "'] = ")
52 |     sio.write("asarray(" + k + ")")
53 |     sio.write("\n")
54 | 
55 | sio.seek(0)
56 | print(sio.read())
57 | 
--------------------------------------------------------------------------------
/doc/source/unitroot/unitroot.rst:
--------------------------------------------------------------------------------
 1 | Unit Root Testing
 2 | -----------------
 3 | 
 4 | .. module:: arch.unitroot
 5 | 
 6 | Many time series are highly persistent, and determining whether the data appear
 7 | to be stationary or contain a unit root is the first step in many analyses.
 8 | This module contains a number of routines:
 9 | 
10 | * Augmented Dickey-Fuller (:class:`~arch.unitroot.ADF`)
11 | * Dickey-Fuller GLS (:class:`~arch.unitroot.DFGLS`)
12 | * Phillips-Perron (:class:`~arch.unitroot.PhillipsPerron`)
13 | * KPSS (:class:`~arch.unitroot.KPSS`)
14 | * Zivot-Andrews (:class:`~arch.unitroot.ZivotAndrews`)
15 | * Variance Ratio (:class:`~arch.unitroot.VarianceRatio`)
16 | * Automatic Bandwidth Selection (:func:`~arch.unitroot.auto_bandwidth`)
17 | 
18 | The ADF, DFGLS, Phillips-Perron and Zivot-Andrews tests all start with the null of a
19 | unit root and have an alternative of a stationary process. The KPSS test reverses
20 | these, with a null of a stationary process and an alternative of a unit root.
21 | 
22 | .. toctree::
23 |    :maxdepth: 1
24 | 
25 |    Introduction
26 |    Unit Root Testing Examples
27 |    Unit Root Testing
28 | 
29 | Cointegration Analysis
30 | ----------------------
31 | The module extends single-series unit root testing to multiple
32 | series, providing cointegration tests and cointegrating vector estimators.
33 | 
34 | * Cointegration Testing
35 | 
36 |   * Engle-Granger Test (:class:`~arch.unitroot.cointegration.engle_granger`)
37 |   * Phillips-Ouliaris Tests (:class:`~arch.unitroot.cointegration.phillips_ouliaris`)
38 | 
39 | * Cointegrating Vector Estimation
40 | 
41 |   * Dynamic OLS (:class:`~arch.unitroot.cointegration.DynamicOLS`)
42 |   * Fully Modified OLS (:class:`~arch.unitroot.cointegration.FullyModifiedOLS`)
43 |   * Canonical Cointegrating Regression (:class:`~arch.unitroot.cointegration.CanonicalCointegratingReg`)
44 | 
45 | 
46 | .. toctree::
47 |    :maxdepth: 1
48 | 
49 |    Cointegration Testing Examples
50 |    Cointegration Testing and Estimation
51 | 
52 | 
--------------------------------------------------------------------------------
/doc/source/unitroot/introduction.rst:
--------------------------------------------------------------------------------
 1 | Introduction
 2 | ------------
 3 | 
 4 | All tests expect a 1-d series as the first input. The input can be any array that
 5 | can ``squeeze`` into a 1-d array, a pandas ``Series`` or a pandas ``DataFrame`` that
 6 | contains a single variable.
 7 | 
 8 | All tests share a common structure. 
The key elements are:
 9 | 
10 | - ``stat`` - Returns the test statistic
11 | - ``pvalue`` - Returns the p-value of the test statistic
12 | - ``lags`` - Sets or gets the number of lags used in the model. In most tests, ``lags`` can be ``None`` to trigger automatic selection.
13 | - ``trend`` - Sets or gets the trend used in the model. Supported trends vary by model, but include:
14 | 
15 |   - ``'nc'``: No constant
16 |   - ``'c'``: Constant
17 |   - ``'ct'``: Constant and time trend
18 |   - ``'ctt'``: Constant, time trend and quadratic time trend
19 | 
20 | - ``summary()`` - Returns a summary object that can be printed to get a formatted table
21 | 
22 | 
23 | Basic Example
24 | =============
25 | 
26 | This basic example shows the use of the Augmented Dickey-Fuller test to test whether the default premium,
27 | defined as the difference between the yields of large portfolios of BAA and AAA bonds, contains a unit
28 | root. This example uses a constant and time trend.
29 | 
30 | 
31 | .. code-block:: python
32 | 
33 |    import datetime as dt
34 | 
35 |    import pandas_datareader.data as web
36 |    from arch.unitroot import ADF
37 | 
38 |    start = dt.datetime(1919, 1, 1)
39 |    end = dt.datetime(2014, 1, 1)
40 | 
41 |    df = web.DataReader(["AAA", "BAA"], "fred", start, end)
42 |    df['diff'] = df['BAA'] - df['AAA']
43 |    adf = ADF(df['diff'])
44 |    adf.trend = 'ct'
45 | 
46 |    print(adf.summary())
47 | 
48 | which yields
49 | 
50 | ::
51 | 
52 |     Augmented Dickey-Fuller Results
53 |     =====================================
54 |     Test Statistic                 -3.448
55 |     P-value                         0.045
56 |     Lags                               21
57 |     -------------------------------------
58 | 
59 |     Trend: Constant and Linear Time Trend
60 |     Critical Values: -3.97 (1%), -3.41 (5%), -3.13 (10%)
61 |     Null Hypothesis: The process contains a unit root.
62 |     Alternative Hypothesis: The process is weakly stationary.
63 | 
64 | 
--------------------------------------------------------------------------------
/arch/tests/univariate/test_rescale.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | from numpy.testing import assert_allclose
 3 | import pandas as pd
 4 | import pytest
 5 | 
 6 | from arch.univariate import GARCH, Normal, ZeroMean
 7 | 
 8 | 
 9 | @pytest.fixture(scope="module")
10 | def small_data():
11 |     rs = np.random.RandomState([2389280, 238901, 382908031])
12 |     mod = ZeroMean(None, volatility=GARCH(), distribution=Normal(seed=rs))
13 |     sim = mod.simulate([1e-3, 0.05, 0.90], initial_value_vol=1e-3 / 0.05, nobs=1000)
14 |     return sim.data
15 | 
16 | 
17 | @pytest.fixture(scope="module")
18 | def small_data2():
19 |     rs = np.random.RandomState([2389280, 238901, 382908031])
20 |     mod = ZeroMean(None, volatility=GARCH(), distribution=Normal(seed=rs))
21 |     sim = mod.simulate([1e-3, 0.05, 0.90], nobs=1000)
22 |     return sim.data
23 | 
24 | 
25 | @pytest.fixture(scope="module")
26 | def std_data():
27 |     rs = np.random.RandomState([2389280, 238901, 382908031])
28 |     mod = ZeroMean(None, volatility=GARCH(), distribution=Normal(seed=rs))
29 |     sim = mod.simulate([1.0, 0.05, 0.90], nobs=1000)
30 |     return sim.data
31 | 
32 | 
33 | def test_reproducibility(small_data, small_data2):
34 |     pd.testing.assert_series_equal(small_data, small_data2)
35 | 
36 | 
37 | def test_blank(small_data, std_data):
38 |     small_mod = ZeroMean(small_data, volatility=GARCH(), rescale=False)
39 |     small_res = small_mod.fit(starting_values=np.array([1e-3, 0.05, 0.90]), disp="off")
40 |     mod = ZeroMean(std_data, volatility=GARCH(), rescale=False)
41 |     res = mod.fit(starting_values=np.array([1, 0.05, 0.90]), disp="off")
42 | 
43 |     small_param0, *_ = 
small_res.params 44 | param0, *_ = res.params 45 | assert_allclose(1e3 * small_param0, param0, rtol=5e-3, atol=1e9) 46 | 47 | 48 | def test_rescale_fit(small_data, std_data): 49 | small_mod = ZeroMean(small_data, volatility=GARCH(), rescale=True) 50 | small_res = small_mod.fit(disp="off") 51 | direct_mod = ZeroMean(10 * small_data, volatility=GARCH()) 52 | direct_res = direct_mod.fit(disp="off") 53 | assert_allclose(small_res.loglikelihood, direct_res.loglikelihood) 54 | small_fcast = small_res.forecast(start=0) 55 | direct_fcast = direct_res.forecast(start=0) 56 | assert_allclose(small_fcast.variance, direct_fcast.variance) 57 | -------------------------------------------------------------------------------- /arch/utility/testing.py: -------------------------------------------------------------------------------- 1 | from functools import cached_property 2 | 3 | from scipy.stats import chi2 4 | 5 | __all__ = ["WaldTestStatistic"] 6 | 7 | 8 | class WaldTestStatistic: 9 | """ 10 | Test statistic holder for Wald-type tests 11 | 12 | Parameters 13 | ---------- 14 | stat : float 15 | The test statistic 16 | df : int 17 | Degree of freedom. 18 | null : str 19 | A statement of the test's null hypothesis 20 | alternative : str 21 | A statement of the test's alternative hypothesis 22 | name : str, default "" (empty) 23 | Name of test 24 | """ 25 | 26 | def __init__( 27 | self, 28 | stat: float, 29 | df: int, 30 | null: str, 31 | alternative: str, 32 | name: str = "", 33 | ) -> None: 34 | self._stat = stat 35 | self._null = null 36 | self._alternative = alternative 37 | self.df: int = df 38 | self._name = name 39 | self.dist = chi2(df) 40 | self.dist_name: str = f"chi2({df})" 41 | 42 | @property 43 | def stat(self) -> float: 44 | """Test statistic""" 45 | return self._stat 46 | 47 | @cached_property 48 | def pval(self) -> float: 49 | """P-value of test statistic""" 50 | return 1 - self.dist.cdf(self.stat) 51 | 52 | @cached_property 53 | def critical_values(self) -> dict[str, float]: 54 | """Critical values test for common test sizes""" 55 | return dict( 56 | zip(["10%", "5%", "1%"], self.dist.ppf([0.9, 0.95, 0.99]), strict=False) 57 | ) 58 | 59 | @property 60 | def null(self) -> str: 61 | """Null hypothesis""" 62 | return self._null 63 | 64 | @property 65 | def alternative(self) -> str: 66 | return self._alternative 67 | 68 | def __str__(self) -> str: 69 | name = "" if not self._name else self._name + "\n" 70 | return ( 71 | f"{name}H0: {self.null}\n{name}H1: {self.alternative}\nStatistic: {self.stat:0.4f}\n" 72 | f"P-value: {self.pval:0.4f}\nDistributed: {self.dist}" 73 | ) 74 | 75 | def __repr__(self) -> str: 76 | return ( 77 | self.__str__() + "\n" + self.__class__.__name__ + f", id: {hex(id(self))}" 78 | ) 79 | -------------------------------------------------------------------------------- /doc/source/images/favicon.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 10 | 11 | 12 | 13 | 14 | 20 | 21 | 22 | 28 | 29 | 30 | 36 | 37 | 38 | 44 | 45 | 46 | 52 | 53 | 54 | 60 | 61 | 62 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /doc/source/images/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 10 | 11 | 12 | 13 | 14 | 20 | 21 | 22 | 28 | 29 | 30 | 36 | 37 | 38 | 44 | 45 | 46 | 52 | 53 | 54 | 60 | 61 | 62 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 
-------------------------------------------------------------------------------- /doc/source/images/favicon.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | import matplotlib.font_manager 4 | import matplotlib.pyplot as plt 5 | import numpy as np 6 | import pandas_datareader as pdr 7 | import seaborn as sns 8 | 9 | colors = sns.color_palette("muted") 10 | 11 | NBINS = 7 12 | 13 | plt.rcParams["figure.figsize"] = (10, 10) 14 | 15 | start = datetime.datetime(1980, 1, 1) 16 | end = datetime.datetime(2020, 1, 1) 17 | data = pdr.get_data_yahoo("^GSPC", start, end) 18 | price = data["Adj Close"] 19 | rets = 100 * price.resample("M").last().pct_change() 20 | 21 | lower, upper = rets.quantile([0.01, 0.99]) 22 | bins = np.linspace(lower, upper, NBINS) 23 | fig = plt.figure(frameon=False) 24 | fig.set_size_inches(8, 8) 25 | ax = fig.add_subplot("111") 26 | rwidth = np.diff(bins).mean() * 0.22 27 | _, _, patches = ax.hist(rets, bins=bins, rwidth=rwidth, align="mid") # '#2196f3') 28 | for i, patch in enumerate(patches): 29 | patch.set_facecolor(colors[i]) 30 | ax.set_xticks([]) 31 | ax.set_yticks([]) 32 | ax.set_ylabel("") 33 | sns.despine(left=True, bottom=True) 34 | fig.tight_layout(pad=1.0) 35 | 36 | fig.savefig("favicon.svg", transparent=True, bbox_inches=0) 37 | fig.savefig("favicon.png", transparent=True) 38 | 39 | fig = plt.figure(frameon=False) 40 | fig.set_size_inches(8, 8) 41 | ax = fig.add_subplot("111") 42 | rwidth = np.diff(bins).mean() * 0.22 43 | ax.hist(rets, bins=bins, rwidth=rwidth, align="mid", color="#ffffff") 44 | ax.set_xticks([]) 45 | ax.set_yticks([]) 46 | ax.set_ylabel("") 47 | sns.despine(left=True, bottom=True) 48 | fig.tight_layout(pad=1.0) 49 | 50 | fig.savefig("logo.svg", transparent=True, bbox_inches=0) 51 | 52 | 53 | prop = matplotlib.font_manager.FontProperties("Roboto") 54 | prop.set_size(216) 55 | fig = plt.figure(frameon=False, figsize=(12, 4)) 56 | ax = fig.add_subplot("111") 57 | rwidth = np.diff(bins).mean() * 0.22 58 | _, _, patches = ax.hist(rets, bins=bins, rwidth=rwidth, align="mid") 59 | for i, patch in enumerate(patches): 60 | patch.set_facecolor(colors[i]) 61 | ax.set_xticks([]) 62 | ax.set_yticks([]) 63 | text = ax.text(11, 0, "arch") 64 | text.set_fontproperties(prop) 65 | text.set_fontsize(216) 66 | text.set_color("#757575") 67 | ax.set_ylim(0, 180) 68 | sns.despine(left=True, bottom=True) 69 | fig.tight_layout(pad=1.0) 70 | fig.savefig("color-logo.png", transparent=True) 71 | fig.savefig("color-logo.svg", transparent=True) 72 | -------------------------------------------------------------------------------- /arch/tests/test_examples.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import sys 4 | 5 | import pytest 6 | 7 | SKIP = True 8 | REASON = "Required packages not available" 9 | 10 | try: 11 | import jupyter_client 12 | 13 | # matplotlib is required for most notebooks 14 | import matplotlib as mpl # noqa: F401 15 | from nbconvert.preprocessors import ExecutePreprocessor 16 | import nbformat 17 | 18 | kernels = jupyter_client.kernelspec.find_kernel_specs() 19 | SKIP = False 20 | 21 | if sys.platform.startswith("win") and sys.version_info >= ( 22 | 3, 23 | 8, 24 | ): # pragma: no cover 25 | import asyncio 26 | 27 | try: 28 | from asyncio import WindowsSelectorEventLoopPolicy 29 | except ImportError: 30 | pass # Can't assign a policy which doesn't exist. 
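# Background on the policy juggling above (editorial comment): jupyter_client
# runs on tornado/pyzmq, which require the selector-based asyncio event loop;
# Python 3.8 made the proactor loop the Windows default, so the selector
# policy is restored below whenever the import above succeeded.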
31 | else: 32 | if not isinstance( 33 | asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy 34 | ): 35 | asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) 36 | 37 | except ImportError: # pragma: no cover 38 | pytestmark = pytest.mark.skip(reason=REASON) 39 | 40 | SLOW_NOTEBOOKS = ["multiple-comparison_examples.ipynb"] 41 | if bool(os.environ.get("ARCH_TEST_SLOW_NOTEBOOKS", "")): # pragma: no cover 42 | SLOW_NOTEBOOKS = [] 43 | kernel_name = "python%s" % sys.version_info.major 44 | 45 | head, _ = os.path.split(__file__) 46 | NOTEBOOK_DIR = os.path.abspath(os.path.join(head, "..", "..", "examples")) 47 | 48 | nbs = sorted(glob.glob(os.path.join(NOTEBOOK_DIR, "*.ipynb"))) 49 | ids = [os.path.split(nb)[-1].split(".")[0] for nb in nbs] 50 | if nbs: # pragma: no cover 51 | 52 | @pytest.mark.slow 53 | @pytest.mark.parametrize("notebook", nbs, ids=ids) 54 | @pytest.mark.skipif(SKIP, reason=REASON) 55 | def test_notebook(notebook): 56 | nb_name = os.path.split(notebook)[-1] 57 | if nb_name in SLOW_NOTEBOOKS: 58 | pytest.skip("Notebook is too slow to test") 59 | nb = nbformat.read(notebook, as_version=4) 60 | ep = ExecutePreprocessor( 61 | allow_errors=False, timeout=240, kernel_name=kernel_name 62 | ) 63 | ep.preprocess(nb, {"metadata": {"path": NOTEBOOK_DIR}}) 64 | 65 | else: 66 | REASON = "No notebooks found and so no tests run" 67 | pytestmark = pytest.mark.skip(reason=REASON) 68 | -------------------------------------------------------------------------------- /doc/source/bootstrap/parameter-covariance-estimation.rst: -------------------------------------------------------------------------------- 1 | Covariance Estimation 2 | ===================== 3 | The bootstrap can be used to estimate parameter covariances in applications where 4 | analytical computation is challenging, or simply as an alternative to 5 | traditional estimators. 6 | 7 | This example estimates the covariance of the mean, standard deviation and 8 | Sharpe ratio of the S&P 500 using Yahoo! Finance data. 9 | 10 | .. code-block:: python 11 | 12 | import datetime as dt 13 | import pandas as pd 14 | import pandas_datareader.data as web 15 | 16 | start = dt.datetime(1951, 1, 1) 17 | end = dt.datetime(2014, 1, 1) 18 | sp500 = web.DataReader('^GSPC', 'yahoo', start=start, end=end) 19 | low = sp500.index.min() 20 | high = sp500.index.max() 21 | monthly_dates = pd.date_range(low, high, freq='M') 22 | monthly = sp500.reindex(monthly_dates, method='ffill') 23 | returns = 100 * monthly['Adj Close'].pct_change().dropna() 24 | 25 | The function below returns the parameters of interest. 26 | 27 | .. code-block:: python 28 | 29 | import numpy as np 30 | 31 | def sharpe_ratio(r): 32 | mu = 12 * r.mean(0) 33 | sigma = np.sqrt(12 * r.var(0)) 34 | sr = mu / sigma 35 | return np.array([mu, sigma, sr]) 36 | 37 | Like all applications of the bootstrap, it is important to choose a bootstrap 38 | that captures the dependence in the data. This example uses the stationary 39 | bootstrap with an average block size of 12. 40 | 41 | .. code-block:: python 42 | 43 | import pandas as pd 44 | from arch.bootstrap import StationaryBootstrap 45 | 46 | bs = StationaryBootstrap(12, returns) 47 | param_cov = bs.cov(sharpe_ratio) 48 | index = ['mu', 'sigma', 'SR'] 49 | params = sharpe_ratio(returns) 50 | params = pd.Series(params, index=index) 51 | param_cov = pd.DataFrame(param_cov, index=index, columns=index) 52 | 53 | The output is 54 | 55 | ..
code-block:: python 54 | 55 | >>> params 56 | mu 8.148534 57 | sigma 14.508540 58 | SR 0.561637 59 | dtype: float64 60 | 61 | >>> param_cov 62 | mu sigma SR 63 | mu 3.729435 -0.442891 0.273945 64 | sigma -0.442891 0.495087 -0.049454 65 | SR 0.273945 -0.049454 0.020830 66 | 67 | .. note:: 68 | 69 | The covariance estimator is centered using the average of the bootstrapped 70 | estimators. The original sample estimator can be used to center using the 71 | keyword argument ``recenter=False``. 72 | -------------------------------------------------------------------------------- /doc/source/multiple-comparison/multiple-comparison-reference.rst: -------------------------------------------------------------------------------- 1 | Module Reference 2 | ---------------- 3 | 4 | .. module:: arch.bootstrap.multiple_comparison 5 | :synopsis: Multiple comparison procedures 6 | 7 | .. currentmodule:: arch.bootstrap 8 | 9 | Test of Superior Predictive Ability (SPA), Reality Check 10 | ======================================================== 11 | The test of Superior Predictive Ability (Hansen 2005), or SPA, is an improved 12 | version of the Reality Check (White 2000). It tests whether the best 13 | forecasting performance from a set of models is better than that of the 14 | forecasts from a benchmark model. A model is "better" if its losses are 15 | smaller than those from the benchmark. Formally, it tests the null 16 | 17 | .. math:: 18 | 19 | H_0: \min_i E[L_i] \geq E[L_{bm}] 20 | 21 | where :math:`L_i` is the loss from model *i* and :math:`L_{bm}` is the loss 22 | from the benchmark model. The alternative is 23 | 24 | .. math:: 25 | 26 | H_1: \min_i E[L_i] < E[L_{bm}] 27 | 28 | This procedure accounts for dependence between the losses and the fact that 29 | there are potentially many alternative models being considered. 30 | 31 | **Note**: Also callable using ``arch.bootstrap.RealityCheck`` 32 | 33 | 34 | .. autosummary:: 35 | :toctree: generated/ 36 | 37 | SPA 38 | 39 | 40 | Stepwise Multiple Testing (StepM) 41 | ================================= 42 | 43 | The Stepwise Multiple Testing procedure (Romano & Wolf (2005)) is closely 44 | related to the SPA, except that it returns a set of models that are superior 45 | to the benchmark model, rather than the p-value from the null. They are 46 | so closely related that :class:`~arch.bootstrap.StepM` is essentially 47 | a wrapper around :class:`~arch.bootstrap.SPA` with some small modifications 48 | to allow multiple calls. 49 | 50 | .. autosummary:: 51 | :toctree: generated/ 52 | 53 | StepM 54 | 55 | Model Confidence Set (MCS) 56 | ========================== 57 | 58 | The Model Confidence Set (Hansen, Lunde & Nason (2011)) differs from other 59 | multiple comparison procedures in that there is no benchmark. The MCS attempts 60 | to identify the set of models which produce the same expected loss, while 61 | controlling the probability that a model that is worse than the best model is 62 | in the model confidence set. Like the other MCPs, it controls the 63 | Familywise Error Rate rather than the usual test size. 64 | 65 | ..
autosummary:: 66 | :toctree: generated/ 67 | 68 | MCS 69 | -------------------------------------------------------------------------------- /arch/conftest.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | import pandas as pd 5 | import pytest 6 | 7 | pytest_plugins = [ 8 | "arch.tests.unitroot.cointegration_data", 9 | ] 10 | 11 | 12 | logger = logging.getLogger(__name__) 13 | COW = bool(os.environ.get("ARCH_TEST_COPY_ON_WRITE", "")) 14 | try: 15 | import warnings 16 | 17 | with warnings.catch_warnings(): 18 | warnings.simplefilter("ignore") 19 | pd.options.mode.copy_on_write = COW 20 | except AttributeError: 21 | pass 22 | 23 | if COW: 24 | logger.critical("Copy on Write Enabled!") 25 | else: 26 | logger.critical("Copy on Write disabled") 27 | 28 | 29 | def pytest_configure(config): 30 | # Minimal config to simplify running tests from lm.test() 31 | config.addinivalue_line("markers", "slow: mark a test as slow") 32 | config.addinivalue_line( 33 | "filterwarnings", "ignore:Method .ptp is deprecated:FutureWarning" 34 | ) 35 | 36 | 37 | def pytest_addoption(parser): 38 | parser.addoption("--skip-slow", action="store_true", help="skip slow tests") 39 | parser.addoption("--only-slow", action="store_true", help="run only slow tests") 40 | 41 | 42 | def pytest_runtest_setup(item): 43 | if "slow" in item.keywords and item.config.getoption( 44 | "--skip-slow" 45 | ): # pragma: no cover 46 | pytest.skip("skipping due to --skip-slow") # pragma: no cover 47 | 48 | if "slow" not in item.keywords and item.config.getoption( 49 | "--only-slow" 50 | ): # pragma: no cover 51 | pytest.skip("skipping due to --only-slow") # pragma: no cover 52 | 53 | 54 | @pytest.fixture 55 | def agg_backend(): 56 | """ 57 | Fixture that switches the backend to agg for the duration of the test 58 | 59 | Returns 60 | ------- 61 | switch_backend : callable 62 | Function that will change the backend to agg when called 63 | 64 | Notes 65 | ----- 66 | Used by passing as an argument to the function that produces a plot, 67 | for example 68 | 69 | def test_some_plot(agg_backend): 70 | 71 | """ 72 | backend = None 73 | try: 74 | import matplotlib as mpl # noqa: PLC0415 75 | 76 | backend = mpl.get_backend() 77 | mpl.use("agg") 78 | 79 | except ImportError: 80 | # Nothing to do if MPL is not available 81 | pass 82 | 83 | def null(): 84 | pass 85 | 86 | yield null 87 | if backend: 88 | import matplotlib as mpl # noqa: PLC0415 89 | 90 | mpl.use(backend) 91 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/simulation/adf_z_simlation_process.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | 4 | from adf_simulation import OUTPUT_PATH, PERCENTILES, TIME_SERIES_LENGTHS, TRENDS 5 | from black import FileMode, TargetVersion, format_file_contents 6 | import numpy as np 7 | import pandas as pd 8 | from shared import estimate_cv_regression, fit_pval_model, format_dict 9 | 10 | critical_values = (1.0, 5.0, 10.0) 11 | adf_z_cv_approx = {} 12 | adf_z_max = {} 13 | adf_z_min = {} 14 | adf_z_star = {} 15 | adf_z_small_p = {} 16 | adf_z_large_p = {} 17 | 18 | for trend in TRENDS: 19 | data_files = glob.glob(os.path.join(OUTPUT_PATH, f"adf_z_{trend}-*.npz")) 20 | percentiles = PERCENTILES 21 | all_results = [] 22 | for df in data_files: 23 | with np.load(df) as data: 24 | all_results.append(data["results"]) 25 | results = np.hstack(all_results) 26 | 
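# Each .npz file appears to hold a (len(PERCENTILES), len(TIME_SERIES_LENGTHS))
# block of simulated quantiles -- inferred from the DataFrame construction
# below -- so hstack joins the blocks column-wise and the column labels must
# repeat TIME_SERIES_LENGTHS once per data file.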
cols = TIME_SERIES_LENGTHS.tolist() * len(data_files) 27 | results_df = pd.DataFrame( 28 | results, index=pd.Index(PERCENTILES / 100.0), columns=cols 29 | ) 30 | 31 | cv_approx = estimate_cv_regression(results_df, critical_values) 32 | adf_z_cv_approx[trend] = [cv_approx[cv] for cv in critical_values] 33 | 34 | pvals = fit_pval_model(results_df[2000], small_order=4, use_log=True) 35 | adf_z_max[trend] = pvals.tau_max 36 | adf_z_min[trend] = pvals.tau_min 37 | adf_z_star[trend] = pvals.tau_star 38 | adf_z_small_p[trend] = pvals.small_p 39 | adf_z_large_p[trend] = pvals.large_p 40 | 41 | formatted_code = "adf_z_min = " + format_dict(adf_z_min) 42 | formatted_code += "\n\nadf_z_star = " + format_dict(adf_z_star) 43 | formatted_code += "\n\nadf_z_max = " + format_dict(adf_z_max) 44 | formatted_code += "\n\n# The small p parameters are for np.log(np.abs(stat))\n" 45 | formatted_code += "adf_z_small_p = " + format_dict(adf_z_small_p) 46 | formatted_code += "\n\nadf_z_large_p = " + format_dict(adf_z_large_p) 47 | formatted_code += "\n\nadf_z_cv_approx = " + format_dict(adf_z_cv_approx) 48 | 49 | with open("../dickey_fuller.py") as cvs: 50 | lines = cvs.readlines() 51 | 52 | retain = [] 53 | for line in lines: 54 | if "# Z values from" in line: 55 | break 56 | retain.append(line) 57 | retain.append("\n\n# Z values from new simulations, 500 exercises, 250,000 per ex.\n") 58 | 59 | formatted_code = "".join(retain) + formatted_code 60 | 61 | targets = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38} 62 | fm = FileMode(target_versions=targets) 63 | formatted_code = format_file_contents(formatted_code, fast=False, mode=fm) 64 | 65 | with open("../dickey_fuller.py", "w") as cvs: 66 | cvs.write(formatted_code) 67 | -------------------------------------------------------------------------------- /arch/_typing.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Callable, Hashable 2 | import datetime as dt 3 | from typing import Any, Literal, Optional, TypeVar, Union 4 | 5 | import numpy as np 6 | from pandas import DataFrame, Series, Timestamp 7 | 8 | __all__ = [ 9 | "AnyArray", 10 | "AnyArray1D", 11 | "AnyPandas", 12 | "ArrayLike", 13 | "ArrayLike1D", 14 | "ArrayLike2D", 15 | "BoolArray", 16 | "BootstrapIndexT", 17 | "DateLike", 18 | "Float64Array", 19 | "Float64Array1D", 20 | "Float64Array2D", 21 | "FloatOrArray", 22 | "ForecastingMethod", 23 | "Int32Array", 24 | "Int64Array", 25 | "Int64Array2D", 26 | "IntArray", 27 | "Label", 28 | "Literal", 29 | "NDArray", 30 | "NDArrayOrFrame", 31 | "RNGType", 32 | "RandomStateState", 33 | "Uint32Array", 34 | "UnitRootTrend", 35 | ] 36 | 37 | NDArray = Union[np.ndarray] 38 | Float64Array = np.ndarray[tuple[int, ...], np.dtype[np.float64]] # pragma: no cover 39 | Float64Array1D = np.ndarray[tuple[int], np.dtype[np.float64]] # pragma: no cover 40 | Float64Array2D = np.ndarray[tuple[int, int], np.dtype[np.float64]] # pragma: no cover 41 | Int64Array = np.ndarray[tuple[int, ...], np.dtype[np.int64]] # pragma: no cover 42 | Int64Array1D = np.ndarray[tuple[int], np.dtype[np.int64]] # pragma: no cover 43 | Int64Array2D = np.ndarray[tuple[int, int], np.dtype[np.int64]] # pragma: no cover 44 | Int32Array = np.ndarray[tuple[int, ...], np.dtype[np.intc]] # pragma: no cover 45 | IntArray = np.ndarray[tuple[int, ...], np.dtype[np.int64]] # pragma: no cover 46 | BoolArray = np.ndarray[tuple[int, ...], np.dtype[np.bool_]] # pragma: no cover 47 | AnyArray = np.ndarray[tuple[int, ...], Any] # pragma: no 
cover 48 | AnyArray1D = np.ndarray[tuple[int], Any] # pragma: no cover 49 | Uint32Array = np.ndarray[tuple[int, ...], np.dtype[np.uintc]] # pragma: no cover 50 | 51 | BootstrapIndexT = Union[ 52 | Int64Array1D, 53 | tuple[Int64Array1D, ...], 54 | tuple[list[Int64Array1D], dict[str, Int64Array1D]], 55 | ] 56 | RandomStateState = tuple[str, Uint32Array, int, int, float] 57 | 58 | RNGType = Callable[[Union[int, tuple[int, ...]]], Float64Array] 59 | ArrayLike1D = Union[Float64Array1D, Series] 60 | ArrayLike2D = Union[Float64Array2D, DataFrame] 61 | ArrayLike = Union[NDArray, DataFrame, Series] 62 | NDArrayOrFrame = TypeVar("NDArrayOrFrame", Float64Array, DataFrame) 63 | AnyPandas = Union[Series, DataFrame] 64 | DateLike = Union[str, dt.datetime, np.datetime64, Timestamp] 65 | Label = Optional[Hashable] 66 | FloatOrArray = TypeVar("FloatOrArray", float, np.ndarray) 67 | UnitRootTrend = Literal["n", "c", "ct", "ctt"] 68 | ForecastingMethod = Literal["analytic", "simulation", "bootstrap"] 69 | -------------------------------------------------------------------------------- /arch/utility/exceptions.py: -------------------------------------------------------------------------------- 1 | class InvalidLengthWarning(Warning): 2 | pass 3 | 4 | 5 | invalid_length_doc: str = """ 6 | The length of {var} is not an exact multiple of {block}, and so the final 7 | {drop} observations have been dropped. 8 | """ 9 | 10 | deprecation_doc: str = """ 11 | {old_func} has been deprecated. Please use {new_func}. 12 | """ 13 | 14 | 15 | class ConvergenceWarning(Warning): 16 | pass 17 | 18 | 19 | convergence_warning: str = """\ 20 | The optimizer returned code {code}. The message is: 21 | {string_message} 22 | See scipy.optimize.fmin_slsqp for code meaning. 23 | """ 24 | 25 | 26 | class StartingValueWarning(Warning): 27 | pass 28 | 29 | 30 | starting_value_warning: str = """\ 31 | Starting values do not satisfy the parameter constraints in the model. The 32 | provided starting values will be ignored. 33 | """ 34 | 35 | 36 | class InitialValueWarning(Warning): 37 | pass 38 | 39 | 40 | initial_value_warning: str = """\ 41 | Parameters are not consistent with a stationary model. Using the intercept 42 | to initialize the model. 43 | """ 44 | 45 | 46 | class DataScaleWarning(Warning): 47 | pass 48 | 49 | 50 | data_scale_warning: str = """\ 51 | y is poorly scaled, which may affect convergence of the optimizer when 52 | estimating the model parameters. The scale of y is {0:0.4g}. Parameter 53 | estimation works better when this value is between 1 and 1000. The recommended 54 | rescaling is {1:0.4g} * y. 55 | 56 | This warning can be disabled by either rescaling y before initializing the 57 | model or by setting rescale=False. 58 | """ 59 | 60 | 61 | arg_type_error: str = """\ 62 | Only NumPy arrays and pandas DataFrames and Series are supported in positional 63 | arguments. Positional input {i} has type {arg_type}. 64 | """ 65 | 66 | kwarg_type_error: str = """\ 67 | Only NumPy arrays and pandas DataFrames and Series are supported in keyword 68 | arguments. Input `{key}` has type {arg_type}. 69 | """ 70 | 71 | 72 | class StudentizationError(RuntimeError): 73 | pass 74 | 75 | 76 | studentization_error: str = """ 77 | The estimated covariance computed in the studentization is numerically 0. 78 | This might occur if your statistic has no variation. It is not possible to 79 | apply the studentized bootstrap if any of the values returned 80 | by func have no variability when resampled.
The estimated covariance 81 | is:\n\n {cov} 82 | """ 83 | 84 | 85 | class InfeasibleTestException(RuntimeError): 86 | pass 87 | 88 | 89 | class PerformanceWarning(UserWarning): 90 | """Warning issued if recursions are run in CPython""" 91 | 92 | 93 | class ValueWarning(UserWarning): 94 | """Warning issued if value is problematic but not fatal.""" 95 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/simulation/adf_simulation.py: -------------------------------------------------------------------------------- 1 | import os 2 | import platform 3 | 4 | from numpy import arange, array, cumsum, dot, ones, vstack 5 | from numpy.linalg import pinv 6 | from numpy.random import Generator, RandomState 7 | 8 | from arch._typing import UnitRootTrend 9 | 10 | # Storage Location 11 | if platform.system() == "Linux": 12 | BASE_PATH = os.path.join("/mnt", "c") 13 | else: 14 | BASE_PATH = "C:\\\\" 15 | OUTPUT_PATH = os.path.join(BASE_PATH, "Users", "kevin", "Dropbox", "adf-z") 16 | 17 | _PERCENTILES = ( 18 | list(arange(1, 10)) 19 | + list(arange(10, 50, 5)) 20 | + list(arange(50, 950, 10)) 21 | + list(arange(950, 990, 5)) 22 | + list(arange(990, 999)) 23 | ) 24 | PERCENTILES = array(_PERCENTILES) / 10.0 25 | 26 | TRENDS = ("n", "c", "ct", "ctt") 27 | TIME_SERIES_LENGTHS = array( 28 | ( 29 | 20, 30 | 25, 31 | 30, 32 | 35, 33 | 40, 34 | 45, 35 | 50, 36 | 60, 37 | 70, 38 | 80, 39 | 90, 40 | 100, 41 | 120, 42 | 140, 43 | 160, 44 | 180, 45 | 200, 46 | 250, 47 | 300, 48 | 350, 49 | 400, 50 | 450, 51 | 500, 52 | 600, 53 | 700, 54 | 800, 55 | 900, 56 | 1000, 57 | 1200, 58 | 1400, 59 | 2000, 60 | ) 61 | ) 62 | 63 | 64 | def adf_simulation( 65 | n: int, 66 | trend: UnitRootTrend, 67 | b: int, 68 | rng: None | RandomState | Generator = None, 69 | ) -> float: 70 | """ 71 | Simulates the empirical distribution of the ADF z-test statistic 72 | """ 73 | if rng is None: 74 | rng = RandomState(0) 75 | standard_normal = rng.standard_normal 76 | 77 | nobs = n - 1 78 | z = None 79 | if trend == "c": 80 | z = ones((nobs, 1)) 81 | elif trend == "ct": 82 | z = vstack((ones(nobs), arange(1, nobs + 1))).T 83 | elif trend == "ctt": 84 | tau = arange(1, nobs + 1) 85 | z = vstack((ones(nobs), tau, tau**2.0)).T 86 | 87 | y = standard_normal((n + 50, b)) 88 | y = cumsum(y, axis=0) 89 | y = y[50:, :] 90 | lhs = y[1:, :] 91 | rhs = y[:-1, :] 92 | if z is not None: 93 | z_inv = pinv(z) 94 | beta = dot(z_inv, lhs) 95 | lhs = lhs - dot(z, beta) 96 | beta = dot(z_inv, rhs) 97 | rhs = rhs - dot(z, beta) 98 | 99 | xpy = sum(rhs * lhs, 0) 100 | xpx = sum(rhs**2.0, 0) 101 | gamma = xpy / xpx 102 | nobs = lhs.shape[0] 103 | stat = nobs * (gamma - 1.0) 104 | return stat 105 | -------------------------------------------------------------------------------- /arch/tests/univariate/test_moment.py: -------------------------------------------------------------------------------- 1 | from numpy import exp, inf, log, nan, ones_like, pi 2 | from numpy.testing import assert_almost_equal, assert_equal 3 | import pytest 4 | from scipy.integrate import quad 5 | from scipy.special import gammaln 6 | 7 | from arch.univariate.distribution import ( 8 | GeneralizedError, 9 | Normal, 10 | SkewStudent, 11 | StudentsT, 12 | ) 13 | 14 | DISTRIBUTIONS = [ 15 | (SkewStudent(), [6, -0.1]), 16 | (SkewStudent(), [6, -0.5]), 17 | (SkewStudent(), [6, 0.1]), 18 | (SkewStudent(), [6, 0.5]), 19 | (GeneralizedError(), [1.5]), 20 | (GeneralizedError(), [2.1]), 21 | (StudentsT(), [6]), 22 | (StudentsT(), [7]), 23 |
(Normal(), None), 24 | ] 25 | 26 | 27 | @pytest.mark.parametrize(("dist", "params"), DISTRIBUTIONS) 28 | def test_moment(dist, params): 29 | """ 30 | Ensures that Distribution.moment and .partial_moment agree 31 | with numeric integrals for orders n=0,...,5 and z=-5,...,4 32 | 33 | Parameters 34 | ---------- 35 | dist : distribution.Distribution 36 | The distribution whose moments are being checked 37 | params : List 38 | List of parameters 39 | """ 40 | 41 | assert_equal(dist.moment(-1, params), nan) 42 | assert_equal(dist.partial_moment(-1, 0.0, params), nan) 43 | 44 | # verify moments that exist 45 | def f(x, n): 46 | sigma2 = ones_like(x) 47 | return (x**n) * exp(dist.loglikelihood(params, x, sigma2, True)) 48 | 49 | for n in range(6): # moments 0-5 50 | # complete moments 51 | m_quad = quad(f, -inf, inf, args=(n,))[0] 52 | m_method = dist.moment(n, params) 53 | assert_almost_equal(m_quad, m_method) 54 | 55 | # partial moments at z=-5,...,4 56 | # SkewT integral is broken up for numerical stability 57 | for z in range(-5, 5): # partial moments at z=-5,...,4 58 | if isinstance(dist, SkewStudent): 59 | eta, lam = params 60 | c = gammaln((eta + 1) / 2) - gammaln(eta / 2) - log(pi * (eta - 2)) / 2 61 | a = 4 * lam * exp(c) * (eta - 2) / (eta - 1) 62 | b = (1 + 3 * lam**2 - a**2) ** 0.5 63 | loc = -a / b 64 | if z < loc: 65 | m_quad = quad(f, -inf, z, args=(n,))[0] 66 | else: 67 | m_quad = ( 68 | quad(f, -inf, loc - 1e-9, args=(n,))[0] 69 | + quad(f, loc + 1e-9, z, args=(n,))[0] 70 | ) 71 | 72 | else: 73 | m_quad = quad(f, -inf, z, args=(n,))[0] 74 | 75 | m_method = dist.partial_moment(n, z, params) 76 | assert_almost_equal(m_quad, m_method) 77 | -------------------------------------------------------------------------------- /arch/tests/unitroot/cointegration_data.py: -------------------------------------------------------------------------------- 1 | from arch.compat.pandas import MONTH_END 2 | 3 | from typing import cast 4 | 5 | import numpy as np 6 | import pandas as pd 7 | import pytest 8 | 9 | from arch._typing import ArrayLike2D, Float64Array, Float64Array2D 10 | 11 | 12 | @pytest.fixture(scope="module", params=[True, False]) 13 | def data(request) -> tuple[Float64Array, Float64Array]: 14 | g = np.random.RandomState([12839028, 3092183, 902813]) 15 | e = g.standard_normal((2000, 2)) 16 | phi = g.random_sample((3, 2, 2)) 17 | phi[:, 0, 0] *= 0.8 / phi[:, 0, 0].sum() 18 | phi[:, 1, 1] *= 0.8 / phi[:, 1, 1].sum() 19 | phi[:, 0, 1] *= 0.2 / phi[:, 0, 1].sum() 20 | phi[:, 1, 0] *= 0.2 / phi[:, 1, 0].sum() 21 | y = e.copy() 22 | for i in range(3, y.shape[0]): 23 | y[i] = e[i] 24 | for j in range(3): 25 | y[i] += (phi[j] @ y[i - j - 1].T).T 26 | y = y[-1000:] 27 | if request.param: 28 | df = pd.DataFrame(y, columns=["y", "x"]) 29 | return np.asarray(df.iloc[:, :1], dtype=float), np.asarray( 30 | df.iloc[:, 1:], dtype=float 31 | ) 32 | return y[:, :1], y[:, 1:] 33 | 34 | 35 | @pytest.fixture(scope="module", params=[True, False], ids=["pandas", "numpy"]) 36 | def trivariate_data(request) -> tuple[ArrayLike2D, ArrayLike2D]: 37 | rs = np.random.RandomState([922019, 12882912, 192010, 10189, 109981]) 38 | nobs = 1000 39 | burn = 100 40 | e = rs.standard_normal((nobs + burn, 3)) 41 | y = e.copy() 42 | for i in range(1, 3): 43 | roots = np.ones(3) 44 | roots[1:] = rs.random_sample(2) 45 | ar = -np.poly(roots)[1:] 46 | lags = np.arange(1, 4) 47 | for j in range(3, nobs + burn): 48 | y[j, i] = y[j - lags, i] @ ar + e[j, i] 49 | y[:, 0] = 10 + 0.75 * y[:, 1] + 0.25 * y[:, 2] + e[:, 0] 50 | y =
y[burn:] 51 | theta = np.pi * (2 * rs.random_sample(3) - 1) 52 | rot = np.eye(3) 53 | idx = 0 54 | for i in range(3): 55 | for j in range(i + 1, 3): 56 | th = theta[idx] 57 | c = np.cos(th) 58 | s = np.sin(th) 59 | r = np.eye(3) 60 | r[j, j] = r[i, i] = c 61 | r[i, j] = -s 62 | r[j, i] = s 63 | rot = rot @ r 64 | idx += 1 65 | y = y @ rot 66 | if request.param: 67 | dt_index = pd.date_range("1-1-2000", periods=nobs, freq=MONTH_END) 68 | cols = [f"y{i}" for i in range(1, 4)] 69 | data = pd.DataFrame(y, columns=cols, index=dt_index) 70 | return cast("pd.DataFrame", data.iloc[:, :1]), cast( 71 | "pd.DataFrame", data.iloc[:, 1:] 72 | ) 73 | 74 | return cast("Float64Array2D", y[:, :1]), cast("Float64Array2D", y[:, 1:]) 75 | -------------------------------------------------------------------------------- /doc/source/bootstrap/low-level-interface.rst: -------------------------------------------------------------------------------- 1 | Low-level Interfaces 2 | -------------------- 3 | 4 | Constructing Parameter Estimates 5 | ================================ 6 | The bootstrap method ``apply`` can be used to directly compute parameter estimates 7 | from a function and the bootstrapped data. 8 | 9 | This example makes use of monthly S&P 500 data. 10 | 11 | .. code-block:: python 12 | 13 | import datetime as dt 14 | 15 | import pandas as pd 16 | import pandas_datareader.data as web 17 | 18 | start = dt.datetime(1951, 1, 1) 19 | end = dt.datetime(2014, 1, 1) 20 | sp500 = web.DataReader('^GSPC', 'yahoo', start=start, end=end) 21 | low = sp500.index.min() 22 | high = sp500.index.max() 23 | monthly_dates = pd.date_range(low, high, freq='M') 24 | monthly = sp500.reindex(monthly_dates, method='ffill') 25 | returns = 100 * monthly['Adj Close'].pct_change().dropna() 26 | 27 | The function will compute the Sharpe ratio -- the (annualized) mean divided by 28 | the (annualized) standard deviation. 29 | 30 | .. code-block:: python 31 | 32 | import numpy as np 33 | def sharpe_ratio(x): 34 | return np.array([12 * x.mean() / np.sqrt(12 * x.var())]) 35 | 36 | The bootstrapped Sharpe ratios can be directly computed using `apply`. 37 | 38 | .. code-block:: python 39 | 40 | import seaborn 41 | from arch.bootstrap import IIDBootstrap 42 | bs = IIDBootstrap(returns) 43 | sharpe_ratios = bs.apply(sharpe_ratio, 1000) 44 | sharpe_ratios = pd.DataFrame(sharpe_ratios, columns=['Sharpe Ratio']) 45 | sharpe_ratios.hist(bins=20) 46 | 47 | .. image:: bootstrap_histogram.png 48 | 49 | 50 | The Bootstrap Iterator 51 | ====================== 52 | The lowest-level method to use a bootstrap is the iterator. This is used 53 | internally in all higher-level methods that estimate a function using multiple 54 | bootstrap replications. The iterator returns a two-element tuple where the 55 | first element contains all positional arguments (in the order input) passed when 56 | constructing the bootstrap instance, and the second contains all keyword 57 | arguments passed when constructing the instance. 58 | 59 | This example makes use of simulated data to demonstrate how to use the 60 | bootstrap iterator. 61 | 62 | ..
code-block:: python 63 | 64 | import pandas as pd 65 | import numpy as np 66 | 67 | from arch.bootstrap import IIDBootstrap 68 | 69 | x = np.random.randn(1000, 2) 70 | y = pd.DataFrame(np.random.randn(1000, 3)) 71 | z = np.random.rand(1000, 10) 72 | bs = IIDBootstrap(x, y=y, z=z) 73 | 74 | for pos, kw in bs.bootstrap(1000): 75 | xstar = pos[0] # pos is always a tuple, even when a singleton 76 | ystar = kw['y'] # A dictionary 77 | zstar = kw['z'] # A dictionary 78 | 79 | -------------------------------------------------------------------------------- /arch/tests/utility/test_cov.py: -------------------------------------------------------------------------------- 1 | from arch.compat.statsmodels import dataset_loader 2 | 3 | from numpy import diff, log 4 | from numpy.random import RandomState 5 | from numpy.testing import assert_almost_equal 6 | import pytest 7 | from statsmodels.datasets import macrodata 8 | 9 | from arch.utility.cov import cov_nw 10 | 11 | 12 | class TestVarNW: 13 | @classmethod 14 | def setup_class(cls): 15 | cls.rng = RandomState(12345) 16 | cls.cpi = log(dataset_loader(macrodata)["cpi"]) 17 | cls.inflation = diff(cls.cpi) 18 | 19 | def test_cov_nw(self): 20 | y = self.inflation 21 | simple_cov = cov_nw(y, lags=0) 22 | e = y - y.mean() 23 | assert_almost_equal(e.dot(e) / e.shape[0], simple_cov) 24 | 25 | def test_cov_nw_ddof(self): 26 | y = self.inflation 27 | simple_cov = cov_nw(y, lags=0, ddof=1) 28 | e = y - y.mean() 29 | n = e.shape[0] 30 | assert_almost_equal(e.dot(e) / (n - 1), simple_cov) 31 | 32 | def test_cov_nw_no_demean(self): 33 | y = self.inflation 34 | simple_cov = cov_nw(y, lags=0, demean=False) 35 | assert_almost_equal(y.dot(y) / y.shape[0], simple_cov) 36 | 37 | def test_cov_nw_2d(self): 38 | y = self.rng.randn(100, 2) 39 | simple_cov = cov_nw(y, lags=0) 40 | e = y - y.mean(0) 41 | assert_almost_equal(e.T.dot(e) / e.shape[0], simple_cov) 42 | 43 | def test_cov_nw_2d_2lags(self): 44 | y = self.rng.randn(100, 2) 45 | e = y - y.mean(0) 46 | gamma_0 = e.T.dot(e) 47 | gamma_1 = e[1:].T.dot(e[:-1]) 48 | gamma_2 = e[2:].T.dot(e[:-2]) 49 | w1, w2 = 1.0 - (1.0 / 3.0), 1.0 - (2.0 / 3.0) 50 | expected = ( 51 | gamma_0 + w1 * (gamma_1 + gamma_1.T) + w2 * (gamma_2 + gamma_2.T) 52 | ) / 100.0 53 | assert_almost_equal(cov_nw(y, lags=2), expected) 54 | 55 | def test_cov_nw_axis(self): 56 | y = self.rng.randn(100, 2) 57 | e = y - y.mean(0) 58 | gamma_0 = e.T.dot(e) 59 | gamma_1 = e[1:].T.dot(e[:-1]) 60 | gamma_2 = e[2:].T.dot(e[:-2]) 61 | w1, w2 = 1.0 - (1.0 / 3.0), 1.0 - (2.0 / 3.0) 62 | expected = ( 63 | gamma_0 + w1 * (gamma_1 + gamma_1.T) + w2 * (gamma_2 + gamma_2.T) 64 | ) / 100.0 65 | assert_almost_equal(cov_nw(y.T, lags=2, axis=1), expected) 66 | 67 | def test_errors(self): 68 | y = self.rng.randn(100, 2) 69 | with pytest.raises(ValueError, match=r"lags must be weakly smaller than"): 70 | cov_nw(y, 200) 71 | with pytest.raises(ValueError, match=r"axis must be less than the"): 72 | cov_nw(y, axis=3) 73 | with pytest.raises(ValueError, match=r"ddof must be strictly smaller"): 74 | cov_nw(y, ddof=200) 75 | -------------------------------------------------------------------------------- /arch/_build/git_version.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from __future__ import annotations 3 | 4 | import os 5 | import setuptools_scm 6 | from packaging.version import Version 7 | from pathlib import Path 8 | 9 | ROOT = Path(__file__).parent.parent.parent.absolute() 10 | 11 | 12 | def get_version() -> 
tuple[str, tuple[int | str, ...]]: 13 | _version = setuptools_scm.get_version(root=ROOT) 14 | parsed_version = Version(_version) 15 | version_fields: tuple[int | str, ...] = parsed_version.release 16 | if parsed_version.epoch: 17 | version_fields = (f"{parsed_version.epoch}!", *version_fields) 18 | if parsed_version.pre is not None: 19 | version_fields += (f"{parsed_version.pre[0]}{parsed_version.pre[1]}",) 20 | 21 | if parsed_version.post is not None: 22 | version_fields += (f"post{parsed_version.post}",) 23 | 24 | if parsed_version.dev is not None: 25 | version_fields += (f"dev{parsed_version.dev}",) 26 | 27 | if parsed_version.local is not None: 28 | version_fields += (parsed_version.local,) 29 | 30 | return _version, version_fields 31 | 32 | 33 | def write_version_file( 34 | filename: str, version: str, version_fields: tuple[int | str, ...] 35 | ) -> None: 36 | template = f"""# file generated by setuptools-scm 37 | # don't change, don't track in version control 38 | 39 | __all__ = ["__version__", "__version_tuple__", "version", "version_tuple"] 40 | 41 | TYPE_CHECKING = False 42 | if TYPE_CHECKING: 43 | from typing import Tuple 44 | from typing import Union 45 | 46 | VERSION_TUPLE = Tuple[Union[int, str], ...] 47 | else: 48 | VERSION_TUPLE = object 49 | 50 | version: str 51 | __version__: str 52 | __version_tuple__: VERSION_TUPLE 53 | version_tuple: VERSION_TUPLE 54 | 55 | __version__ = version = '{version}' 56 | __version_tuple__ = version_tuple = {version_fields} 57 | """ 58 | 59 | with open(filename, "w") as f: 60 | f.write(template) 61 | 62 | 63 | if __name__ == "__main__": 64 | import argparse 65 | 66 | parser = argparse.ArgumentParser() 67 | parser.add_argument("--write", help="Save version to this file") 68 | parser.add_argument( 69 | "--meson-dist", 70 | help="Output path is relative to MESON_DIST_ROOT", 71 | action="store_true", 72 | ) 73 | args = parser.parse_args() 74 | 75 | version, version_tuple = get_version() 76 | 77 | if args.write: 78 | outfile = args.write 79 | if args.meson_dist: 80 | outfile = os.path.join(os.environ.get("MESON_DIST_ROOT", ""), outfile) 81 | 82 | # Print human readable output path 83 | relpath = os.path.relpath(outfile) 84 | if relpath.startswith("."): 85 | relpath = outfile 86 | write_version_file(relpath, version, version_tuple) 87 | else: 88 | print(version) 89 | -------------------------------------------------------------------------------- /arch/meson.build: -------------------------------------------------------------------------------- 1 | incdir_numpy = run_command( 2 | py, 3 | [ 4 | '-c', 5 | ''' 6 | import os 7 | cwd = os.getcwd() 8 | 9 | # Protect from import errors due to module names 10 | os.chdir(os.path.join('..', 'examples')) 11 | import numpy as np 12 | os.chdir(cwd) 13 | 14 | try: 15 | # Check if include directory is inside the dir 16 | # e.g. a venv created inside the dir 17 | # If so, convert it to a relative path 18 | incdir = os.path.relpath(np.get_include()) 19 | except Exception as exc: 20 | incdir = np.get_include() 21 | print(incdir) 22 | ''', 23 | ], 24 | check: true, 25 | ).stdout().strip() 26 | 27 | inc_np = include_directories(incdir_numpy, is_system: true) 28 | 29 | # Copy the main __init__.py to the build dir. 30 | # Some submodules (linalg, special, optimize) add pxd files to this. 
31 | # Needed to trick Cython, it won't do a relative import outside a package 32 | _cython_tree = [fs.copyfile('__init__.py')] 33 | cython_args = [ 34 | '-X cpow=True', 35 | '-X boundscheck=False', 36 | '-X wraparound=False', 37 | '-X cdivision=True', 38 | '-X binding=True' 39 | ] 40 | cython_c_args = ['-DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION'] 41 | if get_option('cython-coverage') 42 | message('Building with Cython coverage support') 43 | cython_args += ['-X linetrace=True'] 44 | cython_c_args += ['-DCYTHON_TRACE=1'] 45 | endif 46 | 47 | arch_dir = py.get_install_dir() / 'arch' 48 | # Generate version.py for sdist 49 | meson.add_dist_script( 50 | ['_build/git_version.py', '--meson-dist', '--write', 51 | 'arch/_version.py'] 52 | ) 53 | if not fs.exists('_version.py') 54 | generate_version = custom_target( 55 | 'generate-version', 56 | install: true, 57 | build_always_stale: true, 58 | build_by_default: true, 59 | output: '_version.py', 60 | input: '_build/git_version.py', 61 | command: [py, '@INPUT@', '--write', '@OUTPUT@'], 62 | install_dir: arch_dir, 63 | install_tag: 'python-runtime' 64 | ) 65 | else 66 | # When building from sdist, version.py exists and should be included 67 | py.install_sources( 68 | ['_version.py'], 69 | subdir : 'arch' 70 | ) 71 | endif 72 | 73 | subdir('bootstrap') 74 | subdir('univariate') 75 | 76 | subdirs_list = [ 77 | '__future__', 78 | '_build', 79 | 'bootstrap', 80 | 'compat', 81 | 'covariance', 82 | 'data', 83 | 'tests', 84 | 'unitroot', 85 | 'univariate', 86 | 'utility', 87 | 'vendor', 88 | ] 89 | 90 | foreach subdir : subdirs_list 91 | install_subdir(subdir, install_dir: py.get_install_dir() / 'arch') 92 | endforeach 93 | 94 | top_level_py_list = [ 95 | '__init__.py', 96 | 'conftest.py', 97 | 'py.typed', 98 | '_typing.py' 99 | ] 100 | py.install_sources(top_level_py_list, subdir: 'arch') 101 | -------------------------------------------------------------------------------- /doc/source/api.rst: -------------------------------------------------------------------------------- 1 | API Reference 2 | ============= 3 | 4 | This page contains a list of the essential end-user API functions 5 | and classes. 6 | 7 | Volatility Modeling 8 | ------------------- 9 | 10 | High-level 11 | ~~~~~~~~~~ 12 | 13 | .. autosummary:: 14 | 15 | ~arch.univariate.arch_model 16 | 17 | Mean Specification 18 | ~~~~~~~~~~~~~~~~~~ 19 | 20 | .. autosummary:: 21 | 22 | ~arch.univariate.ConstantMean 23 | ~arch.univariate.ZeroMean 24 | ~arch.univariate.HARX 25 | ~arch.univariate.ARX 26 | ~arch.univariate.LS 27 | 28 | Volatility Process Specification 29 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 30 | .. autosummary:: 31 | 32 | ~arch.univariate.GARCH 33 | ~arch.univariate.EGARCH 34 | ~arch.univariate.HARCH 35 | ~arch.univariate.FIGARCH 36 | ~arch.univariate.MIDASHyperbolic 37 | ~arch.univariate.EWMAVariance 38 | ~arch.univariate.RiskMetrics2006 39 | ~arch.univariate.ConstantVariance 40 | ~arch.univariate.FixedVariance 41 | 42 | Shock Distributions 43 | ~~~~~~~~~~~~~~~~~~~ 44 | 45 | .. autosummary:: 46 | 47 | ~arch.univariate.Normal 48 | ~arch.univariate.StudentsT 49 | ~arch.univariate.SkewStudent 50 | ~arch.univariate.GeneralizedError 51 | 52 | Unit Root Testing 53 | ----------------- 54 | .. autosummary:: 55 | 56 | ~arch.unitroot.ADF 57 | ~arch.unitroot.DFGLS 58 | ~arch.unitroot.PhillipsPerron 59 | ~arch.unitroot.ZivotAndrews 60 | ~arch.unitroot.VarianceRatio 61 | ~arch.unitroot.KPSS 62 | 63 | Cointegration Testing 64 | --------------------- 65 | ..
autosummary:: 66 | 67 | ~arch.unitroot.cointegration.engle_granger 68 | ~arch.unitroot.cointegration.phillips_ouliaris 69 | 70 | Cointegrating Relationship Estimation 71 | ------------------------------------- 72 | .. autosummary:: 73 | 74 | ~arch.unitroot.cointegration.CanonicalCointegratingReg 75 | ~arch.unitroot.cointegration.DynamicOLS 76 | ~arch.unitroot.cointegration.FullyModifiedOLS 77 | 78 | Bootstraps 79 | ---------- 80 | 81 | .. autosummary:: 82 | ~arch.bootstrap.IIDBootstrap 83 | ~arch.bootstrap.IndependentSamplesBootstrap 84 | ~arch.bootstrap.StationaryBootstrap 85 | ~arch.bootstrap.CircularBlockBootstrap 86 | ~arch.bootstrap.MovingBlockBootstrap 87 | 88 | Block-length Selection 89 | ~~~~~~~~~~~~~~~~~~~~~~ 90 | 91 | .. autosummary:: 92 | 93 | ~arch.bootstrap.optimal_block_length 94 | 95 | Testing with Multiple-Comparison 96 | -------------------------------- 97 | .. autosummary:: 98 | 99 | ~arch.bootstrap.SPA 100 | ~arch.bootstrap.MCS 101 | ~arch.bootstrap.StepM 102 | 103 | Long-run Covariance (HAC) Estimation 104 | ------------------------------------ 105 | .. autosummary:: 106 | 107 | ~arch.covariance.kernel.Bartlett 108 | ~arch.covariance.kernel.Parzen 109 | ~arch.covariance.kernel.ParzenCauchy 110 | ~arch.covariance.kernel.ParzenGeometric 111 | ~arch.covariance.kernel.ParzenRiesz 112 | ~arch.covariance.kernel.QuadraticSpectral 113 | ~arch.covariance.kernel.TukeyHamming 114 | ~arch.covariance.kernel.TukeyHanning 115 | ~arch.covariance.kernel.TukeyParzen 116 | -------------------------------------------------------------------------------- /doc/source/spelling_wordlist.txt: -------------------------------------------------------------------------------- 1 | ADF 2 | APARCH 3 | ARCHInMean 4 | ARCHModel 5 | ARCHModelFixedResult 6 | ARCHModelForecast 7 | ARCHModelForecastSimulation 8 | ARCHModelResult 9 | Autoregression 10 | Autoregressions 11 | Autoregressive 12 | BCa 13 | CanonicalCointegratingReg 14 | Centerings 15 | CircularBlockBootstrap 16 | Cointegrating 17 | CointegrationAnalysisResults 18 | ConstantMean 19 | ConstantVariance 20 | Covariance 21 | CovarianceEstimate 22 | DF 23 | DFGLS 24 | Datetimes 25 | DynamicOLS 26 | DynamicOLSResults 27 | EGARCH 28 | EWMAVariance 29 | EngleGrangerTestResults 30 | FIGARCH 31 | Familywise 32 | FixedVariance 33 | FullyModifiedOLS 34 | GARCH 35 | GLS 36 | GeneralizedError 37 | HARCH 38 | HARX 39 | IIDBootstrap 40 | IndependentSamplesBootstrap 41 | KPSS 42 | Loglikelihood 43 | MCS 44 | MIDASHyperbolic 45 | MovingBlockBootstrap 46 | NaN 47 | NeweyWest 48 | Nonparametric 49 | ParzenCauchy 50 | ParzenGeometric 51 | ParzenRiesz 52 | PhillipsOuliarisTestResults 53 | PhillipsPerron 54 | Probit 55 | Pz 56 | Pᵤ 57 | QuadraticSpectral 58 | Rabba 59 | Regressors 60 | RiskMetrics 61 | Semiparametric 62 | SkewStudent 63 | StationaryBootstrap 64 | Statistica 65 | Statsmodel 66 | StepM 67 | Stepwise 68 | Studentized 69 | StudentsT 70 | TukeyHamming 71 | TukeyHanning 72 | TukeyParzen 73 | VaR 74 | VarianceRatio 75 | VolatilityProcess 76 | Winkler 77 | Za 78 | ZeroMean 79 | ZivotAndrews 80 | Zt 81 | Zα 82 | aic 83 | al 84 | andrews 85 | annualize 86 | arch 87 | args 88 | arx 89 | asym 90 | autocorrelation 91 | autocorrelations 92 | autocovariance 93 | autocovariances 94 | autolag 95 | ba 96 | backcast 97 | backcasting 98 | bartlett 99 | bashtage 100 | bc 101 | bca 102 | bic 103 | cbb 104 | cdf 105 | centerings 106 | cointegrated 107 | cointegrating 108 | cointegration 109 | conf 110 | const 111 | cov 112 | covariance 113 | covariances 114 | covergence 115 
| crit 116 | ctt 117 | datetime 118 | debiased 119 | dependant 120 | deterministics 121 | detrended 122 | detrending 123 | df 124 | differenced 125 | differencing 126 | disp 127 | econometrics 128 | engle 129 | eps 130 | exog 131 | expon 132 | familywise 133 | forecastable 134 | ftol 135 | func 136 | gaussian 137 | ged 138 | granger 139 | heteroskedasticity 140 | homoskedastic 141 | hqic 142 | iid 143 | ith 144 | jit 145 | jth 146 | kmax 147 | kurtosis 148 | kwargs 149 | len 150 | lm 151 | loadings 152 | loglikelihood 153 | maxiter 154 | maxlag 155 | mbb 156 | nan 157 | nc 158 | ndarray 159 | ndarrays 160 | nforecast 161 | nonparametric 162 | nonrobust 163 | nparam 164 | num 165 | nvar 166 | nw 167 | ouliaris 168 | param 169 | parameterless 170 | parametrizations 171 | params 172 | parzen 173 | phillips 174 | pos 175 | ppf 176 | pre 177 | pvalue 178 | pvalues 179 | pyproject 180 | qs 181 | quantiles 182 | quartile 183 | regressand 184 | regressor 185 | reindex 186 | reproducibility 187 | resampled 188 | resamples 189 | rescaled 190 | resid 191 | resids 192 | rng 193 | rsquared 194 | sb 195 | scipy 196 | semidefinite 197 | semiparametric 198 | skewness 199 | skewstudent 200 | skewt 201 | sqrt 202 | stationarity 203 | stepwise 204 | studentization 205 | studentize 206 | studentized 207 | studentst 208 | subclassing 209 | th 210 | toml 211 | tvalues 212 | unadjusted 213 | unitroot 214 | univariate 215 | unstandardized 216 | updateable 217 | vals 218 | vendored 219 | volatilty 220 | vr 221 | zag 222 | zig 223 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/simulation/kpss_critical_values_simulation.py: -------------------------------------------------------------------------------- 1 | """ 2 | Calculates quantiles of the KPSS test statistic for both the constant 3 | and constant plus trend scenarios. 4 | """ 5 | 6 | import datetime as dt 7 | import os 8 | from typing import cast 9 | 10 | import numpy as np 11 | from numpy.random import RandomState 12 | import pandas as pd 13 | 14 | from arch._typing import Float64Array 15 | from arch.utility.timeseries import add_trend 16 | 17 | 18 | def simulate_kpss( 19 | nobs: int, 20 | b: int, 21 | trend: str = "c", 22 | rng: RandomState | None = None, 23 | ) -> np.ndarray: 24 | """ 25 | Simulates the KPSS test statistic for nobs observations, 26 | performing b replications. 27 | """ 28 | if rng is None: 29 | rng = RandomState() 30 | rng.seed(0) 31 | 32 | standard_normal = rng.standard_normal 33 | 34 | e = standard_normal((nobs, b)) 35 | z: Float64Array = np.ones((nobs, 1)) 36 | if trend == "ct": 37 | z = add_trend(z, trend="t") 38 | zinv = np.linalg.pinv(z) 39 | trend_coef = zinv.dot(e) 40 | resid = e - cast("np.ndarray", z.dot(trend_coef)) 41 | s = np.cumsum(resid, axis=0) 42 | lam = (resid**2.0).mean(axis=0) 43 | kpss = 1 / (nobs**2.0) * (s**2.0).sum(axis=0) / lam 44 | return kpss 45 | 46 | 47 | def wrapper(nobs: int, b: int, trend: str = "c", max_memory: int = 1024) -> np.ndarray: 48 | """ 49 | A wrapper around the main simulation that runs it in blocks so that large 50 | simulations can be run without constructing very large arrays and running 51 | out of memory.
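As a rough worked example under the default ``max_memory=1024`` (MiB): each
simulated replication column costs ``8 * nobs`` bytes, so the block size is
``1024 * 2**20 // 8 // nobs`` replications, i.e. about 67,000 replications
per block when ``nobs=2000``.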
51 | """ 52 | rng = RandomState() 53 | rng.seed(0) 54 | memory = max_memory * 2**20 55 | b_max_memory = memory // 8 // nobs 56 | b_max_memory = max(b_max_memory, 1) 57 | remaining = b 58 | results = np.zeros(b) 59 | now = dt.datetime.now() 60 | time_fmt = "{0:d}:{1:0>2d}:{2:0>2d}" 61 | msg = "trend {0}, {1} reps remaining, " + "elapsed {2}, remaining {3}" 62 | while remaining > 0: 63 | b_eff = min(remaining, b_max_memory) 64 | completed = b - remaining 65 | results[completed : completed + b_eff] = simulate_kpss( 66 | nobs, b_eff, trend=trend, rng=rng 67 | ) 68 | remaining -= b_max_memory 69 | elapsed = (dt.datetime.now() - now).total_seconds() 70 | expected_remaining = max(0, remaining) * (elapsed / (b - remaining)) 71 | 72 | m, s = divmod(int(elapsed), 60) 73 | h, m = divmod(m, 60) 74 | elapsed_fmt = time_fmt.format(h, m, s) 75 | 76 | m, s = divmod(int(expected_remaining), 60) 77 | h, m = divmod(m, 60) 78 | expected_remaining_fmt = time_fmt.format(h, m, s) 79 | 80 | print(msg.format(trend, max(0, remaining), elapsed_fmt, expected_remaining_fmt)) 81 | 82 | return results 83 | 84 | 85 | if __name__ == "__main__": 86 | import datetime as dt 87 | 88 | nobs = 2000 89 | B = 100000000 90 | 91 | percentiles = np.concatenate( 92 | ( 93 | np.arange(0.0, 99.0, 0.5), 94 | np.arange(99.0, 99.9, 0.1), 95 | np.arange(99.9, 100.0, 0.01), 96 | ) 97 | ) 98 | 99 | critical_values = 100 - percentiles 100 | critical_values_string = [f"{cv:0.1f}" for cv in critical_values] 101 | 102 | hdf_filename = "kpss_critical_values.h5" 103 | try: 104 | os.remove(hdf_filename) 105 | except OSError: 106 | pass 107 | 108 | for tr in ("c", "ct"): 109 | now = dt.datetime.now() 110 | kpss = wrapper(nobs, B, trend=tr) 111 | quantiles = np.percentile(kpss, list(percentiles)) 112 | df = pd.DataFrame(quantiles, index=critical_values, columns=[tr]) 113 | df.to_hdf(hdf_filename, key=tr, mode="a") 114 | -------------------------------------------------------------------------------- /arch/tests/test_vendor.py: -------------------------------------------------------------------------------- 1 | from typing import Literal 2 | 3 | import pytest 4 | 5 | from arch.vendor._decorators import Substitution, deprecate_kwarg, indent 6 | 7 | 8 | @deprecate_kwarg("old", "new", {"yes": True, "no": False}, stacklevel=2) 9 | def f(x: int, *, old: Literal["yes", "no"] = "yes", new: bool = True) -> int: 10 | if new: 11 | return x + 1 12 | else: 13 | return x - 1 14 | 15 | 16 | @deprecate_kwarg("old", None, stacklevel=2) 17 | def g(x: int, *, old: bool = True) -> int: 18 | """ 19 | Function with keyword-only arguments. 20 | 21 | Parameters 22 | ---------- 23 | x : int 24 | An integer value. 25 | """ 26 | return x + 1 27 | 28 | 29 | @deprecate_kwarg("old", "new", stacklevel=2) 30 | def bar(old=False, new=False): 31 | return new 32 | 33 | 34 | def _baz_mapping(old: Literal["yes", "no"]) -> bool: 35 | if old == "yes": 36 | return True 37 | elif old == "no": 38 | return False 39 | else: 40 | raise ValueError(old) 41 | 42 | 43 | @deprecate_kwarg("old", "new", _baz_mapping, stacklevel=2) 44 | def baz(x: int, *, old: Literal["yes", "no"] = "yes", new: bool = True) -> int: 45 | if new: 46 | return x + 1 47 | else: 48 | return x - 1 49 | 50 | 51 | def test_deprecate_kwarg(): 52 | """ 53 | Test the deprecation of the `y` keyword argument in the function `f`. 
54 | """ 55 | with pytest.warns(FutureWarning, match=r"the old"): 56 | f(1, old="yes") 57 | with pytest.warns(FutureWarning, match=r"the old"): 58 | f(1, old="no") 59 | f(2, new=True) 60 | f(2, new=False) 61 | with pytest.raises(TypeError, match=r"Can only specify 'old' or 'new', not both"): 62 | with pytest.warns(FutureWarning, match=r"the old"): 63 | f(2, old="yes", new=True) 64 | with pytest.warns(FutureWarning, match=r"the old"): 65 | baz(2, old="yes") 66 | with pytest.warns(FutureWarning, match=r"the old"): 67 | baz(2, old="no") 68 | with pytest.raises(ValueError, match=r"maybe"): 69 | baz(2, old="maybe") 70 | 71 | 72 | def test_deprecate_kwarg_no_alt(): 73 | """ 74 | Test the deprecation of the `y` keyword argument in the function `f`. 75 | """ 76 | with pytest.warns(FutureWarning, match=r"the 'old'"): 77 | g(1, old=True) 78 | 79 | 80 | def test_bad_deprecate_kwarg(): 81 | def h(x: int, *, old: bool = True) -> int: 82 | """ 83 | Function with keyword-only arguments. 84 | 85 | Parameters 86 | ---------- 87 | x : int 88 | An integer value. 89 | """ 90 | return x + 1 91 | 92 | assert h(1) == 2 93 | 94 | def constructor(): 95 | return deprecate_kwarg("old", None, [("yes", True), ("no", False)])(h) 96 | 97 | with pytest.raises(TypeError, match=r"mapping from old to new argu"): 98 | constructor() 99 | 100 | 101 | def test_simple_depr(): 102 | with pytest.warns(FutureWarning, match=r"the 'old'"): 103 | bar(old=True) 104 | 105 | 106 | def test_indent(): 107 | res = indent( 108 | """ 109 | This is a test 110 | """, 111 | 1, 112 | ) 113 | assert res[:5] == "\n" + " " * 4 114 | assert res[5:] == "This is a test\n " 115 | 116 | assert indent(None) == "" 117 | 118 | 119 | def test_substitution_error(): 120 | with pytest.raises( 121 | AssertionError, match=r"Only positional or keyword args are allow" 122 | ): 123 | Substitution("First", second="second")("") 124 | 125 | 126 | def test_substitution_update(): 127 | sub = Substitution(first="first", second="second") 128 | sub.update({"third": "third"}) 129 | assert "third" in sub.params 130 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation.py: -------------------------------------------------------------------------------- 1 | """ 2 | Simulation of ADF z-test critical values. Closely follows MacKinnon (2010). 3 | Running this files requires an IPython cluster, which is assumed to be 4 | on the local machine. This can be started using a command similar to 5 | 6 | ipcluster start -n 4 7 | 8 | Remote clusters can be used by modifying the call to Client. 
9 | """ 10 | 11 | import datetime 12 | from typing import Literal, cast 13 | 14 | from ipyparallel import Client 15 | from numpy import array, nan, ndarray, percentile, savez 16 | 17 | from .adf_simulation import adf_simulation 18 | 19 | # Number of repetitions 20 | EX_NUM = 500 21 | # Number of simulations per exercise 22 | EX_SIZE = 200000 23 | # Approximately controls memory use, in MiB 24 | MAX_MEMORY_SIZE = 100 25 | 26 | rc = Client() 27 | dview = rc.direct_view() 28 | with dview.sync_imports(): 29 | from numpy import arange, zeros 30 | from numpy.random import RandomState 31 | 32 | 33 | def wrapper( 34 | n: int, trend: Literal["n", "c", "ct", "ctt"], b: int, seed: int = 0 35 | ) -> ndarray: 36 | """ 37 | Wraps and blocks the main simulation so that the maximum amount of memory 38 | can be controlled on multi processor systems when executing in parallel 39 | """ 40 | rng = RandomState() 41 | rng.seed(seed) 42 | remaining = b 43 | res = zeros(b) 44 | finished = 0 45 | block_size = int(2**20.0 * MAX_MEMORY_SIZE / (8.0 * n)) 46 | for _ in range(0, b, block_size): 47 | if block_size < remaining: 48 | count = block_size 49 | else: 50 | count = remaining 51 | st = finished 52 | en = finished + count 53 | res[st:en] = adf_simulation(n, trend, count, rng) 54 | finished += count 55 | remaining -= count 56 | 57 | return res 58 | 59 | 60 | # Push variables and functions to all engines 61 | dview.execute("import numpy as np") 62 | dview["MAX_MEMORY_SIZE"] = MAX_MEMORY_SIZE 63 | dview["wrapper"] = wrapper 64 | dview["adf_simulation"] = adf_simulation 65 | lview = rc.load_balanced_view() 66 | 67 | trends = ("n", "c", "ct", "ctt") 68 | T = array( 69 | ( 70 | 20, 71 | 25, 72 | 30, 73 | 35, 74 | 40, 75 | 45, 76 | 50, 77 | 60, 78 | 70, 79 | 80, 80 | 90, 81 | 100, 82 | 120, 83 | 140, 84 | 160, 85 | 180, 86 | 200, 87 | 250, 88 | 300, 89 | 350, 90 | 400, 91 | 450, 92 | 500, 93 | 600, 94 | 700, 95 | 800, 96 | 900, 97 | 1000, 98 | 1200, 99 | 1400, 100 | 2000, 101 | ) 102 | ) 103 | T = T[::-1] 104 | m = T.shape[0] 105 | percentiles = list(arange(0.5, 100.0, 0.5)) 106 | rng = RandomState(0) 107 | seeds = rng.random_integers(0, 2**31 - 2, size=EX_NUM) 108 | 109 | for tr in trends: 110 | results = cast("ndarray", zeros((len(percentiles), len(T), EX_NUM)) * nan) 111 | filename = "adf_z_" + tr + ".npz" 112 | 113 | for i in range(EX_NUM): 114 | print(f"Experiment Number {i + 1} for Trend {tr}") 115 | # Non parallel version 116 | # args = (T, [tr] * m, [EX_SIZE] * m, [seeds[i]] * m) 117 | # out = [wrapper(a, b, c, d) for a, b, c, d in args] 118 | now = datetime.datetime.now() 119 | out = lview.map_sync(wrapper, T, [tr] * m, [EX_SIZE] * m, [seeds[i]] * m) 120 | # Prevent unnecessary results from accumulating 121 | lview.purge_results("all") 122 | rc.purge_everything() 123 | print(datetime.datetime.now() - now) 124 | quantiles = [percentile(x, percentiles) for x in out] 125 | results[:, :, i] = cast("ndarray", array(quantiles).T) 126 | 127 | if i % 50 == 0: 128 | savez(filename, trend=tr, results=results, percentiles=percentiles, T=T) 129 | 130 | savez(filename, trend=tr, results=results, percentiles=percentiles, T=T) 131 | -------------------------------------------------------------------------------- /doc/source/changes/5.0.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Version 5 3 | ========= 4 | 5 | Changes Since 5.5 6 | ================= 7 | - Removed dependence on property-cached 8 | - Bumped minimum NumPy, SciPy, pandas, statsmodels and Cython 9 | - 
Added compatibility with Cython 3 10 | 11 | Release 5.5 12 | =========== 13 | - NumPy 1.25 fixes 14 | - Initial pandas copy-on-write support 15 | - Switched doc theme to sphinx-immaterial 16 | - Small fixes for typing issues 17 | 18 | Release 5.4 19 | =========== 20 | - Compatibility release with pandas 2.0 21 | - Add testing and wheel support for Python 3.11 22 | 23 | Release 5.3 24 | =========== 25 | - Fixed a bug in :func:`~arch.univariate.arch_model` where ``power`` was 26 | not passed to the :class:`~arch.univariate.FIGARCH` constructor (:issue:`572`). 27 | - Fixed a bug that affected downstream projects due to an overly specific assert 28 | (:issue:`569`). 29 | 30 | Release 5.2 31 | =========== 32 | - Fixed a bug in :func:`~arch.univariate.base.ARCHModelResult.std_resid` that 33 | would raise an exception when the data used to construct the model was a NumPy 34 | array (:issue:`565`). 35 | - Fixed a bug in :func:`~arch.univariate.base.ARCHModelResult.forecast` and related 36 | ``forecast`` methods when producing multi-step forecasts using simulation with 37 | exogenous variables (:issue:`551`). 38 | 39 | Release 5.1 40 | =========== 41 | 42 | Unit Root 43 | ~~~~~~~~~ 44 | 45 | - Improved automatic lag length selection in :class:`~arch.unitroot.DFGLS` 46 | by using OLS rather than GLS detrended data when selecting the lag length. 47 | This problem was studied by Perron, P., & Qu, Z. (2007). 48 | 49 | Release 5.0 50 | =========== 51 | 52 | Unit Root 53 | ~~~~~~~~~ 54 | - All unit root tests are now immutable, and so properties such as ``trend`` cannot 55 | be set after the test is created. 56 | 57 | Bootstrap 58 | ~~~~~~~~~ 59 | - Added ``seed`` keyword argument to all bootstraps (e.g., :class:`~arch.bootstrap.IIDBootstrap` 60 | and :class:`~arch.bootstrap.StationaryBootstrap`) that allows a NumPy :class:`numpy.random.Generator` 61 | to be used. The ``seed`` keyword argument also accepts legacy :class:`numpy.random.RandomState` 62 | instances and integers. If an integer is passed, the random number generator is constructed by 63 | calling :func:`numpy.random.default_rng`. The ``seed`` keyword argument replaces the 64 | ``random_state`` keyword argument. 65 | - The :func:`~arch.bootstrap.IIDBootstrap.random_state` property has also been deprecated in favor 66 | of :func:`~arch.bootstrap.IIDBootstrap.generator`. 67 | - The :func:`~arch.bootstrap.IIDBootstrap.get_state` and :func:`~arch.bootstrap.IIDBootstrap.set_state` 68 | methods have been replaced by the :func:`~arch.bootstrap.IIDBootstrap.state` property. 69 | 70 | Volatility Modeling 71 | ~~~~~~~~~~~~~~~~~~~ 72 | - Added ``seed`` keyword argument to all distributions (e.g., :class:`~arch.univariate.distribution.Normal` 73 | and :class:`~arch.univariate.distribution.StudentsT`) that allows a NumPy :class:`numpy.random.Generator` 74 | to be used. The ``seed`` keyword argument also accepts legacy :class:`numpy.random.RandomState` 75 | instances and integers. If an integer is passed, the random number generator is constructed by 76 | calling :func:`numpy.random.default_rng`. The ``seed`` keyword argument replaces the 77 | ``random_state`` keyword argument. 78 | - The ``Normal.random_state`` property has also been deprecated in favor 79 | of :meth:`~arch.univariate.Normal.generator`. 80 | - Added :class:`~arch.univariate.ARCHInMean` mean process supporting (G)ARCH-in-mean models.
81 | - Extended :class:`~arch.univariate.volatility.VolatilityProcess` with 82 | :func:`~arch.univariate.volatility.VolatilityProcess.volatility_updater` that contains a 83 | :class:`~arch.univariate.recursions.VolatilityUpdater` to allow 84 | :class:`~arch.univariate.ARCHInMean` to be created from different volatility processes. 85 | 86 | Setup 87 | ~~~~~ 88 | - Added support for using an environmental variable to disable C-extension compilation. 89 | 90 | * Linux and OSX: ``export ARCH_NO_BINARY=1`` 91 | * PowerShell: ``$env:ARCH_NO_BINARY=1`` 92 | * cmd: ``set ARCH_NO_BINARY=1`` 93 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/kpss.py: -------------------------------------------------------------------------------- 1 | from numpy import asarray 2 | 3 | kpss_critical_values = {} 4 | c = ( 5 | (100.000, 0.0000), 6 | (99.500, 0.0219), 7 | (99.000, 0.0249), 8 | (98.000, 0.0289), 9 | (97.000, 0.0318), 10 | (96.000, 0.0344), 11 | (95.000, 0.0366), 12 | (93.500, 0.0397), 13 | (91.500, 0.0435), 14 | (89.500, 0.0470), 15 | (86.000, 0.0527), 16 | (74.000, 0.0720), 17 | (70.500, 0.0778), 18 | (67.000, 0.0839), 19 | (64.000, 0.0894), 20 | (61.500, 0.0941), 21 | (58.500, 0.1001), 22 | (56.000, 0.1053), 23 | (53.000, 0.1119), 24 | (50.000, 0.1190), 25 | (47.000, 0.1266), 26 | (44.500, 0.1333), 27 | (42.000, 0.1406), 28 | (39.500, 0.1484), 29 | (37.000, 0.1568), 30 | (34.500, 0.1659), 31 | (32.000, 0.1758), 32 | (30.000, 0.1845), 33 | (28.000, 0.1939), 34 | (26.000, 0.2041), 35 | (24.000, 0.2153), 36 | (22.500, 0.2244), 37 | (21.000, 0.2343), 38 | (19.500, 0.2451), 39 | (18.000, 0.2568), 40 | (16.500, 0.2698), 41 | (15.000, 0.2842), 42 | (14.000, 0.2947), 43 | (12.000, 0.3186), 44 | (10.000, 0.3475), 45 | (9.500, 0.3557), 46 | (9.000, 0.3644), 47 | (8.500, 0.3736), 48 | (8.000, 0.3834), 49 | (7.500, 0.3940), 50 | (7.000, 0.4053), 51 | (6.500, 0.4176), 52 | (6.000, 0.4309), 53 | (5.500, 0.4454), 54 | (5.000, 0.4614), 55 | (4.500, 0.4793), 56 | (4.000, 0.4993), 57 | (3.500, 0.5222), 58 | (3.000, 0.5488), 59 | (2.500, 0.5804), 60 | (2.000, 0.6195), 61 | (1.500, 0.6704), 62 | (1.000, 0.7428), 63 | (0.900, 0.7618), 64 | (0.800, 0.7830), 65 | (0.700, 0.8073), 66 | (0.600, 0.8353), 67 | (0.500, 0.8683), 68 | (0.400, 0.9090), 69 | (0.300, 0.9618), 70 | (0.200, 1.0369), 71 | (0.100, 1.1650), 72 | (0.100, 1.1650), 73 | (0.090, 1.1845), 74 | (0.080, 1.2068), 75 | (0.070, 1.2318), 76 | (0.060, 1.2608), 77 | (0.050, 1.2948), 78 | (0.040, 1.3372), 79 | (0.030, 1.3912), 80 | (0.020, 1.4671), 81 | (0.010, 1.6000), 82 | ) 83 | kpss_critical_values["c"] = asarray(c) 84 | 85 | ct = ( 86 | (100.000, 0.0000), 87 | (99.500, 0.0156), 88 | (99.000, 0.0173), 89 | (98.500, 0.0185), 90 | (97.500, 0.0203), 91 | (96.000, 0.0224), 92 | (94.500, 0.0240), 93 | (93.000, 0.0254), 94 | (91.000, 0.0272), 95 | (88.500, 0.0291), 96 | (86.000, 0.0309), 97 | (81.000, 0.0343), 98 | (68.500, 0.0424), 99 | (62.500, 0.0464), 100 | (56.500, 0.0507), 101 | (53.500, 0.0529), 102 | (51.000, 0.0548), 103 | (48.000, 0.0573), 104 | (45.500, 0.0594), 105 | (43.000, 0.0616), 106 | (40.000, 0.0645), 107 | (37.500, 0.0670), 108 | (35.500, 0.0691), 109 | (33.000, 0.0720), 110 | (31.000, 0.0744), 111 | (29.000, 0.0770), 112 | (27.500, 0.0791), 113 | (25.500, 0.0820), 114 | (24.000, 0.0844), 115 | (22.500, 0.0869), 116 | (21.000, 0.0897), 117 | (18.500, 0.0946), 118 | (16.000, 0.1004), 119 | (14.000, 0.1057), 120 | (12.000, 0.1119), 121 | (10.000, 0.1193), 122 | (9.500, 0.1214), 123 | 
(9.000, 0.1236), 124 | (8.500, 0.1259), 125 | (8.000, 0.1284), 126 | (7.500, 0.1310), 127 | (7.000, 0.1339), 128 | (6.500, 0.1370), 129 | (6.000, 0.1403), 130 | (5.500, 0.1439), 131 | (5.000, 0.1479), 132 | (4.500, 0.1523), 133 | (4.000, 0.1573), 134 | (3.500, 0.1630), 135 | (3.000, 0.1696), 136 | (2.500, 0.1774), 137 | (2.000, 0.1871), 138 | (1.500, 0.1996), 139 | (1.000, 0.2175), 140 | (0.900, 0.2222), 141 | (0.800, 0.2275), 142 | (0.700, 0.2335), 143 | (0.600, 0.2404), 144 | (0.500, 0.2486), 145 | (0.400, 0.2588), 146 | (0.300, 0.2719), 147 | (0.200, 0.2904), 148 | (0.100, 0.3223), 149 | (0.100, 0.3223), 150 | (0.090, 0.3272), 151 | (0.080, 0.3326), 152 | (0.070, 0.3388), 153 | (0.060, 0.3460), 154 | (0.050, 0.3546), 155 | (0.040, 0.3653), 156 | (0.030, 0.3790), 157 | (0.020, 0.3985), 158 | (0.010, 0.4313), 159 | ) 160 | kpss_critical_values["ct"] = asarray(ct) 161 | -------------------------------------------------------------------------------- /arch/tests/utility/test_timeseries.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.random import RandomState 3 | from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_equal 4 | import pandas as pd 5 | from pandas.testing import assert_frame_equal 6 | import pytest 7 | 8 | from arch.utility.timeseries import ColumnNameConflict, add_trend 9 | 10 | 11 | @pytest.fixture 12 | def rng(): 13 | return RandomState(12345) 14 | 15 | 16 | def test_add_trend_err(): 17 | with pytest.raises(ValueError, match=r"One and only one"): 18 | add_trend(x=None, trend="ctt", nobs=None) 19 | 20 | 21 | def test_add_trend_prepend(rng): 22 | n = 10 23 | x = rng.randn(n, 1) 24 | trend_1 = add_trend(x, trend="ct", prepend=True) 25 | trend_2 = add_trend(x, trend="ct", prepend=False) 26 | assert_equal(trend_1[:, :2], trend_2[:, 1:]) 27 | 28 | 29 | def test_add_time_trend_dataframe(rng): 30 | n = 10 31 | x = rng.randn(n, 1) 32 | x = pd.DataFrame(x, columns=["col1"]) 33 | trend_1 = add_trend(x, trend="t") 34 | assert_array_almost_equal(np.asarray(trend_1["trend"]), np.arange(1.0, n + 1)) 35 | 36 | 37 | def test_add_trend_prepend_dataframe(rng): 38 | n = 10 39 | x = rng.randn(n, 1) 40 | x = pd.DataFrame(x, columns=["col1"]) 41 | trend_1 = add_trend(x, trend="ct", prepend=True) 42 | trend_2 = add_trend(x, trend="ct", prepend=False) 43 | assert_frame_equal(trend_1.iloc[:, :2], trend_2.iloc[:, 1:]) 44 | 45 | 46 | def test_add_trend_duplicate_name(): 47 | x = pd.DataFrame(np.zeros((10, 1)), columns=["trend"]) 48 | with pytest.warns(ColumnNameConflict, match=r"Some of the column names being"): 49 | _ = add_trend(x, trend="ct") 50 | with pytest.warns(ColumnNameConflict, match=r"Some of the column names being"): 51 | y = add_trend(x, trend="ct") 52 | 53 | assert "const" in y.columns 54 | assert "trend_0" in y.columns 55 | 56 | 57 | def test_add_trend_c(): 58 | x = np.zeros((10, 1)) 59 | y = add_trend(x, trend="c") 60 | assert np.all(y[:, 1] == 1.0) 61 | 62 | 63 | def test_add_trend_ct(): 64 | n = 20 65 | x = np.zeros((20, 1)) 66 | y = add_trend(x, trend="ct") 67 | assert np.all(y[:, 1] == 1.0) 68 | assert_equal(y[0, 2], 1.0) 69 | assert_array_almost_equal(np.diff(y[:, 2]), np.ones(n - 1)) 70 | 71 | 72 | def test_add_trend_ctt(): 73 | n = 10 74 | x = np.zeros((n, 1)) 75 | y = add_trend(x, trend="ctt") 76 | assert np.all(y[:, 1] == 1.0) 77 | assert y[0, 2] == 1.0 78 | assert_array_almost_equal(np.diff(y[:, 2]), np.ones(n - 1)) 79 | assert y[0, 3] == 1.0 80 | 
assert_array_almost_equal(np.diff(y[:, 3]), np.arange(3.0, 2.0 * n, 2.0)) 81 | 82 | 83 | def test_add_trend_t(): 84 | n = 20 85 | x = np.zeros((20, 1)) 86 | y = add_trend(x, trend="t") 87 | assert y[0, 1] == 1.0 88 | assert_array_almost_equal(np.diff(y[:, 1]), np.ones(n - 1)) 89 | 90 | 91 | def test_add_trend_no_input(): 92 | n = 100 93 | y = add_trend(x=None, trend="ct", nobs=n) 94 | assert np.all(y[:, 0] == 1.0) 95 | assert y[0, 1] == 1.0 96 | assert_array_almost_equal(np.diff(y[:, 1]), np.ones(n - 1)) 97 | 98 | 99 | def test_skip_constant(): 100 | x = np.ones((100, 1)) 101 | appended = add_trend(x, trend="c", has_constant="add") 102 | assert_array_equal(np.ones((100, 2)), appended) 103 | appended = add_trend(x, trend="c", has_constant="skip") 104 | assert_array_equal(np.ones((100, 1)), appended) 105 | 106 | 107 | def test_errors(): 108 | n = 100 109 | with pytest.raises(ValueError, match=r"trend unknown not understood"): 110 | add_trend(x=None, trend="unknown", nobs=n) 111 | with pytest.raises( 112 | ValueError, match=r"One and only one of x or nobs must be provided" 113 | ): 114 | add_trend(x=None, trend="ct") 115 | x = np.ones((100, 1)) 116 | with pytest.raises(ValueError, match=r"x already contains a constant"): 117 | add_trend(x, trend="ct", has_constant="raise") 118 | 119 | 120 | def test_trend_n_nobs(): 121 | assert add_trend(nobs=100, trend="n").shape == (100, 0) 122 | assert add_trend(np.empty((100, 2)), trend="n").shape == (100, 2) 123 | 124 | 125 | def test_addtrend_bad_nobs(): 126 | with pytest.raises(ValueError, match=r"nobs must"): 127 | add_trend(None, trend="ct") 128 | with pytest.raises(ValueError, match=r"nobs must"): 129 | add_trend(None, trend="ct", nobs=-3) 130 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation_joblib.py: -------------------------------------------------------------------------------- 1 | """ 2 | Simulation of ADF z-test critical values. Closely follows MacKinnon (2010). 
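Run directly from the command line; the number of worker processes is set with the ``--ncpu`` flag defined below, e.g. ``python adf_z_critical_values_simulation_joblib.py --ncpu 8`` (an illustrative invocation).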
3 | """ 4 | 5 | import argparse 6 | import os 7 | import random 8 | from typing import cast 9 | 10 | from adf_simulation import ( 11 | OUTPUT_PATH, 12 | PERCENTILES, 13 | TIME_SERIES_LENGTHS, 14 | TRENDS, 15 | adf_simulation, 16 | ) 17 | import colorama 18 | from joblib import Parallel, delayed 19 | import numpy as np 20 | from numpy.random import PCG64, Generator, SeedSequence 21 | import psutil 22 | 23 | from arch._typing import UnitRootTrend 24 | 25 | GREEN = colorama.Fore.GREEN 26 | BLUE = colorama.Fore.BLUE 27 | RED = colorama.Fore.RED 28 | RESET = colorama.Fore.RESET 29 | 30 | # Number of repetitions 31 | EX_NUM = 500 32 | # Number of simulations per exercise 33 | EX_SIZE = 250000 34 | # Approximately controls memory use, in MiB 35 | MAX_MEMORY_SIZE = 100 36 | 37 | 38 | # raw entropy (16 bits each) from random.org 39 | RAW = [ 40 | 64303, 41 | 60269, 42 | 6936, 43 | 46495, 44 | 33811, 45 | 56090, 46 | 36001, 47 | 55726, 48 | 32840, 49 | 17611, 50 | 32276, 51 | 58287, 52 | 10615, 53 | 53045, 54 | 52978, 55 | 10484, 56 | 25209, 57 | 35367, 58 | 52618, 59 | 24147, 60 | ] 61 | ENTROPY = [(RAW[i] << 16) + RAW[i + 1] for i in range(0, len(RAW), 2)] 62 | 63 | 64 | def single_experiment(trend: str, gen: Generator, file_name: str) -> None: 65 | """ 66 | Wraps and blocks the main simulation so that the maximum amount of memory 67 | can be controlled on multi processor systems when executing in parallel 68 | """ 69 | 70 | res = np.zeros(EX_SIZE) 71 | output: np.ndarray = np.zeros( 72 | (len(cast("np.ndarray", PERCENTILES)), len(TIME_SERIES_LENGTHS)) 73 | ) 74 | for col, nobs in enumerate(TIME_SERIES_LENGTHS): 75 | remaining = EX_SIZE 76 | finished = 0 77 | block_size = int(2**20.0 * MAX_MEMORY_SIZE / (8.0 * nobs)) 78 | for _ in range(0, EX_SIZE, block_size): 79 | if block_size < remaining: 80 | count = block_size 81 | else: 82 | count = remaining 83 | st = finished 84 | en = finished + count 85 | _trend: UnitRootTrend 86 | if trend == "n": 87 | _trend = "n" 88 | elif trend == "c": 89 | _trend = "c" 90 | elif trend == "ct": 91 | _trend = "ct" 92 | else: 93 | _trend = "ctt" 94 | res[st:en] = adf_simulation(nobs, _trend, count, gen) 95 | finished += count 96 | remaining -= count 97 | output[:, col] = np.percentile(res, PERCENTILES) 98 | np.savez(file_name, results=output) 99 | 100 | 101 | if __name__ == "__main__": 102 | parser = argparse.ArgumentParser("Simulations for ADF-Z critical values") 103 | parser.add_argument( 104 | "--ncpu", 105 | type=int, 106 | action="store", 107 | help="Number of CPUs to use. 
If not specified, uses cpu_count() - 1", 108 | ) 109 | parser.add_argument( 110 | "--z_only", 111 | action="store_true", 112 | help="Only execute Z-type tests", 113 | ) 114 | args = parser.parse_args() 115 | njobs = getattr(args, "ncpu", None) 116 | njobs = psutil.cpu_count(logical=False) - 1 if njobs is None else njobs 117 | njobs = max(njobs, 1) 118 | 119 | ss = SeedSequence(ENTROPY) 120 | children = ss.spawn(len(TRENDS) * EX_NUM) 121 | generators = [Generator(PCG64(child)) for child in children] 122 | jobs = [] 123 | count = 0 124 | for tr in TRENDS: 125 | for i in range(EX_NUM): 126 | file_name = os.path.join(OUTPUT_PATH, f"adf_z_{tr}-{i:04d}.npz") 127 | jobs.append((tr, generators[count], file_name)) 128 | count += 1 129 | jobs = [job for job in jobs if not os.path.exists(job[-1])] 130 | random.shuffle(jobs) 131 | nremconfig = len(jobs) 132 | nconfig = len(children) 133 | print( 134 | f"Total configurations: {BLUE}{nconfig}{RESET}, " 135 | f"Remaining: {RED}{nremconfig}{RESET}" 136 | ) 137 | print(f"Running on {BLUE}{njobs}{RESET} CPUs") 138 | if njobs > 1: 139 | Parallel(verbose=50, n_jobs=njobs)( 140 | delayed(single_experiment)(t, g, f) for t, g, f in jobs 141 | ) 142 | else: 143 | for job in jobs: 144 | single_experiment(*job) 145 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/zivot_andrews.py: -------------------------------------------------------------------------------- 1 | """ 2 | Critical values for the three different models specified for the 3 | Zivot-Andrews unit-root test. 4 | 5 | Notes 6 | ----- 7 | The p-values are generated through Monte Carlo simulation using 100,000 8 | replications and 2000 data points. 9 | """ 10 | 11 | from numpy import array 12 | 13 | # constant-only model 14 | c = ( 15 | (0.001, -6.78442), 16 | (0.100, -5.83192), 17 | (0.200, -5.68139), 18 | (0.300, -5.58461), 19 | (0.400, -5.51308), 20 | (0.500, -5.45043), 21 | (0.600, -5.39924), 22 | (0.700, -5.36023), 23 | (0.800, -5.33219), 24 | (0.900, -5.30294), 25 | (1.000, -5.27644), 26 | (2.500, -5.03340), 27 | (5.000, -4.81067), 28 | (7.500, -4.67636), 29 | (10.000, -4.56618), 30 | (12.500, -4.48130), 31 | (15.000, -4.40507), 32 | (17.500, -4.33947), 33 | (20.000, -4.28155), 34 | (22.500, -4.22683), 35 | (25.000, -4.17830), 36 | (27.500, -4.13101), 37 | (30.000, -4.08586), 38 | (32.500, -4.04455), 39 | (35.000, -4.00380), 40 | (37.500, -3.96144), 41 | (40.000, -3.92078), 42 | (42.500, -3.88178), 43 | (45.000, -3.84503), 44 | (47.500, -3.80549), 45 | (50.000, -3.77031), 46 | (52.500, -3.73209), 47 | (55.000, -3.69600), 48 | (57.500, -3.65985), 49 | (60.000, -3.62126), 50 | (65.000, -3.54580), 51 | (70.000, -3.46848), 52 | (75.000, -3.38533), 53 | (80.000, -3.29112), 54 | (85.000, -3.17832), 55 | (90.000, -3.04165), 56 | (92.500, -2.95146), 57 | (95.000, -2.83179), 58 | (96.000, -2.76465), 59 | (97.000, -2.68624), 60 | (98.000, -2.57884), 61 | (99.000, -2.40044), 62 | (99.900, -1.88932), 63 | ) 64 | 65 | # trend-only model 66 | t = ( 67 | (0.001, -83.9094), 68 | (0.100, -13.8837), 69 | (0.200, -9.13205), 70 | (0.300, -6.32564), 71 | (0.400, -5.60803), 72 | (0.500, -5.38794), 73 | (0.600, -5.26585), 74 | (0.700, -5.18734), 75 | (0.800, -5.12756), 76 | (0.900, -5.07984), 77 | (1.000, -5.03421), 78 | (2.500, -4.65634), 79 | (5.000, -4.40580), 80 | (7.500, -4.25214), 81 | (10.000, -4.13678), 82 | (12.500, -4.03765), 83 | (15.000, -3.95185), 84 | (17.500, -3.87945), 85 | (20.000, -3.81295), 86 | (22.500, -3.75273), 87 | (25.000, -3.69836), 88 
| (27.500, -3.64785), 89 | (30.000, -3.59819), 90 | (32.500, -3.55146), 91 | (35.000, -3.50522), 92 | (37.500, -3.45987), 93 | (40.000, -3.41672), 94 | (42.500, -3.37465), 95 | (45.000, -3.33394), 96 | (47.500, -3.29393), 97 | (50.000, -3.25316), 98 | (52.500, -3.21244), 99 | (55.000, -3.17124), 100 | (57.500, -3.13211), 101 | (60.000, -3.09204), 102 | (65.000, -3.01135), 103 | (70.000, -2.92897), 104 | (75.000, -2.83614), 105 | (80.000, -2.73893), 106 | (85.000, -2.62840), 107 | (90.000, -2.49611), 108 | (92.500, -2.41337), 109 | (95.000, -2.30820), 110 | (96.000, -2.25797), 111 | (97.000, -2.19648), 112 | (98.000, -2.11320), 113 | (99.000, -1.99138), 114 | (99.900, -1.67466), 115 | ) 116 | 117 | # constant + trend model 118 | ct = ( 119 | (0.001, -38.17800), 120 | (0.100, -6.43107), 121 | (0.200, -6.07279), 122 | (0.300, -5.95496), 123 | (0.400, -5.86254), 124 | (0.500, -5.77081), 125 | (0.600, -5.72541), 126 | (0.700, -5.68406), 127 | (0.800, -5.65163), 128 | (0.900, -5.60419), 129 | (1.000, -5.57556), 130 | (2.500, -5.29704), 131 | (5.000, -5.07332), 132 | (7.500, -4.93003), 133 | (10.000, -4.82668), 134 | (12.500, -4.73711), 135 | (15.000, -4.66020), 136 | (17.500, -4.58970), 137 | (20.000, -4.52855), 138 | (22.500, -4.47100), 139 | (25.000, -4.42011), 140 | (27.500, -4.37387), 141 | (30.000, -4.32705), 142 | (32.500, -4.28126), 143 | (35.000, -4.23793), 144 | (37.500, -4.19822), 145 | (40.000, -4.15800), 146 | (42.500, -4.11946), 147 | (45.000, -4.08064), 148 | (47.500, -4.04286), 149 | (50.000, -4.00489), 150 | (52.500, -3.96837), 151 | (55.000, -3.93200), 152 | (57.500, -3.89496), 153 | (60.000, -3.85577), 154 | (65.000, -3.77795), 155 | (70.000, -3.69794), 156 | (75.000, -3.61852), 157 | (80.000, -3.52485), 158 | (85.000, -3.41665), 159 | (90.000, -3.28527), 160 | (92.500, -3.19724), 161 | (95.000, -3.08769), 162 | (96.000, -3.03088), 163 | (97.000, -2.96091), 164 | (98.000, -2.85581), 165 | (99.000, -2.71015), 166 | (99.900, -2.28767), 167 | ) 168 | 169 | 170 | za_critical_values = {"ct": array(ct), "t": array(t), "c": array(c)} 171 | 172 | __all__ = ["za_critical_values"] 173 | -------------------------------------------------------------------------------- /arch/univariate/recursions.pyi: -------------------------------------------------------------------------------- 1 | from typing import Optional, Union 2 | 3 | from arch._typing import Float64Array, Float64Array1D, Float64Array2D, Int32Array 4 | 5 | def harch_recursion( 6 | parameters: Float64Array1D, 7 | resids: Float64Array1D, 8 | sigma2: Float64Array1D, 9 | lags: Int32Array, 10 | nobs: int, 11 | backcast: float, 12 | var_bounds: Float64Array2D, 13 | ) -> Float64Array: ... 14 | def arch_recursion( 15 | parameters: Float64Array1D, 16 | resids: Float64Array1D, 17 | sigma2: Float64Array1D, 18 | p: int, 19 | nobs: int, 20 | backcast: float, 21 | var_bounds: Float64Array2D, 22 | ) -> Float64Array: ... 23 | def garch_recursion( 24 | parameters: Float64Array1D, 25 | fresids: Float64Array1D, 26 | sresids: Float64Array1D, 27 | sigma2: Float64Array1D, 28 | p: int, 29 | o: int, 30 | q: int, 31 | nobs: int, 32 | backcast: float, 33 | var_bounds: Float64Array2D, 34 | ) -> Float64Array: ... 
35 | def egarch_recursion( 36 | parameters: Float64Array1D, 37 | resids: Float64Array1D, 38 | sigma2: Float64Array1D, 39 | p: int, 40 | o: int, 41 | q: int, 42 | nobs: int, 43 | backcast: float, 44 | var_bounds: Float64Array2D, 45 | lnsigma2: Float64Array1D, 46 | std_resids: Float64Array1D, 47 | abs_std_resids: Float64Array1D, 48 | ) -> Float64Array: ... 49 | def midas_recursion( 50 | parameters: Float64Array1D, 51 | weights: Float64Array1D, 52 | resids: Float64Array1D, 53 | sigma2: Float64Array1D, 54 | nobs: int, 55 | backcast: float, 56 | var_bounds: Float64Array2D, 57 | ) -> Float64Array: ... 58 | def figarch_weights( 59 | parameters: Float64Array1D, p: int, q: int, trunc_lag: int 60 | ) -> Float64Array: ... 61 | def figarch_recursion( 62 | parameters: Float64Array1D, 63 | fresids: Float64Array1D, 64 | sigma2: Float64Array1D, 65 | p: int, 66 | q: int, 67 | nobs: int, 68 | trunc_lag: int, 69 | backcast: float, 70 | var_bounds: Float64Array2D, 71 | ) -> Float64Array: ... 72 | def aparch_recursion( 73 | parameters: Float64Array1D, 74 | resids: Float64Array1D, 75 | abs_resids: Float64Array1D, 76 | sigma2: Float64Array1D, 77 | sigma_delta: Float64Array1D, 78 | p: int, 79 | o: int, 80 | q: int, 81 | nobs: int, 82 | backcast: float, 83 | var_bounds: Float64Array2D, 84 | ) -> Float64Array: ... 85 | def harch_core( 86 | t: int, 87 | parameters: Float64Array1D, 88 | resids: Float64Array1D, 89 | sigma2: Float64Array1D, 90 | lags: Int32Array, 91 | backcast: float, 92 | var_bounds: Float64Array2D, 93 | ) -> Float64Array: ... 94 | def garch_core( 95 | t: int, 96 | parameters: Float64Array1D, 97 | resids: Float64Array1D, 98 | sigma2: Float64Array1D, 99 | backcast: float, 100 | var_bounds: Float64Array2D, 101 | p: int, 102 | o: int, 103 | q: int, 104 | power: float, 105 | ) -> Float64Array: ... 106 | 107 | class VolatilityUpdater: 108 | def initialize_update( 109 | self, 110 | parameters: Float64Array1D, 111 | backcast: Union[float, Float64Array1D], 112 | nobs: int, 113 | ) -> None: ... 114 | def _update_tester( 115 | self, 116 | t: int, 117 | parameters: Float64Array1D, 118 | resids: Float64Array1D, 119 | sigma2: Float64Array1D, 120 | var_bounds: Float64Array2D, 121 | ) -> None: ... 122 | 123 | class GARCHUpdater(VolatilityUpdater): 124 | def __init__(self, p: int, o: int, q: int, power: float) -> None: ... 125 | 126 | class EWMAUpdater(VolatilityUpdater): 127 | def __init__(self, lam: Optional[float]) -> None: ... 128 | 129 | class FIGARCHUpdater(VolatilityUpdater): 130 | def __init__(self, p: int, q: int, power: float, truncation: int) -> None: ... 131 | 132 | class HARCHUpdater(VolatilityUpdater): 133 | def __init__(self, lags: Int32Array) -> None: ... 134 | 135 | class MIDASUpdater(VolatilityUpdater): 136 | def __init__(self, m: int, asym: bool) -> None: ... 137 | 138 | class RiskMetrics2006Updater(VolatilityUpdater): 139 | def __init__( 140 | self, 141 | kmax: int, 142 | combination_weights: Float64Array1D, 143 | smoothing_parameters: Float64Array1D, 144 | ) -> None: ... 145 | 146 | class ARCHInMeanRecursion: 147 | def __init__(self, updater: VolatilityUpdater) -> None: ... 148 | def recursion( 149 | self, 150 | y: Float64Array1D, 151 | x: Float64Array2D, 152 | mean_parameters: Float64Array1D, 153 | variance_params: Float64Array1D, 154 | sigma2: Float64Array1D, 155 | var_bounds: Float64Array2D, 156 | power: float, 157 | ) -> Float64Array: ... 
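# Illustrative sketch (added; not part of the original stubs): per the version 5
# release notes, the VolatilityUpdater consumed by ARCHInMeanRecursion is taken
# from a volatility process, e.g.
#
#     from arch.univariate import GARCH
#
#     updater = GARCH(p=1, o=0, q=1).volatility_updater
#     recursion = ARCHInMeanRecursion(updater)
#
# The exact ``volatility_updater`` access is an assumption based on the release
# notes, not part of this stub file.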
158 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/simulation/dfgls_simulation_process.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.stats import norm 3 | from statsmodels.regression.linear_model import OLS, WLS 4 | 5 | trends = ("c", "ct") 6 | critical_values = (1.0, 5.0, 10.0) 7 | dfgls_cv_approx = {} 8 | for t in trends: 9 | print(t) 10 | data = np.load("dfgls_" + t + ".npz") 11 | percentiles = data["percentiles"] 12 | trend = data["trend"] 13 | results = data["results"] 14 | # T = data['T'] 15 | data.close() 16 | 17 | # Remove later 18 | T = np.array( 19 | ( 20 | 20, 21 | 25, 22 | 30, 23 | 35, 24 | 40, 25 | 45, 26 | 50, 27 | 60, 28 | 70, 29 | 80, 30 | 90, 31 | 100, 32 | 120, 33 | 140, 34 | 160, 35 | 180, 36 | 200, 37 | 250, 38 | 300, 39 | 350, 40 | 400, 41 | 450, 42 | 500, 43 | 600, 44 | 700, 45 | 800, 46 | 900, 47 | 1000, 48 | 1200, 49 | 1400, 50 | 2000, 51 | ) 52 | ) 53 | T = T[::-1] 54 | 55 | # For percentiles 1, 5 and 10, regress on a constant, and powers of 1/T 56 | out = [] 57 | for cv in critical_values: 58 | num_ex = results.shape[2] 59 | loc = np.where(percentiles == cv)[0][0] 60 | lhs = np.squeeze(results[loc, :, :]) 61 | # Adjust for effective sample size; this is what the lookup code uses 62 | tau = np.ones((num_ex, 1)).dot(T[None, :]) - 1.0 63 | tau = tau.T 64 | lhs = lhs.ravel() 65 | tau = tau.ravel() 66 | tau = tau[:, None] 67 | n = lhs.shape[0] 68 | rhs = np.ones((n, 1)) 69 | rhs = np.hstack((rhs, 1.0 / tau)) 70 | rhs = np.hstack((rhs, (1.0 / tau) ** 2.0)) 71 | rhs = np.hstack((rhs, (1.0 / tau) ** 3.0)) 72 | res = OLS(lhs, rhs).fit() 73 | out.append(res.params) 74 | 75 | dfgls_cv_approx[t] = np.array(out) 76 | 77 | trends = ("c", "ct") 78 | dfgls_large_p = {} 79 | dfgls_small_p = {} 80 | dfgls_tau_star = {} 81 | dfgls_tau_max = {} 82 | dfgls_tau_min = {} 83 | for t in trends: 84 | data = np.load("dfgls_" + t + ".npz") 85 | percentiles = data["percentiles"] 86 | results = data["results"] # Remove later 87 | # LHS is norm cdf inv of percentiles 88 | lhs = norm().ppf(percentiles / 100.0) 89 | lhs_large = lhs 90 | # RHS is made up of avg test stats for largest T, which is in pos 1 91 | avg_test_stats = results[:, 1, :].mean(axis=1) 92 | avg_test_std = results[:, 1, :].std(axis=1) 93 | avg_test_stats = avg_test_stats[:, None] 94 | m = lhs.shape[0] 95 | rhs = np.ones((m, 1)) 96 | rhs = np.hstack((rhs, avg_test_stats)) 97 | rhs = np.hstack((rhs, avg_test_stats**2.0)) 98 | rhs = np.hstack((rhs, avg_test_stats**3.0)) 99 | rhs_large = rhs 100 | res_large = WLS(lhs, rhs, weights=1.0 / avg_test_std).fit() 101 | dfgls_large_p[t] = res_large.params 102 | # Compute tau_max, by finding the func maximum 103 | p = res_large.params 104 | poly_roots = np.roots(np.array([3, 2, 1.0]) * p[:0:-1]) 105 | dfgls_tau_max[t] = float(np.squeeze(np.real(np.max(poly_roots)))) 106 | 107 | # Small p regression using only p<=15% 108 | cutoff = np.where(percentiles <= 15.0)[0] 109 | avg_test_stats = results[cutoff, 1, :].mean(axis=1) 110 | avg_test_std = results[cutoff, 1, :].std(axis=1) 111 | avg_test_stats = avg_test_stats[:, None] 112 | lhs = lhs[cutoff] 113 | m = lhs.shape[0] 114 | rhs = np.ones((m, 1)) 115 | rhs = np.hstack((rhs, avg_test_stats)) 116 | rhs = np.hstack((rhs, avg_test_stats**2.0)) 117 | res_small = WLS(lhs, rhs, weights=1.0 / avg_test_std).fit() 118 | dfgls_small_p[t] = res_small.params 119 | 120 | # Compute tau star 121 | err_large = res_large.resid
122 | # Missing 1 parameter here, replace with 0 123 | params = np.append(np.asarray(res_small.params), np.zeros(1)) 124 | err_small = lhs_large - rhs_large.dot(params) 125 | # Find the location that minimizes the total absolute error 126 | m = lhs_large.shape[0] 127 | abs_err = np.zeros((m, 1)) 128 | for i in range(m): 129 | abs_err[i] = np.abs(err_large[i:]).sum() + np.abs(err_small[:i]).sum() 130 | loc = np.argmin(abs_err) 131 | dfgls_tau_star[t] = rhs_large[loc, 1] 132 | # Compute tau min 133 | dfgls_tau_min[t] = -params[1] / (2 * params[2]) 134 | 135 | print("from numpy import array") 136 | print("") 137 | print("dfgls_cv_approx = " + str(dfgls_cv_approx)) 138 | print("") 139 | print("dfgls_tau_max = " + str(dfgls_tau_max)) 140 | print("") 141 | print("dfgls_tau_min = " + str(dfgls_tau_min)) 142 | print("") 143 | print("dfgls_tau_star = " + str(dfgls_tau_star)) 144 | print("") 145 | print("dfgls_large_p = " + str(dfgls_large_p)) 146 | print("") 147 | print("dfgls_small_p = " + str(dfgls_small_p)) 148 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation_large_cluster.py: -------------------------------------------------------------------------------- 1 | """ 2 | Simulation of ADF z-test critical values. Closely follows MacKinnon (2010). 3 | Running this file requires an IPython cluster, which is assumed to be 4 | on the local machine. This can be started using a command similar to 5 | 6 | ipcluster start -n 4 7 | 8 | Remote clusters can be used by modifying the Client initialization. 9 | 10 | This version has been optimized for execution on a large cluster and should 11 | scale well with 128 or more engines. 12 | """ 13 | 14 | import datetime 15 | import time 16 | from typing import cast 17 | 18 | from ipyparallel import Client, DirectView 19 | from numpy import array, nan, ndarray, percentile, savez 20 | 21 | from arch._typing import UnitRootTrend 22 | 23 | from .adf_simulation import adf_simulation 24 | 25 | # Time in seconds to sleep before checking if ready 26 | SLEEP = 10 27 | # Number of repetitions 28 | EX_NUM = 500 29 | # Number of simulations per exercise 30 | EX_SIZE = 200000 31 | # Approximately controls memory use, in MiB 32 | MAX_MEMORY_SIZE = 100 33 | 34 | rc = Client() 35 | dview = rc.direct_view() 36 | with dview.sync_imports(): 37 | from numpy import arange, zeros 38 | from numpy.random import RandomState 39 | 40 | 41 | def clear_cache(client: Client, view: DirectView) -> None: 42 | """Cache-clearing function from mailing list""" 43 | assert not rc.outstanding, "don't clear history when tasks are outstanding" 44 | client.purge_results("all") # clears controller 45 | client.results.clear() 46 | client.metadata.clear() 47 | view.results.clear() 48 | client.history = [] 49 | view.history = [] 50 | client.session.digest_history.clear() 51 | 52 | 53 | def wrapper(n: int, trend: UnitRootTrend, b: int, rng_seed: int = 0) -> ndarray: 54 | """ 55 | Wraps and blocks the main simulation so that the maximum amount of memory 56 | can be controlled on multi processor systems when executing in parallel 57 | """ 58 | rng = RandomState() 59 | rng.seed(rng_seed) 60 | remaining = b 61 | res = zeros(b) 62 | finished = 0 63 | block_size = int(2**20.0 * MAX_MEMORY_SIZE / (8.0 * n)) 64 | for _ in range(0, b, block_size): 65 | if block_size < remaining: 66 | count = block_size 67 | else: 68 | count = remaining 69 | st = finished 70 | en = finished + count 71 | res[st:en] = adf_simulation(n, trend, count, rng)
72 | finished += count 73 | remaining -= count 74 | 75 | return res 76 | 77 | 78 | # Push variables and functions to all engines 79 | dview.execute("import numpy as np") 80 | dview["MAX_MEMORY_SIZE"] = MAX_MEMORY_SIZE 81 | dview["wrapper"] = wrapper 82 | dview["adf_simulation"] = adf_simulation 83 | lview = rc.load_balanced_view() 84 | 85 | trends = ("n", "c", "ct", "ctt") 86 | T = array( 87 | ( 88 | 20, 89 | 25, 90 | 30, 91 | 35, 92 | 40, 93 | 45, 94 | 50, 95 | 60, 96 | 70, 97 | 80, 98 | 90, 99 | 100, 100 | 120, 101 | 140, 102 | 160, 103 | 180, 104 | 200, 105 | 250, 106 | 300, 107 | 350, 108 | 400, 109 | 450, 110 | 500, 111 | 600, 112 | 700, 113 | 800, 114 | 900, 115 | 1000, 116 | 1200, 117 | 1400, 118 | 2000, 119 | ) 120 | ) 121 | T = T[::-1] 122 | m = T.shape[0] 123 | percentiles = list(arange(0.5, 100.0, 0.5)) 124 | rng = RandomState(0) 125 | seeds = list(rng.random_integers(0, 2**31 - 2, size=EX_NUM)) 126 | 127 | for tr in trends: 128 | results = cast("ndarray", zeros((len(percentiles), m, EX_NUM)) * nan) 129 | filename = "adf_z_" + tr + ".npz" 130 | 131 | for i, t in enumerate(T): 132 | print(f"Time series length {t} for Trend {tr}") 133 | now = datetime.datetime.now() 134 | # Serial version 135 | # args = ([t] * EX_NUM, [tr] * EX_NUM, [EX_SIZE] * EX_NUM, seeds) 136 | # out = [ wrapper(a, b, c, d) for a, b, c, d in zip(*args)] 137 | 138 | # Parallel version 139 | res = lview.map_async( 140 | wrapper, [t] * EX_NUM, [tr] * EX_NUM, [EX_SIZE] * EX_NUM, seeds 141 | ) 142 | sleep_count = 0 143 | while not res.ready(): 144 | sleep_count += 1 145 | elapsed = datetime.datetime.now() - now 146 | if sleep_count % 10: 147 | print(f"Elapsed time {elapsed}, waiting for results") 148 | time.sleep(SLEEP) 149 | 150 | out = res.get() 151 | # Prevent unnecessary results from accumulating 152 | clear_cache(rc, lview) 153 | 154 | elapsed = datetime.datetime.now() - now 155 | print(f"Total time {elapsed} for T={t}") 156 | quantiles = [percentile(x, percentiles) for x in out] 157 | results[:, i, :] = cast("ndarray", array(quantiles).T) 158 | 159 | savez(filename, trend=tr, results=results, percentiles=percentiles, T=T) 160 | -------------------------------------------------------------------------------- /arch/unitroot/critical_values/simulation/shared.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Sequence 2 | from typing import Any, NamedTuple 3 | 4 | import numpy as np 5 | import pandas as pd 6 | from scipy import stats 7 | from statsmodels.regression.linear_model import OLS, WLS 8 | 9 | 10 | def format_dict(d: dict[Any, Any]) -> str: 11 | return ( 12 | str(d) 13 | .replace(" ", "") 14 | .replace("],", "],\n") 15 | .replace(":", ":\n") 16 | .replace("},", "},\n") 17 | ) 18 | 19 | 20 | class PvalueResult(NamedTuple): 21 | large_p: list[float] 22 | small_p: list[float] 23 | tau_max: float 24 | tau_star: float 25 | tau_min: float 26 | 27 | 28 | def estimate_cv_regression( 29 | results: pd.DataFrame, critical_values: Sequence[float] 30 | ) -> dict[float, list[float]]: 31 | """ 32 | Parameters 33 | ---------- 34 | results : DataFrame 35 | A dataframe with rows containing the quantiles and columns containing the 36 | number of observations 37 | critical_values : Sequence[float] 38 | The critical values to use 39 | """ 40 | # For percentiles 1, 5 and 10, regress on a constant, and powers of 1/T 41 | out = {} 42 | quantiles = np.asarray(results.index) 43 | tau = np.array(results.columns).reshape((1, -1)).T
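# (Added note) ``rhs`` below stacks the columns [1, 1/tau, 1/tau**2, 1/tau**3],
# i.e. a response-surface regression in powers of 1/T in the style of
# MacKinnon (2010).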
44 | rhs = (1.0 / tau) ** np.arange(4) 45 | for cv in critical_values: 46 | loc = np.argmin(np.abs(100 * quantiles - cv)) 47 | lhs = np.squeeze(np.asarray(results.iloc[loc])) 48 | res = OLS(lhs, rhs).fit() 49 | params = res.params.copy() 50 | params[res.pvalues > 0.05] = 0.0 51 | out[cv] = [round(val, 5) for val in params] 52 | return out 53 | 54 | 55 | def fit_pval_model( 56 | quantiles: pd.Series | pd.DataFrame, 57 | small_order: int = 3, 58 | use_log: bool = False, 59 | drop_insignif: bool = True, 60 | ) -> PvalueResult: 61 | if small_order not in (3, 4): 62 | raise ValueError("Small order must be 3 or 4") 63 | quantiles = quantiles.sort_index(ascending=False) 64 | percentiles = quantiles.index.to_numpy() 65 | lhs = stats.norm.ppf(percentiles) 66 | data = np.asarray(quantiles) 67 | avg_test_stats = data.mean(1) 68 | avg_test_std = data.std(1) 69 | avg_test_stats = avg_test_stats[:, None] 70 | 71 | rhs = avg_test_stats ** np.arange(4) 72 | rhs_large = rhs 73 | rhs_log = np.log(np.abs(avg_test_stats)) ** np.arange(4) 74 | lhs_large = lhs 75 | res_large = WLS(lhs_large, rhs, weights=1.0 / avg_test_std).fit() 76 | temp = res_large.params.copy() 77 | if drop_insignif: 78 | temp[res_large.pvalues > 0.05] = 0.0 79 | large_p = temp 80 | 81 | # Compute tau_max, by finding the func maximum 82 | p = res_large.params 83 | poly_roots = np.roots(np.array([3, 2, 1.0]) * p[:0:-1]) 84 | if np.isreal(poly_roots[0]): 85 | tau_max = float(np.squeeze(np.real(np.max(poly_roots)))) 86 | else: 87 | tau_max = np.inf 88 | 89 | # Small p regression using only p<=15% 90 | cutoff = np.where(percentiles <= 0.150)[0] 91 | lhs_small = lhs[cutoff] 92 | if use_log: 93 | avg_test_stats = np.log(np.abs(avg_test_stats[cutoff])) 94 | avg_test_std = np.log(np.abs(data[cutoff])).std(1) 95 | assert np.all(np.isfinite(avg_test_std)) 96 | rhs = avg_test_stats ** np.arange(small_order) 97 | else: 98 | avg_test_stats = avg_test_stats[cutoff] 99 | avg_test_std = avg_test_std[cutoff] 100 | rhs = avg_test_stats ** np.arange(small_order) 101 | 102 | res_small = WLS(lhs_small, rhs, weights=1.0 / avg_test_std).fit() 103 | temp = res_small.params 104 | if drop_insignif: 105 | temp[res_small.pvalues > 0.05] = 0.0 106 | small_p = temp 107 | 108 | # Compute tau star 109 | err_large = lhs_large - rhs_large.dot(large_p) 110 | params = small_p.copy() 111 | if small_order == 3: 112 | # Missing 1 parameter here, replace with 0 113 | params = np.append(params, 0.0) 114 | if use_log: 115 | pred_small = rhs_log.dot(params) 116 | else: 117 | pred_small = rhs_large.dot(params) 118 | err_small = lhs_large - pred_small 119 | # Find the location that minimizes the total absolute error 120 | m = lhs_large.shape[0] 121 | abs_err = np.zeros((m, 1)) 122 | for j in range(m): 123 | abs_err[j] = np.abs(err_large[:j]).sum() + np.abs(err_small[j:]).sum() 124 | loc = np.argmin(abs_err) 125 | tau_star = rhs_large[loc, 1] 126 | if use_log: 127 | assert tau_star < 0 128 | # Compute tau min 129 | tau_min = -params[1] / (2 * params[2]) 130 | if use_log: 131 | assert small_order == 4 132 | assert params[2] * params[3] < 0 133 | tau_min = -np.inf 134 | large_p = [round(val, 5) for val in large_p] 135 | small_p = [round(val, 5) for val in small_p] 136 | tau_max = round(tau_max, 5) 137 | tau_star = round(tau_star, 5) 138 | tau_min = round(tau_min, 5) 139 | return PvalueResult(large_p, small_p, tau_max, tau_star, tau_min) 140 | -------------------------------------------------------------------------------- 
/arch/unitroot/critical_values/simulation/dfgls_critical_values_simulation.py: -------------------------------------------------------------------------------- 1 | """ 2 | Critical value simulation for the Dickey-Fuller GLS model. Similar in design 3 | to MacKinnon (2010). Makes use of parallel_func, which works 4 | best when joblib is installed. 5 | """ 6 | 7 | import datetime 8 | from typing import cast 9 | 10 | import numpy as np 11 | from numpy.linalg import pinv 12 | from numpy.random import RandomState 13 | from statsmodels.tools.parallel import parallel_func 14 | 15 | from arch._typing import Literal 16 | 17 | # Controls memory use, in MiB 18 | MAX_MEMORY_SIZE = 100 19 | NUM_JOBS = 4 20 | EX_NUM = 500 21 | EX_SIZE = 200000 22 | 23 | 24 | def wrapper(n: int, trend: Literal["c", "ct"], b: int, seed: int = 0) -> np.ndarray: 25 | """ 26 | Wraps and blocks the main simulation so that the maximum amount of memory 27 | can be controlled on multi processor systems when executing in parallel 28 | """ 29 | rng = RandomState() 30 | rng.seed(seed) 31 | remaining = b 32 | res = np.zeros(b) 33 | finished = 0 34 | block_size = int(2**20.0 * MAX_MEMORY_SIZE / (8.0 * n)) 35 | for _ in range(0, b, block_size): 36 | if block_size < remaining: 37 | count = block_size 38 | else: 39 | count = remaining 40 | st = finished 41 | en = finished + count 42 | res[st:en] = dfgls_simulation(n, trend, count, rng) 43 | finished += count 44 | remaining -= count 45 | 46 | return res 47 | 48 | 49 | def dfgls_simulation( 50 | n: int, trend: Literal["c", "ct"], b: int, rng: RandomState | None = None 51 | ) -> np.ndarray: 52 | """ 53 | Simulates the empirical distribution of the DFGLS test statistic 54 | """ 55 | if rng is None: 56 | rng = RandomState(0) 57 | standard_normal = rng.standard_normal 58 | 59 | nobs = n 60 | if trend == "c": 61 | c = -7.0 62 | z = np.ones((nobs, 1)) 63 | else: 64 | c = -13.5 65 | z = np.vstack((np.ones(nobs), np.arange(1, nobs + 1))).T 66 | 67 | ct = c / nobs 68 | 69 | delta_z = np.copy(z) 70 | delta_z[1:, :] = delta_z[1:, :] - (1 + ct) * delta_z[:-1, :] 71 | delta_z_inv = pinv(delta_z) 72 | y = standard_normal((n + 50, b)) 73 | y = np.cumsum(y, axis=0) 74 | y = y[50:, :] 75 | delta_y = y.copy() 76 | delta_y[1:, :] = delta_y[1:, :] - (1 + ct) * delta_y[:-1, :] 77 | detrend_coef = delta_z_inv.dot(delta_y) 78 | y_detrended = y - cast("np.ndarray", z.dot(detrend_coef)) 79 | 80 | delta_y_detrended = np.diff(y_detrended, axis=0) 81 | rhs = y_detrended[:-1, :] 82 | lhs = delta_y_detrended 83 | 84 | xpy = np.sum(rhs * lhs, 0) 85 | xpx = np.sum(rhs**2.0, 0) 86 | gamma = xpy / xpx 87 | e = lhs - rhs * gamma 88 | sigma2 = np.sum(e**2.0, axis=0) / (n - 1) # DOF correction? 89 | gamma_var = sigma2 / xpx 90 | 91 | stat = gamma / np.sqrt(gamma_var) 92 | return stat
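# Worked example (added, illustrative numbers) of the blocking arithmetic in
# ``wrapper`` above: with MAX_MEMORY_SIZE = 100 MiB and n = 2000 observations,
# block_size = int(2**20 * 100 / (8 * 2000)) = 6553, so each block simulates
# 6553 series of length 2000 and uses about 6553 * 2000 * 8 bytes ~ 100 MiB.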
93 | 94 | 95 | if __name__ == "__main__": 96 | trends = ("c", "ct") 97 | T = np.array( 98 | ( 99 | 20, 100 | 25, 101 | 30, 102 | 35, 103 | 40, 104 | 45, 105 | 50, 106 | 60, 107 | 70, 108 | 80, 109 | 90, 110 | 100, 111 | 120, 112 | 140, 113 | 160, 114 | 180, 115 | 200, 116 | 250, 117 | 300, 118 | 350, 119 | 400, 120 | 450, 121 | 500, 122 | 600, 123 | 700, 124 | 800, 125 | 900, 126 | 1000, 127 | 1200, 128 | 1400, 129 | 2000, 130 | ) 131 | ) 132 | T = T[::-1] 133 | percentiles = list(np.arange(0.5, 100.0, 0.5)) 134 | seeds = np.arange(0, 2**32, step=2**23) 135 | for tr in trends: 136 | results = np.zeros((len(percentiles), len(T), EX_NUM)) 137 | 138 | for i in range(EX_NUM): 139 | print(f"Experiment Number {i + 1} of {EX_NUM} (trend {tr})") 140 | now = datetime.datetime.now() 141 | parallel, p_func, n_jobs = parallel_func( 142 | wrapper, n_jobs=NUM_JOBS, verbose=2 143 | ) 144 | out = parallel(p_func(t, tr, EX_SIZE, seed=seeds[i]) for t in T) 145 | quantiles = [np.percentile(x, percentiles) for x in out] 146 | results[:, :, i] = np.array(quantiles).T 147 | print(f"Elapsed time {datetime.datetime.now() - now}") 148 | 149 | if i % 50 == 0: 150 | np.savez( 151 | "dfgls_" + tr + ".npz", 152 | trend=tr, 153 | results=results, 154 | percentiles=percentiles, 155 | T=T, 156 | ) 157 | 158 | np.savez( 159 | "dfgls_" + tr + ".npz", 160 | trend=tr, 161 | results=results, 162 | percentiles=percentiles, 163 | T=T, 164 | ) 165 | -------------------------------------------------------------------------------- /doc/source/univariate/introduction.rst: -------------------------------------------------------------------------------- 1 | Introduction to ARCH Models 2 | --------------------------- 3 | ARCH models are a popular class of volatility models that use observed values 4 | of returns or residuals as volatility shocks. A basic GARCH model is specified 5 | as 6 | 7 | .. math:: 8 | :nowrap: 9 | 10 | \begin{eqnarray} 11 | r_t & = & \mu + \epsilon_t \\ 12 | \epsilon_t & = & \sigma_t e_t \\ 13 | \sigma^2_t & = & \omega + \alpha \epsilon_{t-1}^2 + \beta \sigma^2_{t-1} 14 | \end{eqnarray} 15 | 16 | A complete ARCH model is divided into three components: 17 | 18 | * a :ref:`mean model`, e.g., a constant mean or an :class:`~arch.univariate.mean.ARX`; 19 | * a :ref:`volatility process`, e.g., a :class:`~arch.univariate.volatility.GARCH` or an :class:`~arch.univariate.volatility.EGARCH` process; and 20 | * a :ref:`distribution` for the standardized residuals. 21 | 22 | In most applications, the simplest method to construct this model is to use the constructor 23 | function :func:`~arch.univariate.arch_model` 24 | 25 | .. code-block:: python 26 | 27 | import datetime as dt 28 | 29 | import pandas_datareader.data as web 30 | 31 | from arch import arch_model 32 | 33 | start = dt.datetime(2000, 1, 1) 34 | end = dt.datetime(2014, 1, 1) 35 | sp500 = web.DataReader('^GSPC', 'yahoo', start=start, end=end) 36 | returns = 100 * sp500['Adj Close'].pct_change().dropna() 37 | am = arch_model(returns) 38 | 39 | Alternatively, the same model can be manually assembled from the building 40 | blocks of an ARCH model 41 | 42 | .. code-block:: python 43 | 44 | from arch.univariate import ConstantMean, GARCH, Normal 45 | 46 | am = ConstantMean(returns) 47 | am.volatility = GARCH(1, 0, 1) 48 | am.distribution = Normal()
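The components can also be swapped independently after assembly; for instance (an illustrative variation, not part of the original example), heavy-tailed standardized residuals are obtained by replacing the distribution:

.. code-block:: python

    from arch.univariate import StudentsT

    am.distribution = StudentsT()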
49 | 50 | In either case, model parameters are estimated using 51 | 52 | .. code-block:: python 53 | 54 | res = am.fit() 55 | 56 | 57 | with the following output 58 | 59 | :: 60 | 61 | Iteration: 1, Func. Count: 6, Neg. LLF: 5159.58323938 62 | Iteration: 2, Func. Count: 16, Neg. LLF: 5156.09760149 63 | Iteration: 3, Func. Count: 24, Neg. LLF: 5152.29989336 64 | Iteration: 4, Func. Count: 31, Neg. LLF: 5146.47531817 65 | Iteration: 5, Func. Count: 38, Neg. LLF: 5143.86337547 66 | Iteration: 6, Func. Count: 45, Neg. LLF: 5143.02096168 67 | Iteration: 7, Func. Count: 52, Neg. LLF: 5142.24105141 68 | Iteration: 8, Func. Count: 60, Neg. LLF: 5142.07138907 69 | Iteration: 9, Func. Count: 67, Neg. LLF: 5141.416653 70 | Iteration: 10, Func. Count: 73, Neg. LLF: 5141.39212288 71 | Iteration: 11, Func. Count: 79, Neg. LLF: 5141.39023885 72 | Iteration: 12, Func. Count: 85, Neg. LLF: 5141.39023359 73 | Optimization terminated successfully. (Exit mode 0) 74 | Current function value: 5141.39023359 75 | Iterations: 12 76 | Function evaluations: 85 77 | Gradient evaluations: 12 78 | 79 | .. code-block:: python 80 | 81 | print(res.summary()) 82 | 83 | yields 84 | 85 | :: 86 | 87 | Constant Mean - GARCH Model Results 88 | ============================================================================== 89 | Dep. Variable: Adj Close R-squared: -0.001 90 | Mean Model: Constant Mean Adj. R-squared: -0.001 91 | Vol Model: GARCH Log-Likelihood: -5141.39 92 | Distribution: Normal AIC: 10290.8 93 | Method: Maximum Likelihood BIC: 10315.4 94 | No. Observations: 3520 95 | Date: Fri, Dec 02 2016 Df Residuals: 3516 96 | Time: 22:22:28 Df Model: 4 97 | Mean Model 98 | ============================================================================== 99 | coef std err t P>|t| 95.0% Conf. Int. 100 | ------------------------------------------------------------------------------ 101 | mu 0.0531 1.487e-02 3.569 3.581e-04 [2.392e-02,8.220e-02] 102 | Volatility Model 103 | ============================================================================== 104 | coef std err t P>|t| 95.0% Conf. Int. 105 | ------------------------------------------------------------------------------ 106 | omega 0.0156 4.932e-03 3.155 1.606e-03 [5.892e-03,2.523e-02] 107 | alpha[1] 0.0879 1.140e-02 7.710 1.260e-14 [6.554e-02, 0.110] 108 | beta[1] 0.9014 1.183e-02 76.163 0.000 [ 0.878, 0.925] 109 | ============================================================================== 110 | 111 | Covariance estimator: robust 112 | 113 | 114 | Model Constructor 115 | ================= 116 | While models can be carefully specified using the individual components, most common specifications can be created 117 | using a simple model constructor. 118 | 119 | .. currentmodule:: arch.univariate 120 | .. autofunction:: arch_model
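As an illustration of the constructor route (a sketch; ``returns`` is the series built earlier on this page, and all keywords shown are standard :func:`arch_model` options), an AR(1) mean with EGARCH volatility and Student's t errors can be requested directly:

.. code-block:: python

    from arch import arch_model

    # AR(1) conditional mean, EGARCH(1,1,1) volatility, Student's t errors
    am = arch_model(returns, mean="AR", lags=1, vol="EGARCH", p=1, o=1, q=1, dist="t")
    res = am.fit()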
121 | -------------------------------------------------------------------------------- Univariate volatility modeling, bootstrapping, multiple comparison procedures and unit 10 | root tests.