├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   ├── pull_request_template.md
│   └── workflows
│       └── codeql-analysis.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── TODO.md
├── autots
│   ├── __init__.py
│   ├── datasets
│   │   ├── __init__.py
│   │   ├── _base.py
│   │   ├── data
│   │   │   ├── covid_daily.zip
│   │   │   ├── eia_weekly.zip
│   │   │   ├── fred_monthly.zip
│   │   │   ├── fred_yearly.zip
│   │   │   ├── holidays.zip
│   │   │   └── traffic_hourly.zip
│   │   └── fred.py
│   ├── evaluator
│   │   ├── __init__.py
│   │   ├── anomaly_detector.py
│   │   ├── auto_model.py
│   │   ├── auto_ts.py
│   │   ├── benchmark.py
│   │   ├── event_forecasting.py
│   │   ├── metrics.py
│   │   └── validation.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── arch.py
│   │   ├── base.py
│   │   ├── basics.py
│   │   ├── cassandra.py
│   │   ├── composite.py
│   │   ├── dnn.py
│   │   ├── ensemble.py
│   │   ├── gluonts.py
│   │   ├── greykite.py
│   │   ├── matrix_var.py
│   │   ├── mlensemble.py
│   │   ├── model_list.py
│   │   ├── neural_forecast.py
│   │   ├── prophet.py
│   │   ├── pytorch.py
│   │   ├── sklearn.py
│   │   ├── statsmodels.py
│   │   ├── tfp.py
│   │   └── tide.py
│   ├── templates
│   │   ├── __init__.py
│   │   └── general.py
│   └── tools
│       ├── __init__.py
│       ├── anomaly_utils.py
│       ├── calendar.py
│       ├── cointegration.py
│       ├── constraint.py
│       ├── cpu_count.py
│       ├── fast_kalman.py
│       ├── fft.py
│       ├── fir_filter.py
│       ├── hierarchial.py
│       ├── holiday.py
│       ├── impute.py
│       ├── kalman.py
│       ├── lunar.py
│       ├── percentile.py
│       ├── probabilistic.py
│       ├── profile.py
│       ├── regressor.py
│       ├── seasonal.py
│       ├── shaping.py
│       ├── thresholding.py
│       ├── transform.py
│       ├── wavelet.py
│       └── window_functions.py
├── docs
│   ├── .gitignore
│   ├── .nojekyll
│   ├── Makefile
│   ├── _config.yml
│   ├── _static
│   │   └── autots_logo.png
│   ├── build
│   │   ├── doctrees
│   │   │   ├── environment.pickle
│   │   │   ├── index.doctree
│   │   │   └── source
│   │   │       ├── autots.datasets.doctree
│   │   │       ├── autots.doctree
│   │   │       ├── autots.evaluator.doctree
│   │   │       ├── autots.models.doctree
│   │   │       ├── autots.templates.doctree
│   │   │       ├── autots.tools.doctree
│   │   │       ├── intro.doctree
│   │   │       ├── modules.doctree
│   │   │       └── tutorial.doctree
│   │   └── html
│   │       ├── .buildinfo
│   │       ├── .nojekyll
│   │       ├── _sources
│   │       │   ├── index.rst.txt
│   │       │   └── source
│   │       │       ├── autots.datasets.rst.txt
│   │       │       ├── autots.evaluator.rst.txt
│   │       │       ├── autots.models.rst.txt
│   │       │       ├── autots.rst.txt
│   │       │       ├── autots.templates.rst.txt
│   │       │       ├── autots.tools.rst.txt
│   │       │       ├── intro.rst.txt
│   │       │       ├── modules.rst.txt
│   │       │       └── tutorial.rst.txt
│   │       ├── _static
│   │       │   ├── alabaster.css
│   │       │   ├── autots_logo.png
│   │       │   ├── basic.css
│   │       │   ├── custom.css
│   │       │   ├── doctools.js
│   │       │   ├── documentation_options.js
│   │       │   ├── file.png
│   │       │   ├── language_data.js
│   │       │   ├── minus.png
│   │       │   ├── plus.png
│   │       │   ├── pygments.css
│   │       │   ├── searchtools.js
│   │       │   └── sphinx_highlight.js
│   │       ├── genindex.html
│   │       ├── index.html
│   │       ├── objects.inv
│   │       ├── py-modindex.html
│   │       ├── search.html
│   │       ├── searchindex.js
│   │       └── source
│   │           ├── autots.datasets.html
│   │           ├── autots.evaluator.html
│   │           ├── autots.html
│   │           ├── autots.models.html
│   │           ├── autots.templates.html
│   │           ├── autots.tools.html
│   │           ├── intro.html
│   │           ├── modules.html
│   │           └── tutorial.html
│   ├── conf.py
│   ├── index.html
│   ├── index.rst
│   ├── make.bat
│   └── source
│       ├── autots.datasets.rst
│       ├── autots.evaluator.rst
│       ├── autots.models.rst
│       ├── autots.rst
│       ├── autots.templates.rst
│       ├── autots.tools.rst
│       ├── intro.rst
│       ├── modules.rst
│       └── tutorial.rst
├── extended_tutorial.md
├── img
│   ├── autots_1280.png
│   ├── autots_logo.jpg
│   └── autots_logo.png
├── production_example.py
├── pyproject.toml
├── setup.py
├── test.py
└── tests
    ├── model_forecasts.json
    ├── test_anomalies.py
    ├── test_autots.py
    ├── test_calendar_holiday.py
    ├── test_cassandra.py
    ├── test_constraint.py
    ├── test_event_forecasting.py
    ├── test_impute.py
    ├── test_metrics.py
    ├── test_percentile.py
    ├── test_regressor.py
    ├── test_seasonal.py
    ├── test_transforms.py
    ├── test_validation.py
    ├── transform_forecasts.json
    └── transform_forecasts_042.json
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Package Versions [e.g. 22]
29 |
30 | **Additional context**
31 | Add any other context about the problem here.
32 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## Changes
2 |
3 | ## Relevant Issues and Mentions
4 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [master]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [master, dev]
20 |
21 | jobs:
22 | analyze:
23 | name: Analyze
24 | runs-on: ubuntu-latest
25 |
26 | strategy:
27 | fail-fast: false
28 | matrix:
29 | language: [ 'python' ]
30 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
31 | # Learn more:
32 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
33 |
34 | steps:
35 | - name: Checkout repository
36 | uses: actions/checkout@v2
37 |
38 | # Initializes the CodeQL tools for scanning.
39 | - name: Initialize CodeQL
40 | uses: github/codeql-action/init@v2
41 | with:
42 | languages: ${{ matrix.language }}
43 | # If you wish to specify custom queries, you can do so here or in a config file.
44 | # By default, queries listed here will override any specified in a config file.
45 | # Prefix the list here with "+" to use these queries and those in the config file.
46 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
47 |
48 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
49 | # If this step fails, then you should remove it and run the build manually (see below)
50 | - name: Autobuild
51 | uses: github/codeql-action/autobuild@v2
52 |
53 | # ℹ️ Command-line programs to run using the OS shell.
54 | # 📚 https://git.io/JvXDl
55 |
56 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
57 | # and modify them (or add more) to build your code if your project
58 | # uses a compiled language
59 |
60 | #- run: |
61 | # make bootstrap
62 | # make release
63 |
64 | - name: Perform CodeQL Analysis
65 | uses: github/codeql-action/analyze@v2
66 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # supporting files
2 | standalone.py
3 | functional_environments.md
4 |
5 | # Byte-compiled / optimized / DLL files
6 | __pycache__/
7 | *.py[cod]
8 | *$py.class
9 |
10 | # C extensions
11 | *.so
12 |
13 | # Distribution / packaging
14 | .Python
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib/
22 | lib64/
23 | parts/
24 | sdist/
25 | var/
26 | wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .coverage
46 | .coverage.*
47 | .cache
48 | nosetests.xml
49 | coverage.xml
50 | *.cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 |
63 | # Flask stuff:
64 | instance/
65 | .webassets-cache
66 |
67 | # Scrapy stuff:
68 | .scrapy
69 |
70 | # Sphinx documentation
71 | docs/_build/
72 | !docs/build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # pyenv
81 | .python-version
82 |
83 | # celery beat schedule file
84 | celerybeat-schedule
85 |
86 | # SageMath parsed files
87 | *.sage.py
88 |
89 | # Environments
90 | .env
91 | .venv
92 | env/
93 | venv/
94 | ENV/
95 | env.bak/
96 | venv.bak/
97 |
98 | # Spyder project settings
99 | .spyderproject
100 | .spyproject
101 |
102 | # Rope project settings
103 | .ropeproject
104 |
105 | # mkdocs documentation
106 | /site
107 |
108 | # mypy
109 | .mypy_cache/
110 |
111 | # Visual Studio
112 | *.DS_Store
113 | .vscode/
114 |
115 | # old stuff and diagnostic
116 | archive/
117 | kernel_failures/
118 | lightning_logs/
119 | current_model_*
120 | temp*.py
121 | # ignore in root but not in subdirectories
122 | /*.zip
123 | /*.gz
124 | /*.png
125 | /*.csv
126 | /*.pickle
127 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | colin.catlin@gmail.com.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | As an AutoML project, there are many moving parts in this project, and any additions should be done carefully.
2 |
3 | Substantive contributions are welcome, however: for example, a new model type (see the new model checklist in TODO.md).
4 | While only limited tests are currently available, it is strongly recommended to include tests for any new code.
5 |
6 | Submit pull requests to the `dev` branch where they can be integrated and tested with other code before being released onto main.
7 |
8 | Please utilize the `Discussions` area for discussions and any questions, with `Issues` focused on specifically identified bugs or new features.
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Colin Catlin
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include autots/datasets/data/*.zip
2 | include README.md LICENSE
--------------------------------------------------------------------------------
/TODO.md:
--------------------------------------------------------------------------------
1 | # Basic Tenets
2 | * Ease of Use > Accuracy > Speed (with speed more important with 'fast' selections)
3 | * Availability of models which share information among series
4 | * All models should be probabilistic (upper/lower forecasts)
5 | * New transformations should be applicable to many datasets and models
6 | * New models need only be sometimes applicable
7 | * Fault tolerance: it is perfectly acceptable for model parameters to fail on some datasets; the higher-level API will pass over them and use others.
8 | * Missing data tolerance: large chunks of data can be missing and the model will still produce reasonable results (although of lower quality than if the data were available)
9 |
10 | ## Assumptions on Data
11 | * Series will largely be consistent in period, or at least up-sampled to regular intervals
12 | * The most recent data will generally be the most important
13 | * Forecasts are desired for the future immediately following the most recent data.
14 | * trimmed_mean to AverageValueNaive
15 |
16 | # 0.6.21 🇺🇦 🇺🇦 🇺🇦
17 | * Prophet and Cassandra bug fixes
18 |
19 | ### Unstable Upstream Packages (those that are frequently broken by maintainers)
20 | * Pytorch-Forecasting
21 | * Neural Prophet
22 | * GluonTS
23 |
24 | ### New Model Checklist:
25 | * Add to ModelMonster in auto_model.py
26 | * add to appropriate model_lists: all, recombination_approved if so, no_shared if so
27 | * add to model table in extended_tutorial.md (most columns here have an equivalent model_list)
28 | * if model has regressors, make sure it meets Simulation Forecasting needs (method=="regressor", fails on no regressor if "User")
29 | * if model has result_windows, add to appropriate model_list noting also diff_window_motif_list
30 |
31 | ## New Transformer Checklist:
32 | * Make sure that if it modifies the size (more/fewer columns or rows) it returns pd.DataFrame with proper index/columns
33 | * add to transformer_dict
34 | * add to trans_dict or have_params or external
35 | * add to shared_trans if so
36 | * oddities_list for those with forecast/original transform difference
37 | * add to docstring of GeneralTransformer
38 | * add to dictionary by type: filter, scaler, transformer
39 | * add to test_transform call
40 |
41 | ## New Metric Checklist:
42 | * Create function in metrics.py (see the sketch after this file)
43 | * Add to model base full_metric_evaluation (benchmark to make sure it is still fast)
44 | * Add to concat in TemplateWizard (if per_series metrics will be used)
45 | * Add to concat in TemplateEvalObject (if per_series metrics will be used)
46 | * Add to generate_score
47 | * Add to generate_score_per_series (if per_series metrics will be used)
48 | * Add to validation_aggregation
49 | * Update test_metrics results
50 | * metric_weighting in AutoTS, get_new_params, prod example, test
51 |
--------------------------------------------------------------------------------
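
As an illustration of the first metric-checklist step above, a minimal per-series metric in the style of `metrics.py` might look like the sketch below. This is hypothetical, not the shipped implementation; `actual` and `forecast` are assumed to be aligned numpy arrays of shape (timesteps, series):

```python
import numpy as np


def smape(actual, forecast):
    """Symmetric mean absolute percentage error, per series (a sketch only)."""
    denom = np.abs(actual) + np.abs(forecast)
    denom[denom == 0] = 1  # avoid division by zero where both values are zero
    return np.nanmean(200 * np.abs(forecast - actual) / denom, axis=0)
```
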
/autots/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Automated Time Series Model Selection for Python
3 |
4 | https://github.com/winedarksea/AutoTS
5 | """
6 |
7 | from autots.datasets import (
8 | load_hourly,
9 | load_daily,
10 | load_monthly,
11 | load_yearly,
12 | load_weekly,
13 | load_weekdays,
14 | load_live_daily,
15 | load_linear,
16 | load_artificial,
17 | load_sine,
18 | )
19 |
20 | from autots.evaluator.auto_ts import AutoTS
21 | from autots.evaluator.event_forecasting import EventRiskForecast
22 | from autots.tools.transform import GeneralTransformer, RandomTransform
23 | from autots.tools.shaping import long_to_wide, infer_frequency
24 | from autots.tools.regressor import create_lagged_regressor, create_regressor
25 | from autots.evaluator.auto_model import model_forecast, ModelPrediction
26 | from autots.evaluator.anomaly_detector import AnomalyDetector, HolidayDetector
27 | from autots.models.cassandra import Cassandra
28 |
29 |
30 | __version__ = '0.6.21'
31 |
32 | TransformTS = GeneralTransformer
33 |
34 | __all__ = [
35 | 'load_daily',
36 | 'load_monthly',
37 | 'load_yearly',
38 | 'load_hourly',
39 | 'load_weekly',
40 | 'load_weekdays',
41 | 'load_live_daily',
42 | 'load_linear',
43 | 'load_artificial',
44 | 'load_sine',
45 | 'AutoTS',
46 | 'TransformTS',
47 | 'GeneralTransformer',
48 | 'RandomTransform',
49 | 'long_to_wide',
50 | 'model_forecast',
51 | 'create_lagged_regressor',
52 | 'create_regressor',
53 | 'EventRiskForecast',
54 | 'AnomalyDetector',
55 | 'HolidayDetector',
56 | 'Cassandra',
57 | 'infer_frequency',
58 | 'ModelPrediction',
59 | ]
60 |
--------------------------------------------------------------------------------
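
The exports above center on the `AutoTS` class. A minimal end-to-end run using one of the bundled datasets looks roughly like this (parameter values are illustrative only):

```python
from autots import AutoTS, load_daily

# wide format: DatetimeIndex, one column per series
df = load_daily(long=False)

model = AutoTS(
    forecast_length=21,
    frequency='infer',
    model_list='superfast',  # a small, fast subset of models
    max_generations=2,
    num_validations=1,
)
model = model.fit(df)
prediction = model.predict()
print(prediction.forecast.head())  # upper_forecast / lower_forecast also available
```
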
/autots/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Tools for Importing Sample Data
3 | """
4 |
5 | from autots.datasets._base import (
6 | load_daily,
7 | load_live_daily,
8 | load_monthly,
9 | load_yearly,
10 | load_hourly,
11 | load_weekly,
12 | load_weekdays,
13 | load_zeroes,
14 | load_linear,
15 | load_sine,
16 | load_artificial,
17 | )
18 |
19 | __all__ = [
20 | 'load_daily',
21 | 'load_monthly',
22 | 'load_yearly',
23 | 'load_hourly',
24 | 'load_weekly',
25 | 'load_weekdays',
26 | 'load_live_daily',
27 | 'load_zeroes',
28 | 'load_linear',
29 | 'load_sine',
30 | 'load_artificial',
31 | ]
32 |
--------------------------------------------------------------------------------
/autots/datasets/data/covid_daily.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/autots/datasets/data/covid_daily.zip
--------------------------------------------------------------------------------
/autots/datasets/data/eia_weekly.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/autots/datasets/data/eia_weekly.zip
--------------------------------------------------------------------------------
/autots/datasets/data/fred_monthly.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/autots/datasets/data/fred_monthly.zip
--------------------------------------------------------------------------------
/autots/datasets/data/fred_yearly.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/autots/datasets/data/fred_yearly.zip
--------------------------------------------------------------------------------
/autots/datasets/data/holidays.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/autots/datasets/data/holidays.zip
--------------------------------------------------------------------------------
/autots/datasets/data/traffic_hourly.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/autots/datasets/data/traffic_hourly.zip
--------------------------------------------------------------------------------
/autots/datasets/fred.py:
--------------------------------------------------------------------------------
1 | """
2 | FRED (Federal Reserve Economic Data) Data Import
3 |
4 | requires API key from FRED
5 | and pip install fredapi
6 | """
7 |
8 | import time
9 | import pandas as pd
10 |
11 | try:
12 | from fredapi import Fred
13 | except Exception: # except ImportError
14 | _has_fred = False
15 | else:
16 | _has_fred = True
17 |
18 |
19 | def get_fred_data(
20 | fredkey: str,
21 | SeriesNameDict: dict = None,
22 | long=True,
23 | observation_start=None,
24 | sleep_seconds: int = 1,
25 | **kwargs,
26 | ):
27 | """Imports Data from Federal Reserve.
28 | For simplest results, make sure requested series are all of the same frequency.
29 |
30 | Args:
31 | fredkey (str): an API key from FRED
32 | SeriesNameDict (dict): pairs of FRED Series IDs and Series Names like: {'SeriesID': 'SeriesName'} or a list of FRED IDs.
33 | Series id must match FRED IDs, but the name can be anything
34 | if None, several default series are returned
35 | long (bool): if True, return long style data, else return wide style data with dt index
36 | observation_start (datetime): passed to Fred get_series
37 | sleep_seconds (int): seconds to sleep between each series call; usually reduces the chance of failure
38 | """
39 | if not _has_fred:
40 | raise ImportError("Package fredapi is required")
41 |
42 | fred = Fred(api_key=fredkey)
43 |
44 | if SeriesNameDict is None:
45 | SeriesNameDict = {
46 | 'T10Y2Y': '10 Year Treasury Constant Maturity Minus 2 Year Treasury Constant Maturity',
47 | 'DGS10': '10 Year Treasury Constant Maturity Rate',
48 | 'DCOILWTICO': 'Crude Oil West Texas Intermediate Cushing Oklahoma',
49 | 'SP500': 'S&P 500',
50 | 'DEXUSEU': 'US Euro Foreign Exchange Rate',
51 | 'DEXCHUS': 'China US Foreign Exchange Rate',
52 | 'DEXCAUS': 'Canadian to US Dollar Exchange Rate Daily',
53 | 'VIXCLS': 'CBOE Volatility Index: VIX', # this is a more irregular series
54 | 'T10YIE': '10 Year Breakeven Inflation Rate',
55 | 'USEPUINDXD': 'Economic Policy Uncertainty Index for United States', # also very irregular
56 | }
57 |
58 | if isinstance(SeriesNameDict, dict):
59 | series_desired = list(SeriesNameDict.keys())
60 | else:
61 | series_desired = list(SeriesNameDict)
62 |
63 | if long:
64 | fred_timeseries = pd.DataFrame(
65 | columns=['date', 'value', 'series_id', 'series_name']
66 | )
67 | else:
68 | fred_timeseries = pd.DataFrame()
69 |
70 | for series in series_desired:
71 | data = fred.get_series(series, observation_start=observation_start)
72 | try:
73 | series_name = SeriesNameDict[series]
74 | except Exception:
75 | series_name = series
76 |
77 | if long:
78 | data_df = pd.DataFrame(
79 | {
80 | 'date': data.index,
81 | 'value': data,
82 | 'series_id': series,
83 | 'series_name': series_name,
84 | }
85 | )
86 | data_df.reset_index(drop=True, inplace=True)
87 | fred_timeseries = pd.concat(
88 | [fred_timeseries, data_df], axis=0, ignore_index=True
89 | )
90 | else:
91 | data.name = series_name
92 | fred_timeseries = fred_timeseries.merge(
93 | data, how="outer", left_index=True, right_index=True
94 | )
95 | time.sleep(sleep_seconds)
96 |
97 | return fred_timeseries
98 |
--------------------------------------------------------------------------------
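
A usage sketch for `get_fred_data`, assuming `fredapi` is installed; the API key below is a placeholder, and a real key must be obtained from FRED:

```python
from autots.datasets.fred import get_fred_data

wide_df = get_fred_data(
    fredkey='your-fred-api-key',  # placeholder, not a real key
    SeriesNameDict={'DGS10': '10 Year Treasury Constant Maturity Rate'},
    long=False,
    sleep_seconds=1,
)
print(wide_df.tail())
```
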
/autots/evaluator/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Model Evaluators
3 | """
4 |
--------------------------------------------------------------------------------
/autots/evaluator/validation.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Extracted from auto_ts.py, the functions to create validation segments.
4 |
5 | Warning: these are used in AMFM, and possibly other places. Avoid modifying the function structures, if possible.
6 |
7 | Created on Mon Jan 16 11:36:01 2023
8 |
9 | @author: Colin
10 | """
11 | import numpy as np
12 | from autots.tools.transform import GeneralTransformer
13 | from autots.tools.window_functions import retrieve_closest_indices
14 | from autots.tools.seasonal import seasonal_window_match
15 |
16 |
17 | def extract_seasonal_val_periods(validation_method):
18 | val_list = [x for x in str(validation_method) if x.isdigit()]
19 | seasonal_val_periods = int(''.join(val_list))
20 | return seasonal_val_periods
21 |
22 |
23 | def validate_num_validations(
24 | validation_method="backwards",
25 | num_validations=2,
26 | df_wide_numeric=None,
27 | forecast_length=None,
28 | min_allowed_train_percent=0.5,
29 | verbose=0,
30 | ):
31 | """Check how many validations are possible given the length of the data. Beyond initial eval split which is always assumed."""
32 | if 'seasonal' in validation_method and validation_method != "seasonal":
33 | seasonal_val_periods = extract_seasonal_val_periods(validation_method)
34 | temp = df_wide_numeric.shape[0] + forecast_length
35 | max_possible = temp / seasonal_val_periods
36 | else:
37 | max_possible = (df_wide_numeric.shape[0]) / forecast_length
38 | # now adjusted for minimum % amount of training data required
39 | if (max_possible - np.floor(max_possible)) > min_allowed_train_percent:
40 | max_possible = int(max_possible)
41 | else:
42 | max_possible = int(max_possible) - 1
43 | # set auto and max validations
44 | if num_validations == "auto":
45 | num_validations = 3 if max_possible >= 4 else max_possible
46 | elif num_validations == "max":
47 | num_validations = 50 if max_possible > 51 else max_possible - 1
48 | # this still has the initial test segment as a validation segment, so -1
49 | elif max_possible < (num_validations + 1):
50 | num_validations = max_possible - 1
51 | if verbose >= 0:
52 | print(
53 | "Too many training validations for length of data provided, decreasing num_validations to {}".format(
54 | num_validations
55 | )
56 | )
57 | else:
58 | num_validations = abs(int(num_validations))
59 | if num_validations <= 0:
60 | num_validations = 0
61 | return int(num_validations)
62 |
63 |
64 | def generate_validation_indices(
65 | validation_method,
66 | forecast_length,
67 | num_validations,
68 | df_wide_numeric,
69 | validation_params={},
70 | preclean=None,
71 | verbose=0,
72 | ):
73 | """generate validation indices (total indicies is num_validations + 1 as it includes zero based initial 'eval' section, yes, I know, confusing).
74 | Note that for most methods this is currently the full index, with the end of forecas't_length being the test period
75 | mixed_length now returns a tuple of (train index, test index).
76 |
77 | Args:
78 | validation_method (str): 'backwards', 'even', 'similarity', 'seasonal', 'seasonal 364', etc.
79 | forecast_length (int): number of steps ahead for forecast
80 | num_validations (int): number of additional validations after the first eval sample
81 | df_wide_numeric (pd.DataFrame): pandas DataFrame with a dt index and columns as time series
82 | preclean (dict): transformer dict, used for similarity cleaning
83 | verbose (int): verbosity
84 | """
85 | bval_list = ['backwards', 'back', 'backward']
86 | base_val_list = bval_list + ['even', 'Even']
87 | # num_validations = int(num_validations)
88 |
89 | # generate similarity matching indices (so it can fail now, not after all the generations)
90 | if validation_method == "similarity":
91 | sim_df = df_wide_numeric.copy()
92 | if preclean is None:
93 | params = {
94 | "fillna": "median", # mean or median one of few consistent things
95 | "transformations": {"0": "MaxAbsScaler"},
96 | "transformation_params": {
97 | "0": {},
98 | },
99 | }
100 | trans = GeneralTransformer(
101 | forecast_length=forecast_length, verbose=verbose, **params
102 | )
103 | sim_df = trans.fit_transform(sim_df)
104 |
105 | created_idx = retrieve_closest_indices(
106 | sim_df,
107 | num_indices=num_validations + 1,
108 | forecast_length=forecast_length,
109 | include_last=True,
110 | verbose=verbose,
111 | **validation_params,
112 | # **self.similarity_validation_params,
113 | )
114 | validation_indexes = [
115 | df_wide_numeric.index[df_wide_numeric.index <= indx[-1]]
116 | for indx in created_idx
117 | ]
118 | del sim_df
119 | elif validation_method == "seasonal":
120 | test, _ = seasonal_window_match(
121 | DTindex=df_wide_numeric.index,
122 | k=num_validations + 1,
123 | forecast_length=forecast_length,
124 | **validation_params,
125 | # **self.seasonal_validation_params,
126 | )
127 | validation_indexes = [df_wide_numeric.index[0 : x[-1]] for x in test.T]
128 | elif validation_method in base_val_list or (
129 | 'seasonal' in validation_method and validation_method != "seasonal"
130 | ):
131 | validation_indexes = [df_wide_numeric.index]
132 | elif validation_method in ["mixed_length"]:
133 | validation_indexes = []
134 | else:
135 | raise ValueError(
136 | f"Validation Method `{validation_method}` not recognized try 'backwards'"
137 | )
138 |
139 | if validation_method in bval_list:
140 | idx = df_wide_numeric.index
141 | shp0 = df_wide_numeric.shape[0]
142 | for y in range(num_validations):
143 | # gradually remove the end
144 | current_slice = idx[0 : shp0 - (y + 1) * forecast_length]
145 | validation_indexes.append(current_slice)
146 | elif validation_method in ['even', 'Even']:
147 | idx = df_wide_numeric.index
148 | # /num_validations biases it towards the last segment
149 | for y in range(num_validations):
150 | validation_size = len(idx) - forecast_length
151 | validation_size = validation_size / (num_validations + 1)
152 | validation_size = int(np.floor(validation_size))
153 | current_slice = idx[0 : validation_size * (y + 1) + forecast_length]
154 | validation_indexes.append(current_slice)
155 | elif 'seasonal' in validation_method and validation_method != "seasonal":
156 | idx = df_wide_numeric.index
157 | shp0 = df_wide_numeric.shape[0]
158 | seasonal_val_periods = extract_seasonal_val_periods(validation_method)
159 | for y in range(num_validations):
160 | val_per = (y + 1) * seasonal_val_periods
161 | if seasonal_val_periods < forecast_length:
162 | pass
163 | else:
164 | val_per = val_per - forecast_length
165 | val_per = shp0 - val_per
166 | current_slice = idx[0:val_per]
167 | validation_indexes.append(current_slice)
168 | elif validation_method in ["mixed_length"]:
169 | idx = df_wide_numeric.index
170 | shp0 = df_wide_numeric.shape[0]
171 | count = 0
172 | for y in range(num_validations + 1):
173 | if count == 0:
174 | cut = int(len(idx) / 2)
175 | validation_indexes.append((idx[:cut], idx[cut:]))
176 | elif count == 1:
177 | cut = len(idx) - int(len(idx) / 3)
178 | validation_indexes.append((idx[:cut], idx[cut:]))
179 | else:
180 | # gradually remove the end
181 | cut = shp0 - (y + 1) * forecast_length
182 | current_slice = idx[0:cut]
183 | current_slice_2 = idx[cut : cut + forecast_length]
184 | validation_indexes.append((current_slice, current_slice_2))
185 | count += 1
186 | return validation_indexes
187 |
--------------------------------------------------------------------------------
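
A worked example of the 'backwards' method, showing how each additional validation trims one forecast_length block off the end (a sketch with synthetic data):

```python
import pandas as pd
from autots.evaluator.validation import generate_validation_indices

idx = pd.date_range("2023-01-01", periods=100, freq="D")
df = pd.DataFrame({"series_1": range(100)}, index=idx)

indices = generate_validation_indices(
    "backwards", forecast_length=10, num_validations=2, df_wide_numeric=df
)
# full index first, then trimmed by one and two forecast_length blocks;
# the final 10 points of each index serve as that segment's test period
print([len(i) for i in indices])  # [100, 90, 80]
```
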
/autots/models/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Model Models
3 | """
4 |
--------------------------------------------------------------------------------
/autots/templates/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Model Templates
3 | """
4 |
--------------------------------------------------------------------------------
/autots/tools/__init__.py:
--------------------------------------------------------------------------------
1 | """Basic utilities."""
2 |
3 | from .cpu_count import cpu_count
4 | from .seasonal import seasonal_int
5 |
--------------------------------------------------------------------------------
/autots/tools/cointegration.py:
--------------------------------------------------------------------------------
1 | """Cointegration
2 |
3 | Johansen heavily based on Statsmodels source code
4 |
5 | BTCD heavily based on D. Barba
6 | https://towardsdatascience.com/canonical-decomposition-a-forgotten-method-for-time-series-cointegration-and-beyond-4d1213396da1
7 |
8 | """
9 |
10 | import datetime
11 | import numpy as np
12 | import pandas as pd
13 | from scipy.linalg import fractional_matrix_power
14 |
15 | # np.allclose(np.matmul(trans.components_, (df.values - trans.mean_).T).T, trans.transform(df))
16 | # np.allclose(np.matmul((df.values - trans.mean_), (trans.components_.T)), trans.transform(df))
17 | # np.allclose((np.matmul(transformed, trans.components_) + trans.mean_), trans.inverse_transform(transformed))
18 |
19 |
20 | def lagmat(
21 | x,
22 | maxlag: int,
23 | trim='forward',
24 | original="ex",
25 | ):
26 | """
27 | Create 2d array of lags. Modified from Statsmodels.
28 | """
29 | orig = x
30 | trim = "none" if trim is None else trim
31 | trim = trim.lower()
32 | is_pandas = isinstance(x, pd.DataFrame)
33 |
34 | dropidx = 0
35 | nobs, nvar = x.shape
36 | if original in ["ex", "sep"]:
37 | dropidx = nvar
38 | if maxlag >= nobs:
39 | raise ValueError("maxlag should be < nobs")
40 | lm = np.zeros((nobs + maxlag, nvar * (maxlag + 1)))
41 | for k in range(0, int(maxlag + 1)):
42 | lm[
43 | maxlag - k : nobs + maxlag - k,
44 | nvar * (maxlag - k) : nvar * (maxlag - k + 1),
45 | ] = x
46 |
47 | if trim in ("none", "forward"):
48 | startobs = 0
49 | elif trim in ("backward", "both"):
50 | startobs = maxlag
51 | else:
52 | raise ValueError("trim option not valid")
53 |
54 | if trim in ("none", "backward"):
55 | stopobs = len(lm)
56 | else:
57 | stopobs = nobs
58 |
59 | if is_pandas:
60 | x = orig
61 | x_columns = x.columns if isinstance(x, pd.DataFrame) else [x.name]
62 | columns = [str(col) for col in x_columns]
63 | for lag in range(maxlag):
64 | lag_str = str(lag + 1)
65 | columns.extend([str(col) + ".L." + lag_str for col in x_columns])
66 | lm = pd.DataFrame(lm[:stopobs], index=x.index, columns=columns)
67 | lags = lm.iloc[startobs:]
68 | if original in ("sep", "ex"):
69 | leads = lags[x_columns]
70 | lags = lags.drop(x_columns, axis=1)
71 | else:
72 | lags = lm[startobs:stopobs, dropidx:]
73 | if original == "sep":
74 | leads = lm[startobs:stopobs, :dropidx]
75 |
76 | if original == "sep":
77 | return lags, leads
78 | else:
79 | return lags
80 |
81 |
82 | def coint_johansen(endog, det_order=-1, k_ar_diff=1, return_eigenvalues=False):
83 | """Johansen cointegration test of the cointegration rank of a VECM, abbreviated from Statsmodels"""
84 |
85 | def detrend(y, order):
86 | if order == -1:
87 | return y
88 | else:
89 | from statsmodels.regression.linear_model import OLS
90 | return OLS(y, np.vander(np.linspace(-1, 1, len(y)), order + 1)).fit().resid
91 |
92 | def resid(y, x):
93 | if x.size == 0:
94 | return y
95 | r = y - np.dot(x, np.dot(np.linalg.pinv(x), y))
96 | return r
97 |
98 | endog = np.asarray(endog)
99 |
100 | # f is detrend transformed series, det_order is detrend data
101 | if det_order > -1:
102 | f = 0
103 | else:
104 | f = det_order
105 |
106 | endog = detrend(endog, det_order)
107 | dx = np.diff(endog, 1, axis=0)
108 | z = lagmat(dx, k_ar_diff)
109 | z = z[k_ar_diff:]
110 | z = detrend(z, f)
111 |
112 | dx = dx[k_ar_diff:]
113 |
114 | dx = detrend(dx, f)
115 | r0t = resid(dx, z)
116 | # GH 5731, [:-0] does not work, need [:t-0]
117 | lx = endog[: (endog.shape[0] - k_ar_diff)]
118 | lx = lx[1:]
119 | dx = detrend(lx, f)
120 | rkt = resid(dx, z) # level on lagged diffs
121 | # Level covariance after filtering k_ar_diff
122 | skk = np.dot(rkt.T, rkt) / rkt.shape[0]
123 | # Covariance between filtered and unfiltered
124 | sk0 = np.dot(rkt.T, r0t) / rkt.shape[0]
125 | s00 = np.dot(r0t.T, r0t) / r0t.shape[0]
126 | sig = np.dot(sk0, np.dot(np.linalg.pinv(s00), sk0.T))
127 | tmp = np.linalg.pinv(skk)
128 | au, du = np.linalg.eig(np.dot(tmp, sig)) # au is eval, du is evec
129 |
130 | temp = np.linalg.pinv(np.linalg.cholesky(np.dot(du.T, np.dot(skk, du))))
131 | dt = np.dot(du, temp)
132 | if return_eigenvalues:
133 | return au, dt
134 | else:
135 | return dt
136 |
137 |
138 | def btcd_decompose(
139 | p_mat: np.ndarray,
140 | regression_model,
141 | max_lag: int = 1,
142 | return_eigenvalues=False,
143 | ):
144 | """Calculate decomposition.
145 | p_mat is of shape(t,n), wide style data.
146 | """
147 | B_sqrt_inv = _get_b_sqrt_inv(p_mat)
148 | A = _get_A(p_mat, regression_model, max_lag=max_lag)
149 | D = np.matmul(np.matmul(B_sqrt_inv, A), B_sqrt_inv)
150 | eigenvalues, eigenvectors = np.linalg.eigh(D)
151 | eigenvectors = np.matmul(B_sqrt_inv, eigenvectors)
152 | if return_eigenvalues:
153 | return eigenvalues, eigenvectors
154 | else:
155 | return eigenvectors
156 |
157 |
158 | def _get_expected_dyadic_prod(V):
159 | return (1.0 / V.shape[0]) * np.matmul(V.T, V)
160 |
161 |
162 | def _get_b_sqrt_inv(p_mat):
163 | """Rows of p_mat represent t index, columns represent each path."""
164 | B = _get_expected_dyadic_prod(p_mat)
165 | B_sqrt = fractional_matrix_power(B, 0.5)
166 | return np.linalg.pinv(B_sqrt)
167 |
168 |
169 | def _get_y(p_mat: np.ndarray, p_mat_col_idx: int, max_lag: int):
170 | """
171 | Returns a 1D array which corresponds to a specific column of p_mat,
172 | with the first max_lag idxs trimmed.
173 | the index of this column is p_mat_col_idx
174 | """
175 | return p_mat[max_lag:, p_mat_col_idx]
176 |
177 |
178 | def _get_q_t(regression_model, X: np.ndarray, y: np.ndarray):
179 | """
180 | Expected value for p_t (q model) using RegressionModel.
181 | - X is a numpy 2D array of shape (T-max_lag, n_features)
182 | - y is a numpy 1D array of shape (T-max_lag,)
183 | """
184 | regression_model.fit(X, y)
185 | return regression_model.predict(X)
186 |
187 |
188 | def _get_A(p_mat: np.ndarray, regression_model, max_lag: int = 1):
189 | """Estimate A using an instance of RegressionModel."""
190 | X = np.concatenate(
191 | [p_mat[max_lag - lag : -lag, :] for lag in range(1, max_lag + 1)], axis=1
192 | )
193 | qs = []
194 | # model each column j of p_mat.
195 | for j in range(p_mat.shape[1]):
196 | y = _get_y(p_mat, j, max_lag)
197 | q_j = _get_q_t(regression_model, X, y)
198 | qs.append(q_j)
199 | q_mat = np.asarray(qs).T
200 | return _get_expected_dyadic_prod(q_mat)
201 |
202 |
203 | def fourier_series(dates, period, series_order):
204 | """Provides Fourier series components with the specified frequency
205 | and order.
206 |
207 | Parameters
208 | ----------
209 | dates: pd.Series containing timestamps.
210 | period: Number of days of the period.
211 | series_order: Number of components.
212 |
213 | Returns
214 | -------
215 | Matrix with seasonality features.
216 | """
217 | # Fourier Detrend
218 | # periods, order, start_shift, and scaling (multi or univariate)
219 | # then just subtract
220 |
221 | # convert to days since epoch
222 | dates = pd.DatetimeIndex(dates)  # ensure .total_seconds() below works for Series input as well
223 | t = np.array(
224 | (dates - datetime.datetime(1970, 1, 1)).total_seconds().astype(float)
225 | ) / (3600 * 24.0)
226 | result = np.column_stack(
227 | [
228 | fun((2.0 * (i + 1) * np.pi * t / period))
229 | for i in range(series_order)
230 | for fun in (np.sin, np.cos)
231 | ]
232 | )
233 | return result
--------------------------------------------------------------------------------
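
A small sketch of `coint_johansen` on synthetic cointegrated series; the columns of the returned eigenvector matrix are candidate cointegrating vectors:

```python
import numpy as np
from autots.tools.cointegration import coint_johansen

rng = np.random.default_rng(0)
y1 = np.cumsum(rng.normal(size=500))       # random walk
y2 = y1 + rng.normal(scale=0.5, size=500)  # tracks y1, so the pair is cointegrated
endog = np.column_stack([y1, y2])

evec = coint_johansen(endog, det_order=-1, k_ar_diff=1)
combo = endog @ evec[:, 0]  # this combination should be roughly mean-reverting
print(combo.std(), y1.std())
```
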
/autots/tools/cpu_count.py:
--------------------------------------------------------------------------------
1 | """CPU counter for multiprocesing."""
2 |
3 |
4 | def cpu_count(modifier: float = 1):
5 | """Find available CPU count, running on both Windows/Linux.
6 |
7 | Attempts to be very conservative:
8 | * Remove Intel Hyperthreading logical cores
9 | * Find max cores allowed to the process, if less than machine has total
10 |
11 | Runs best with psutil installed; falls back to mkl, then os core count / 2
12 |
13 | Args:
14 | modifier (float): multiple CPU count by this value
15 | """
16 | import os
17 |
18 | # your basic cpu count, includes logical cores and all of machine
19 | num_cores = os.cpu_count()
20 | if num_cores is None:
21 | num_cores = -1
22 |
23 | # includes logical cores, and counts only cores available to task
24 | try:
25 | import psutil
26 |
27 | available_cores = len(psutil.Process().cpu_affinity())
28 | except Exception:
29 | # this only works on UNIX I believe
30 | try:
31 | available_cores = len(os.sched_getaffinity(0))
32 | except Exception:
33 | available_cores = -1
34 |
35 | # only physical cores, includes all available to machine
36 | try:
37 | import psutil
38 |
39 | ps_cores = psutil.cpu_count(logical=False)
40 | except Exception:
41 | try:
42 | import mkl
43 |
44 | ps_cores = int(mkl.get_max_threads())
45 | except Exception:
46 | ps_cores = int(num_cores / 2)
47 |
48 | core_list = [num_cores, available_cores, ps_cores]
49 | core_list = [x for x in core_list if x > 0]
50 | if core_list:
51 | core_count = min(core_list)
52 | else:
53 | core_count = 1
54 | if modifier != 1:
55 | core_count = int(modifier * core_count)
56 | core_count = 1 if core_count < 1 else core_count
57 | return core_count
58 |
59 |
60 | def set_n_jobs(n_jobs, verbose=0):
61 | if n_jobs is None:
62 | return None
63 | frac_flag = False
64 | if isinstance(n_jobs, float):
65 | frac_flag = n_jobs < 1 and n_jobs > 0
66 | if n_jobs == 'auto' or frac_flag or n_jobs == -1:
67 | if frac_flag:
68 | n_jobs = cpu_count(modifier=n_jobs)
69 | else:
70 | n_jobs = cpu_count(modifier=0.75)
71 | if verbose > 0:
72 | print(f"Using {n_jobs} cpus for n_jobs.")
73 | elif str(n_jobs).isdigit():
74 | n_jobs = int(n_jobs)
75 | elif n_jobs < 0:
76 | core_count = cpu_count(modifier=1) + 1 + n_jobs
77 | n_jobs = core_count if core_count > 1 else 1
78 | elif isinstance(n_jobs, (float, int)):
79 | pass
80 | else:
81 | raise ValueError("n_jobs must be 'auto' or integer")
82 | if n_jobs <= 0:
83 | n_jobs = 1
84 | return int(n_jobs)
85 |
--------------------------------------------------------------------------------
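
Behavior of the two helpers above, for reference (outputs depend on the machine):

```python
from autots.tools.cpu_count import cpu_count, set_n_jobs

print(cpu_count())               # conservative physical-core estimate
print(cpu_count(modifier=0.5))   # roughly half of that, minimum 1
print(set_n_jobs('auto'))        # ~75% of cpu_count()
print(set_n_jobs(0.5))           # floats in (0, 1) are treated as a fraction of cores
print(set_n_jobs(4))             # plain integers pass through unchanged
```
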
/autots/tools/fft.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Mon Oct 9 22:07:37 2023
5 |
6 | @author: colincatlin
7 | """
8 | import numpy as np
9 |
10 |
11 | def fourier_extrapolation(
12 | x, forecast_length=10, n_harm=10, detrend='linear', freq_range=None
13 | ):
14 | m, n = x.shape
15 | t = np.arange(0, m)
16 |
17 | # Detrend
18 | if detrend == 'linear':
19 | p = np.polyfit(t, x, 1).T
20 | x_notrend = x - np.outer(t, p[:, 0])
21 | elif detrend == 'quadratic':
22 | p = np.polyfit(t, x, 2).T
23 | x_notrend = x - np.outer(t**2, p[:, 0]) - np.outer(t, p[:, 1])
24 | elif detrend is None:
25 | x_notrend = x
26 | else:
27 | raise ValueError(f"Unsupported detrend option: {detrend}")
28 |
29 | # FFT
30 | x_freqdom = np.fft.fft(x_notrend, axis=0)
31 |
32 | # Frequencies and sorted indices
33 | f = np.fft.fftfreq(m)
34 | indexes = np.argsort(np.abs(f))
35 |
36 | # Frequency range filtering
37 | if freq_range:
38 | low, high = freq_range
39 | indexes = [i for i in indexes if low <= np.abs(f[i]) <= high]
40 |
41 | if n_harm is None:
42 | use_idx = indexes
43 | elif isinstance(n_harm, (int, float)):
44 | # handle float as percentage
45 | if 0 < n_harm < 1:
46 | use_idx = indexes[: int(len(indexes) * n_harm)]
47 | # handle negative percentage ie last N percentage
48 | elif -1 < n_harm < 0:
49 | use_idx = indexes[int(len(indexes) * n_harm) :]
50 | elif n_harm <= -1:
51 | use_idx = indexes[n_harm * 2 :]
52 | # handle exact number
53 | else:
54 | use_idx = indexes[: 1 + n_harm * 2]
55 | elif isinstance(n_harm, str):
56 | if "mid" in n_harm:
57 | midp = int(''.join(filter(str.isdigit, n_harm)))
58 | use_idx = indexes[midp : midp + 40]
59 | else:
60 | raise ValueError(f"n_harm value {n_harm} not recognized")
61 |
62 | t_extended = np.arange(0, m + forecast_length)
63 | restored_sig = np.zeros((t_extended.size, n))
64 |
65 | # Use harmonics to reconstruct signal
66 | for i in use_idx:
67 | ampli = np.abs(x_freqdom[i]) / m
68 | phase = np.angle(x_freqdom[i])
69 | restored_sig += ampli * np.cos(2 * np.pi * f[i] * t_extended[:, None] + phase)
70 | """
71 | # Use harmonics to reconstruct signal
72 | for i in indexes[10:10 + n_harm * 2]:
73 | # for i in indexes[-2000:]:
74 | ampli = np.abs(x_freqdom[i]) / m
75 | phase = np.angle(x_freqdom[i])
76 | restored_sig += (ampli * np.cos(2 * np.pi * f[i] * t_extended[:, None] + phase))
77 |
78 | nw = pd.DataFrame((restored_sig + np.outer(t_extended, p[:, 0])), columns=df.columns)
79 | nw.index = df.index.union(pd.date_range(start=df.index[-1], periods=forecast_length+1, freq='D'))
80 | col = 'FOODS_2_025_TX_1_evaluation' # 'wiki_all'
81 | nw['actual'] = df[col]
82 | nw[['actual', col]].plot()
83 | """
84 |
85 | # Add trend back
86 | if detrend == 'linear':
87 | return restored_sig + np.outer(t_extended, p[:, 0])
88 | elif detrend == 'quadratic':
89 | return (
90 | restored_sig
91 | + np.outer(t_extended**2, p[:, 0])
92 | + np.outer(t_extended, p[:, 1])
93 | )
94 | else:
95 | return restored_sig
96 |
97 |
98 | class FFT(object):
99 | def __init__(self, n_harm=10, detrend='linear', freq_range=None):
100 | self.n_harm = n_harm
101 | self.detrend = detrend
102 | self.freq_range = freq_range
103 |
104 | def fit(self, x):
105 | self.m, self.n = x.shape
106 | t = np.arange(0, self.m)
107 |
108 | # Detrend
109 | if self.detrend == 'linear':
110 | self.p = np.polyfit(t, x, 1).T
111 | x_notrend = x - np.outer(t, self.p[:, 0])
112 | elif self.detrend == 'quadratic':
113 | self.p = np.polyfit(t, x, 2).T
114 | x_notrend = x - np.outer(t**2, self.p[:, 0]) - np.outer(t, self.p[:, 1])
115 | elif self.detrend == 'cubic':
116 | self.p = np.polyfit(t, x, 3).T
117 | x_notrend = (
118 | x
119 | - np.outer(t**3, self.p[:, 0])
120 | - np.outer(t**2, self.p[:, 1])
121 | - np.outer(t, self.p[:, 2])
122 | )
123 | elif self.detrend == 'quartic':
124 | self.p = np.polyfit(t, x, 4).T
125 | x_notrend = (
126 | x
127 | - np.outer(t**4, self.p[:, 0])
128 | - np.outer(t**3, self.p[:, 1])
129 | - np.outer(t**2, self.p[:, 2])
130 | - np.outer(t, self.p[:, 3])
131 | )
132 | elif self.detrend is None:
133 | x_notrend = x
134 | else:
135 | raise ValueError(f"Unsupported detrend option: {self.detrend}")
136 |
137 | # FFT
138 | self.x_freqdom = np.fft.fft(x_notrend, axis=0)
139 |
140 | # Frequencies and sorted indices
141 | self.f = np.fft.fftfreq(self.m)
142 | indexes = np.argsort(np.abs(self.f))
143 |
144 | # Frequency range filtering
145 | if self.freq_range:
146 | low, high = self.freq_range
147 | indexes = [i for i in indexes if low <= np.abs(self.f[i]) <= high]
148 |
149 | if self.n_harm is None:
150 | use_idx = indexes
151 | elif isinstance(self.n_harm, (int, float)):
152 | # handle float as percentage
153 | if 0 < self.n_harm < 1:
154 | use_idx = indexes[: int(len(indexes) * self.n_harm)]
155 | # handle negative percentage ie last N percentage
156 | elif -1 < self.n_harm < 0:
157 | use_idx = indexes[int(len(indexes) * self.n_harm) :]
158 | elif self.n_harm <= -1:
159 | use_idx = indexes[self.n_harm * 2 :]
160 | # handle exact number
161 | else:
162 | use_idx = indexes[: 1 + self.n_harm * 2]
163 | elif isinstance(self.n_harm, str):
164 | if "mid" in self.n_harm:
165 | midp = int(''.join(filter(str.isdigit, self.n_harm)))
166 | use_idx = indexes[midp : midp + 41]
167 | else:
168 | raise ValueError(f"n_harm value {self.n_harm} not recognized")
169 | self.use_idx = use_idx
170 |
171 | return self
172 |
173 | def generate_harmonics_dataframe(self, forecast_length=0):
174 | extended_m = self.m + forecast_length
175 | harmonics_data = np.zeros((extended_m, len(self.use_idx) * 2))
176 |
177 | for i, idx in enumerate(self.use_idx):
178 | freq_component = np.fft.ifft(self.x_freqdom[idx], n=self.m, axis=0)
179 | extended_freq_component = np.tile(
180 | freq_component, (extended_m // self.m) + 1
181 | )[:extended_m]
182 | harmonics_data[:, 2 * i] = np.real(extended_freq_component).flatten()
183 | harmonics_data[:, 2 * i + 1] = np.imag(extended_freq_component).flatten()
184 |
185 | return harmonics_data
186 |
187 | def predict(self, forecast_length=0):
188 | # this rather assumes you care only about historical + fcst of length n after
189 | t_extended = np.arange(0, self.m + forecast_length)
190 | restored_sig = np.zeros((t_extended.size, self.n))
191 |
192 | # Use harmonics to reconstruct signal
193 | for i in self.use_idx:
194 | ampli = np.abs(self.x_freqdom[i]) / self.m
195 | phase = np.angle(self.x_freqdom[i])
196 | restored_sig += ampli * np.cos(
197 | 2 * np.pi * self.f[i] * t_extended[:, None] + phase
198 | )
199 |
200 | # Add trend back
201 | if self.detrend == 'linear':
202 | return restored_sig + np.outer(t_extended, self.p[:, 0])
203 | elif self.detrend == 'quadratic':
204 | return (
205 | restored_sig
206 | + np.outer(t_extended**2, self.p[:, 0])
207 | + np.outer(t_extended, self.p[:, 1])
208 | )
209 | elif self.detrend == 'cubic':
210 | return (
211 | restored_sig
212 | + np.outer(t_extended**3, self.p[:, 0])
213 | + np.outer(t_extended**2, self.p[:, 1])
214 | + np.outer(t_extended, self.p[:, 2])
215 | )
216 | elif self.detrend == 'quartic':
217 | return (
218 | restored_sig
219 | + np.outer(t_extended**4, self.p[:, 0])
220 | + np.outer(t_extended**3, self.p[:, 1])
221 | + np.outer(t_extended**2, self.p[:, 2])
222 | + np.outer(t_extended, self.p[:, 3])
223 | )
224 | else:
225 | return restored_sig
226 |
--------------------------------------------------------------------------------
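
A usage sketch of the `FFT` class on synthetic seasonal data; `predict` returns the historical reconstruction plus the extrapolated steps:

```python
import numpy as np
from autots.tools.fft import FFT

t = np.arange(400)
x = np.column_stack([
    np.sin(2 * np.pi * t / 28) + 0.1 * t / 400,  # monthly-ish cycle plus slight trend
    np.cos(2 * np.pi * t / 7),                   # weekly-ish cycle
]) + np.random.normal(scale=0.1, size=(400, 2))

model = FFT(n_harm=10, detrend='linear').fit(x)
full = model.predict(forecast_length=30)
print(full.shape)  # (430, 2): 400 historical + 30 forecast steps
```
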
/autots/tools/fir_filter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Fri Sep 6 23:59:35 2024
5 |
6 | @author: colincatlin
7 | """
8 | import random
9 | import numpy as np
10 |
11 | try:
12 | from scipy.signal import fftconvolve, firwin, convolve, lfilter
13 | except Exception:
14 | pass
15 |
16 |
17 | def apply_fir_filter_to_timeseries(
18 | data, sampling_frequency, numtaps=512, cutoff_hz=20, window='hamming'
19 | ):
20 | """
21 | Apply FIR filter to an array of time series data with shape (observations, series).
22 |
23 | Parameters:
24 | - data: numpy array of shape (observations, series), where each column represents a time series
25 | - sampling_frequency: The sampling frequency of the time series data (e.g., 365 for daily data)
26 | - numtaps: Number of taps (filter length)
27 | - cutoff_hz: The cutoff frequency in Hz (for filtering purposes)
28 | - window: The windowing function to use for FIR filter design ('hamming', 'hann', etc.)
29 |
30 | Returns:
31 | - filtered_data: The filtered version of the input data
32 | """
33 |
34 | # Ensure the data has the correct shape: (observations, series)
35 | # if data.shape[0] < data.shape[1]:
36 | # data = data.T # Transpose if necessary to match (observations, series)
37 |
38 | # Normalize the cutoff frequency with respect to the Nyquist frequency
39 | nyquist_frequency = 0.5 * sampling_frequency
40 | cutoff_norm = cutoff_hz / nyquist_frequency
41 |
42 | # Design the FIR filter using the given parameters
43 | fir_coefficients = firwin(numtaps=numtaps, cutoff=cutoff_norm, window=window)
44 |
45 | # Apply the FIR filter to each time series (each column in the data)
46 | # Convolve each column with the FIR filter
47 | filtered_data = np.apply_along_axis(
48 | lambda x: convolve(x, fir_coefficients, mode='same'), axis=0, arr=data
49 | )
50 |
51 | return filtered_data
52 |
53 |
54 | def apply_fir_filter_time_domain(
55 | data, sampling_frequency, numtaps=512, cutoff_hz=20, window='hamming'
56 | ):
57 | """
58 | Apply FIR filter using time-domain convolution (lfilter) for smaller memory usage.
59 | This function has padding issues currently.
60 | """
61 | # Ensure the data has the correct shape: (observations, series)
62 | # if data.shape[0] < data.shape[1]:
63 | # data = data.T # Transpose if necessary to match (observations, series)
64 |
65 | # Normalize the cutoff frequency with respect to the Nyquist frequency
66 | nyquist_frequency = 0.5 * sampling_frequency
67 | cutoff_norm = cutoff_hz / nyquist_frequency
68 |
69 | # Design the FIR filter
70 | fir_coefficients = firwin(numtaps=numtaps, cutoff=cutoff_norm, window=window)
71 |
72 | # Apply time-domain filtering (lfilter)
73 | filtered_data = lfilter(fir_coefficients, 1.0, data, axis=0)
74 |
75 | return filtered_data
76 |
77 |
78 | def fft_fir_filter_to_timeseries(
79 | data,
80 | sampling_frequency,
81 | numtaps=512,
82 | cutoff_hz=20,
83 | window='hamming',
84 | chunk_size=1000,
85 | ):
86 | """
87 | Apply FIR filter to an array of time series data with shape (observations, series).
88 |
89 | Parameters:
90 | - data: numpy array of shape (observations, series), where each column represents a time series
91 | - sampling_frequency: The sampling frequency of the time series data (e.g., 365 for daily data)
92 | - numtaps: Number of taps (filter length)
93 | - cutoff_hz: The cutoff frequency in Hz (for filtering purposes)
94 | - window: The windowing function to use for FIR filter design ('hamming', 'hann', etc.)
95 | - chunk_size: Number of series to filter per batch; chunking reduces peak memory on wide data
96 | Returns:
97 | - filtered_data: The filtered version of the input data
98 | """
99 | # Ensure the data has the correct shape: (observations, series)
100 | # if data.shape[0] < data.shape[1]:
101 | # data = data.T # Transpose if necessary to match (observations, series)
102 |
103 | # Normalize the cutoff frequency with respect to the Nyquist frequency
104 | nyquist_frequency = 0.5 * sampling_frequency
105 | cutoff_norm = cutoff_hz / nyquist_frequency
106 |
107 | if window == 'kaiser':
108 | beta = 14
109 | window = ('kaiser', beta)
110 |
111 | # Design the FIR filter using the given parameters
112 | fir_coefficients = firwin(numtaps=numtaps, cutoff=cutoff_norm, window=window)
113 |
114 | # Pad the beginning of the data to shift edge artifacts to the start
115 | # pad_width = numtaps - 1
116 | # padded_data = np.pad(data, ((pad_width, 0), (0, 0)), mode='reflect')
117 | pad_width_start = numtaps - 1
118 | pad_width_end = numtaps - 1
119 | padded_data = np.pad(
120 | data, ((pad_width_start, pad_width_end), (0, 0)), mode='reflect'
121 | )
122 |
123 | num_series = data.shape[1]
124 | if chunk_size is not None and chunk_size < num_series:
125 | # Filter the data in chunks to reduce memory load
126 | filtered_data = np.zeros_like(data)
127 |
128 | for start in range(0, num_series, chunk_size):
129 | end = min(start + chunk_size, num_series)
130 | chunk = padded_data[:, start:end]
131 | filtered_chunk = fftconvolve(
132 | chunk, fir_coefficients[:, np.newaxis], mode='same', axes=0
133 | )
134 | filtered_data[:, start:end] = filtered_chunk[
135 | pad_width_start:-pad_width_end, :
136 | ] # [pad_width:, :]
137 | else:
138 | # Apply FFT convolution across all time series at once
139 | filtered_padded_data = fftconvolve(
140 | padded_data, fir_coefficients[:, np.newaxis], mode='same', axes=0
141 | )
142 |
143 | # Remove the padding from the start (discard the first `pad_width` samples)
144 | filtered_data = filtered_padded_data[
145 | pad_width_start:-pad_width_end, :
146 | ] # [pad_width:, :]
147 |
148 | return filtered_data
149 |
150 |
151 | def generate_random_fir_params(method='random', data_type="time_series"):
152 | params = {}
153 |
154 | # Random number of taps (filter length)
155 | params["numtaps"] = random.choices(
156 | [4, 7, 12, 32, 64, 128, 256, 512, 1024],
157 | [0.1, 0.2, 0.1, 0.1, 0.2, 0.2, 0.1, 0.1, 0.1],
158 | )[0]
159 |
160 | if data_type == "audio":
161 | # Higher cutoff frequencies for audio
162 | cutoff_choices = [20, 100, 500, 1000, 5000, 10000, 15000]
163 | cutoff_weights = [0.2, 0.3, 0.2, 0.1, 0.1, 0.05, 0.05]
164 | else:
165 | # Lower cutoff frequencies for time series data
166 | cutoff_choices = [0.01, 0.1, 0.5, 5, 10, 20, 50, 100, 500]
167 | cutoff_weights = [0.3, 0.3, 0.2, 0.05, 0.05, 0.05, 0.05, 0.05, 0.01]
168 |
169 | params["cutoff_hz"] = random.choices(cutoff_choices, cutoff_weights)[0]
170 |
171 | # Random window type
172 | params["window"] = random.choices(
173 | [
174 | "hamming",
175 | "hann",
176 | "blackman",
177 | "kaiser",
178 | "tukey",
179 | "boxcar",
180 | "taylor",
181 | ("kaiser", 4.0),
182 | ('exponential', None, 0.25),
183 | ('exponential', None, 1.0),
184 | ('exponential', None, 3.0),
185 | 'triang',
186 | ("gaussian", 1.0),
187 | ('chebwin', 100),
188 | ],
189 | [
190 | 0.2,
191 | 0.1,
192 | 0.1,
193 | 0.1,
194 | 0.05,
195 | 0.15,
196 | 0.05,
197 | 0.05,
198 | 0.05,
199 | 0.05,
200 | 0.025,
201 | 0.05,
202 | 0.025,
203 | 0.025,
204 | ],
205 | )[0]
206 |
207 | return params
208 |
209 |
210 | """
211 | # Example Usage with Time Series Data
212 | sampling_frequency = 365 # Example for daily data with 365 observations per year
213 | num_series = 5 # Number of time series
214 | time_vector = np.arange(365) # Example time vector (one year of daily data)
215 |
216 | # Generate an array of time series signals: Each series has random noise and some seasonality
217 | time_series_data = np.array([
218 | np.sin(2 * np.pi * time_vector / 365) + 0.5 * np.random.randn(len(time_vector))
219 | for _ in range(num_series)
220 | ]).T # Transposed to (observations, series)
221 |
222 | # Apply the FIR filter to the time series data
223 | filtered_time_series = apply_fir_filter_to_timeseries(time_series_data, sampling_frequency, numtaps=256, cutoff_hz=30)
224 |
225 | # Output filtered time series
226 | import pandas as pd
227 | pd.DataFrame(filtered_time_series).plot()
228 |
229 | # Apply the FIR filter to the time series data
230 | filtered_time_series = apply_fir_filter_time_domain(time_series_data, sampling_frequency, numtaps=256, cutoff_hz=30)
231 |
232 | # Output filtered time series
233 | import pandas as pd
234 | pd.DataFrame(filtered_time_series).plot()
235 |
236 | # Apply the FIR filter to the time series data
237 | filtered_time_series = fft_fir_filter_to_timeseries(time_series_data, sampling_frequency, numtaps=256, cutoff_hz=30)
238 |
239 | # Output filtered time series
240 | import pandas as pd
241 | pd.DataFrame(filtered_time_series).plot()
242 |
243 | # Apply the FIR filter to the time series data
244 | filtered_time_series = fft_fir_filter_to_timeseries(time_series_data, sampling_frequency, numtaps=256, cutoff_hz=30, chunk_size=2)
245 |
246 | # Output filtered time series
247 | import pandas as pd
248 | pd.DataFrame(filtered_time_series).plot()
249 |
250 | """
251 |
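252 | """
253 | # Additional illustrative sketch (an addition, not original source): draw random
254 | # FIR parameters and apply them with the FFT-based filter. `time_series_data` and
255 | # `sampling_frequency` are from the example block above.
256 | params = generate_random_fir_params(data_type="time_series")
257 | filtered = fft_fir_filter_to_timeseries(time_series_data, sampling_frequency, **params)
258 | """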
--------------------------------------------------------------------------------
/autots/tools/hierarchial.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 |
4 |
5 | class hierarchial(object):
6 | """Create hierarchial series, then reconcile.
7 |
8 | Currently only performs one-level groupings.
9 | Args:
10 | grouping_method (str): method to create groups. 'User' requires hier_id input of groupings.
11 | n_groups (int): number of groups, if above is not 'User'
12 | reconciliation (str): None, or 'mean' method to combine top and bottom forecasts.
13 | grouping_ids (dict): dict of series_id: group_id to use if grouping is 'User'
14 | """
15 |
16 | def __init__(
17 | self,
18 | grouping_method: str = 'tile',
19 | n_groups: int = 5,
20 | reconciliation: str = 'mean',
21 | grouping_ids: dict = None,
22 | ):
23 | self.grouping_method = str(grouping_method).lower()
24 | self.n_groups = n_groups
25 | self.reconciliation = reconciliation
26 | self.grouping_ids = grouping_ids
27 |
28 | if self.grouping_method == 'user':
29 | if grouping_ids is None:
30 | raise ValueError("grouping_ids must be provided.")
31 |
32 | def fit(self, df):
33 | """Construct and save object info."""
34 | # construct grouping_ids if not given
35 | if self.grouping_method != 'user':
36 | num_hier = df.shape[1] / self.n_groups
37 | if self.grouping_method == 'dbscan':
38 | X = df.mean().values.reshape(-1, 1)
39 | from sklearn.cluster import DBSCAN
40 |
41 | clustering = DBSCAN(eps=0.5, min_samples=2).fit(X)
42 | grouping_ids = clustering.labels_
43 | elif self.grouping_method == 'tile':
44 | grouping_ids = np.tile(
45 | np.arange(self.n_groups), int(np.ceil(num_hier))
46 | )[: df.shape[1]]
47 | elif self.grouping_method == 'alternating':
48 | grouping_ids = np.repeat(
49 | np.arange(self.n_groups), int(np.ceil(num_hier))
50 | )[: df.shape[1]]
51 | elif self.grouping_method == 'kmeans':
52 | from sklearn.cluster import KMeans
53 |
54 | X = df.mean().values.reshape(-1, 1)
55 | kmeans = KMeans(n_clusters=self.n_groups, random_state=0).fit(X)
56 | grouping_ids = kmeans.labels_
57 | grouping_ids = grouping_ids.astype(str).astype(object)  # the np.object alias was removed in numpy 1.24
58 | # z is a deliberate typo to make such an id very rare in source
59 | grouping_ids = grouping_ids + '_hierarchy_levelz'
60 | grouping_ids = dict(zip(df.columns.tolist(), grouping_ids))
61 | self.grouping_ids = grouping_ids
62 | else:
63 | # fix missing or extra ids
64 | grouping_ids = {}
65 | for x in df.columns:
66 | if x not in self.grouping_ids.keys():
67 | grouping_ids[x] = 'hierarchy_levelz'
68 | else:
69 | grouping_ids[x] = self.grouping_ids[x]
70 | self.grouping_ids = grouping_ids.copy()
71 |
72 | self.top_ids = sorted(set(grouping_ids.values()))  # list, as pandas indexing rejects sets
73 | self.bottom_ids = list(grouping_ids.keys())
74 |
75 | hier = df.abs().groupby(grouping_ids, axis=1).sum()
76 | self.hier = hier
77 |
78 | if self.reconciliation == 'mean':
79 | level_sums = pd.DataFrame(hier.sum(axis=0))
80 | individual_sums = pd.DataFrame(df.abs().sum(axis=0))
81 | divisors = pd.DataFrame.from_dict(grouping_ids, orient='index')
82 | divisors.columns = ['group']
83 | divisors = divisors.merge(level_sums, left_on='group', right_index=True)
84 | divisors = divisors.merge(individual_sums, left_index=True, right_index=True)
85 | divisors.columns = ['group', 'divisor', 'value']
86 | divisors['fraction'] = divisors['value'] / divisors['divisor']
87 | self.divisors = divisors
88 |
89 | return self
90 |
91 | def transform(self, df):
92 | """Apply hierarchy to existing data with bottom levels only."""
93 | try:
94 | return pd.concat([df, self.hier], axis=1)
95 | except Exception as e:
96 | raise ValueError(f"transform failed ({e}); has .fit() been called?")
97 |
98 | def reconcile(self, df):
99 | """Apply to forecasted data containing bottom and top levels."""
100 | if self.reconciliation is None:
101 | return df[self.bottom_ids]
102 | elif self.reconciliation == 'mean':
103 | fore = df
104 | fracs = pd.DataFrame(
105 | np.repeat(
106 | self.divisors['fraction'].values.reshape(1, -1),
107 | fore.shape[0],
108 | axis=0,
109 | )
110 | )
111 | fracs.index = fore.index
112 | fracs.columns = pd.MultiIndex.from_frame(
113 | self.divisors.reset_index()[['index', 'group']]
114 | )
115 |
116 | top_level = fore[self.top_ids]
117 | bottom_up = (
118 | fore[self.bottom_ids].abs().groupby(self.grouping_ids, axis=1).sum()
119 | )
120 |
121 | diff = (top_level - bottom_up) / 2
122 |
123 | # gotta love that 'level' option on multiply for easy broadcasting
124 | test = fracs.multiply(diff, level='group')
125 | test.columns = self.divisors.index
126 |
127 | result = fore[self.bottom_ids] + test
128 | return result
129 | else:
130 | print("Complete and utter failure.")
131 | return df
132 |
133 |
134 | """
135 | grouping_ids = {
136 | 'CSUSHPISA': 'A',
137 | 'EMVOVERALLEMV': 'A',
138 | 'EXCAUS': 'exchange rates',
139 | 'EXCHUS': 'exchange rates',
140 | 'EXUSEU': 'exchange rates',
141 | 'GS10': 'B',
142 | 'MCOILWTICO': 'C',
143 | 'T10YIEM': 'C',
144 | 'USEPUINDXM': 'C'
145 | }
146 | test = hierarchial(n_groups=3, grouping_method='dbscan',
147 | grouping_ids=None, reconciliation='mean').fit(df)
148 | test_df = test.transform(df)
149 | test.reconcile(test_df)
150 | """
151 | # how to assign groups
152 | # how to assign blame/reconcile
153 | # multiple group levels
154 | # one additional overall-level
155 |
--------------------------------------------------------------------------------
/autots/tools/holiday.py:
--------------------------------------------------------------------------------
1 | """Manage holiday features."""
2 |
3 | import numpy as np
4 | import pandas as pd
5 | from autots.tools.shaping import infer_frequency
6 |
7 |
8 | def holiday_flag(
9 | DTindex,
10 | country: str = 'US',
11 | encode_holiday_type: bool = False,
12 | holidays_subdiv=None,
13 | ):
14 | """Create a 0/1 flag for given datetime index. Includes fallback to pandas for US holidays if holidays package unavailable.
15 |
16 | Args:
17 | DTindex (pd.DatetimeIndex): DatetimeIndex of dates to create flags
18 | country (str): to pass through to python package Holidays
19 | also accepts a list of countries, but not a list of subdivisions
20 | encode_holiday_type (bool): if True, each holiday gets a unique integer column, if False, 0/1 for all holidays
21 | holidays_subdiv (str): subdivision (i.e., state), if used
22 |
23 | Returns:
24 | pd.DataFrame with DatetimeIndex
25 | """
26 | use_index = DTindex.copy()
27 | # extend the index to make sure all holidays are captured in holiday flag
28 | if encode_holiday_type:
29 | frequency = infer_frequency(use_index)
30 | new_index = pd.date_range(
31 | use_index[-1], end=use_index[-1] + pd.Timedelta(days=900), freq=frequency
32 | )
33 | # just new index wasn't enough, although another option might be to add more than 1 year to new index
34 | prev_index = pd.date_range(
35 | use_index[0] - pd.Timedelta(days=365), end=use_index[0], freq=frequency
36 | )
37 | use_index = prev_index[:-1].append(use_index.append(new_index[1:]))
38 |
39 | if isinstance(country, str):
40 | country = [country]
41 | elif isinstance(country, dict):
42 | country = list(country.keys())
43 | # subdivisions = list(country.values())
44 |
45 | holiday_list = []
46 | for hld in country:
47 | if hld == "RU":
48 | hld = "UA"
49 | elif hld == 'CN':
50 | hld = 'TW'
51 | hld = str(hld).upper()
52 | if hld in ['US', "USA", "UNITED STATES"]:
53 | try:
54 | holi_days = query_holidays(
55 | use_index,
56 | country="US",
57 | encode_holiday_type=encode_holiday_type,
58 | holidays_subdiv=holidays_subdiv,
59 | )
60 | except Exception:
61 | from pandas.tseries.holiday import USFederalHolidayCalendar
62 |
63 | # uses pandas calendar as backup in the event holidays fails
64 | holi_days = (
65 | USFederalHolidayCalendar()
66 | .holidays()
67 | .to_series()[use_index[0] : use_index[-1]]
68 | )
69 | holi_days = pd.Series(np.repeat(1, len(holi_days)), index=holi_days)
70 | holi_days = holi_days.reindex(use_index).fillna(0)
71 | holi_days = holi_days.rename("holiday_flag")
72 | else:
73 | holi_days = query_holidays(
74 | use_index,
75 | country=hld,
76 | encode_holiday_type=encode_holiday_type,
77 | holidays_subdiv=holidays_subdiv,
78 | )
79 | if not encode_holiday_type:
80 | holi_days.name = str(holi_days.name) + '_' + str(hld)
81 | holiday_list.append(holi_days.reindex(DTindex))
82 |
83 | return_df = pd.concat(holiday_list, axis=1, ignore_index=False)
84 | if encode_holiday_type:
85 | return return_df.loc[:, ~return_df.columns.duplicated()]
86 | else:
87 | return return_df
88 |
89 |
90 | def query_holidays(
91 | DTindex, country: str, encode_holiday_type: bool = False, holidays_subdiv=None
92 | ):
93 | """Query holidays package for dates.
94 |
95 | Args:
96 | DTindex (pd.DatetimeIndex): DatetimeIndex of dates to create flags
97 | country (str): to pass through to python package Holidays
98 | encode_holiday_type (bool): if True, each holiday gets a unique integer column, if False, 0/1 for all holidays
99 | """
100 | import holidays
101 |
102 | # need the extra years to make sure it captures less common holidays
103 | # mostly it is the (Observed) holiday flags showing up that cause issues
104 | years = list(range(DTindex[0].year - 2, DTindex[-1].year + 4))
105 | try:
106 | country_holidays_base = holidays.country_holidays(
107 | country, years=years, subdiv=holidays_subdiv
108 | )
109 | except Exception:
110 | print(
111 | f'country {country} not recognized. Filter holiday_countries by holidays.utils.list_supported_countries() to remove this warning'
112 | )
113 | country_holidays_base = {}
114 | if encode_holiday_type:
115 | # sorting to hopefully get consistent encoding across runs (requires long period...)
116 | if not country_holidays_base:
117 | country_holidays = pd.Series('HolidayFlag', index=DTindex)
118 | else:
119 | country_holidays = pd.Series(country_holidays_base).sort_values()
120 | """
121 | from sklearn.preprocessing import OrdinalEncoder
122 | encoder = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=999)
123 | holi_days = pd.Series(
124 | encoder.fit_transform(country_holidays.to_numpy().reshape(-1, 1)).flatten(),
125 | name="HolidayFlag",
126 | )
127 | # since zeroes are reserved for non-holidays
128 | holi_days = holi_days + 1
129 | holi_days.index = country_holidays.index
130 | """
131 | holi_days = pd.get_dummies(country_holidays, dtype=float)
132 | else:
133 | country_holidays = country_holidays_base.keys()
134 | if not country_holidays:
135 | holi_days = pd.Series(0, name='HolidayFlag', dtype=int)
136 | else:
137 | holi_days = pd.Series(
138 | np.repeat(1, len(country_holidays)),
139 | index=pd.DatetimeIndex(country_holidays),
140 | name="HolidayFlag",
141 | dtype=int,
142 | )
143 | # do some messy stuff to make sub daily data (hourly) align with daily holidays
144 | try:
145 | holi_days.index = pd.DatetimeIndex(holi_days.index).normalize()
146 | holi_days = holi_days.reindex(pd.DatetimeIndex(DTindex).normalize()).fillna(0)
147 | holi_days.index = DTindex
148 | except Exception:
149 | holi_days = holi_days.reindex(DTindex).fillna(0)
150 | return holi_days
151 |
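152 | """
153 | # Illustrative usage sketch (an addition, not original source). Non-US countries
154 | # require the `holidays` package; the US has a pandas fallback as noted above.
155 | import pandas as pd
156 |
157 | idx = pd.date_range("2023-01-01", "2023-12-31", freq="D")
158 | flag = holiday_flag(idx, country="US")  # single 0/1 flag column
159 | typed = holiday_flag(idx, country="US", encode_holiday_type=True)  # one column per holiday name
160 | multi = holiday_flag(idx, country=["US", "CA"])  # one flag column per country
161 | """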
--------------------------------------------------------------------------------
/autots/tools/lunar.py:
--------------------------------------------------------------------------------
1 | """Phases of the moon.
2 | Modified from https://stackoverflow.com/a/2531541/9492254
3 | by keturn and earlier from John Walker
4 | """
5 |
6 | from math import sin, cos, floor, sqrt, pi
7 | import bisect
8 | import numpy as np
9 | import pandas as pd
10 |
11 |
12 | def moon_phase(
13 | datetime_index,
14 | epsilon=1e-6,
15 | epoch=2444237.905,
16 | ecliptic_longitude_epoch=278.833540,
17 | ecliptic_longitude_perigee=282.596403,
18 | eccentricity=0.016718,
19 | moon_mean_longitude_epoch=64.975464,
20 | moon_mean_perigee_epoch=349.383063,
21 | ):
22 | """Numpy version. Takes a pd.DatetimeIndex and returns moon phase (%illuminated).
23 | The epoch can be adjusted slightly (0.5 = half a day) to account for time zones. This default is for the US; epoch=2444238.5 suits Asia generally.
24 | """
25 | # set time to Noon if not otherwise given, as midnight is confusingly close to previous day
26 | if np.sum(datetime_index.hour) == 0:
27 | datetime_index = datetime_index + pd.Timedelta(hours=12)
28 | days = datetime_index.to_julian_date() - epoch
29 |
30 | # Mean anomaly of the Sun
31 | a = (360 / 365.2422) * days
32 | N = a - 360.0 * np.floor(a / 360.0)
33 | N = N + ecliptic_longitude_epoch - ecliptic_longitude_perigee
34 | # Convert from perigee coordinates to epoch 1980
35 | M = a - 360.0 * np.floor(N / 360.0)
36 |
37 | m = torad(M)
38 | e = m.copy()
39 | while 1:
40 | delta = e - eccentricity * np.sin(e) - m
41 | e = e - delta / (1.0 - eccentricity * np.cos(e))
42 | if abs(delta).max() <= epsilon:
43 | break
44 |
45 | Ec = sqrt((1 + eccentricity) / (1 - eccentricity)) * np.tan(e / 2.0)
46 | # True anomaly
47 | Ec = 2 * todeg(np.arctan(Ec))
48 | # Sun's geometric ecliptic longitude
49 | a = Ec + ecliptic_longitude_perigee
50 | lambda_sun = a - 360.0 * np.floor(a / 360.0)
51 |
52 | # Calculation of the Moon's position
53 |
54 | # Moon's mean longitude
55 | a = 13.1763966 * days + moon_mean_longitude_epoch
56 | moon_longitude = a - 360.0 * np.floor(a / 360.0)
57 |
58 | # Moon's mean anomaly
59 | a = moon_longitude - 0.1114041 * days - moon_mean_perigee_epoch
60 | MM = a - 360.0 * np.floor(a / 360.0)
61 |
62 | # Moon's ascending node mean longitude
63 | # MN = fixangle(c.node_mean_longitude_epoch - 0.0529539 * day)
64 |
65 | evection = 1.2739 * np.sin(torad(2 * (moon_longitude - lambda_sun) - MM))
66 |
67 | # Annual equation
68 | annual_eq = 0.1858 * np.sin(torad(M))
69 |
70 | # Correction term
71 | A3 = 0.37 * np.sin(torad(M))
72 |
73 | MmP = MM + evection - annual_eq - A3
74 |
75 | # Correction for the equation of the centre
76 | mEc = 6.2886 * np.sin(torad(MmP))
77 |
78 | # Another correction term
79 | A4 = 0.214 * np.sin(torad(2 * MmP))
80 |
81 | # Corrected longitude
82 | lP = moon_longitude + evection + mEc - annual_eq + A4
83 |
84 | # Variation
85 | variation = 0.6583 * np.sin(torad(2 * (lP - lambda_sun)))
86 |
87 | # True longitude
88 | lPP = lP + variation
89 |
90 | # Calculation of the phase of the Moon
91 |
92 | # Age of the Moon, in degrees
93 | moon_age = lPP - lambda_sun
94 |
95 | # Phase of the Moon
96 | moon_phase = (1 - np.cos(torad(moon_age))) / 2.0
97 | return moon_phase
98 | # return pd.Series(moon_phase, index=datetime_index)
99 |
100 |
101 | def moon_phase_df(datetime_index, epoch=2444237.905):
102 | """Convert pandas DatetimeIndex to moon phases. Note timezone and hour can matter slightly.
103 | The epoch can be adjusted slightly (0.5 = half a day) to account for time zones.
104 | 2444237.905 is for US Central. epoch=2444238.5 for Asia generally.
105 | """
106 | moon = pd.Series(moon_phase(datetime_index, epoch=epoch), index=datetime_index)
107 | full_moon = ((moon > moon.shift(1)) & (moon > moon.shift(-1))).astype(int)
108 | new_moon = ((moon < moon.shift(1)) & (moon < moon.shift(-1))).astype(int)
109 | # account for end (shift) being new_moon
110 | if new_moon.tail(29).sum() == 0:
111 | new_moon.iloc[-1] = 1
112 | if full_moon.tail(29).sum() == 0:
113 | full_moon.iloc[-1] = 1
114 | moon_df = pd.concat([moon, full_moon, new_moon], axis=1)
115 | moon_df.columns = ['phase', 'full_moon', 'new_moon']
116 | return moon_df
117 |
118 |
119 | # Little mathematical functions
120 | def fixangle(a):
121 | return a - 360.0 * floor(a / 360.0)
122 |
123 |
124 | def torad(d):
125 | return d * pi / 180.0
126 |
127 |
128 | def todeg(r):
129 | return r * 180.0 / pi
130 |
131 |
132 | def dsin(d):
133 | return sin(torad(d))
134 |
135 |
136 | def dcos(d):
137 | return cos(torad(d))
138 |
139 |
140 | def kepler(m, ecc=0.016718):
141 | """Solve the equation of Kepler."""
142 |
143 | epsilon = 1e-6
144 |
145 | m = torad(m)
146 | e = m
147 | while 1:
148 | delta = e - ecc * sin(e) - m
149 | e = e - delta / (1.0 - ecc * cos(e))
150 |
151 | if abs(delta) <= epsilon:
152 | break
153 |
154 | return e
155 |
156 |
157 | def phase_string(
158 | p, precision=0.05, new=0.0, first=0.25, full=0.4, last=0.75, nextnew=1.0
159 | ):
160 | phase_strings = (
161 | (new + precision, "new"),
162 | (first - precision, "waxing crescent"),
163 | (first + precision, "first quarter"),
164 | (full - precision, "waxing gibbous"),
165 | (full + precision, "full"),
166 | (last - precision, "waning gibbous"),
167 | (last + precision, "last quarter"),
168 | (nextnew - precision, "waning crescent"),
169 | (nextnew + precision, "new"),
170 | )
171 |
172 | i = bisect.bisect([a[0] for a in phase_strings], p)
173 |
174 | return phase_strings[i][1]
175 |
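176 | """
177 | # Illustrative usage sketch (an addition, not original source):
178 | import pandas as pd
179 |
180 | idx = pd.date_range("2023-01-01", periods=90, freq="D")
181 | moon_df = moon_phase_df(idx)  # columns: 'phase', 'full_moon', 'new_moon'
182 | moon_df['full_moon'].sum()  # roughly one flag per ~29.5 day cycle
183 | """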
--------------------------------------------------------------------------------
/autots/tools/percentile.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Faster percentile and quantile for numpy
3 |
4 | Entirely from: https://krstn.eu/np.nanpercentile()-there-has-to-be-a-faster-way/
5 | """
6 | import numpy as np
7 |
8 |
9 | def _zvalue_from_index(arr, ind):
10 | """private helper function to work around the limitation of np.choose() by employing np.take()
11 | arr has to be a 3D array
12 | ind has to be a 2D array containing values for z-indices to take from arr
13 | See: http://stackoverflow.com/a/32091712/4169585
14 | This is faster and more memory efficient than using the ogrid based solution with fancy indexing.
15 | """
16 | # get number of columns and rows
17 | _, nC, nR = arr.shape
18 |
19 | # get linear indices and extract elements with np.take()
20 | idx = nC * nR * ind + np.arange(nC * nR).reshape((nC, nR))
21 | return np.take(arr, idx)
22 |
23 |
24 | def nan_percentile(in_arr, q, method="linear", axis=0, errors="raise"):
25 | """Given a 3D array, return the given percentiles as input by q.
26 | Beware this is only tested for the limited case required here, and will not match np fully.
27 | Args are more limited than numpy's. If errors="rollover", unsupported inputs fall back to np.nanpercentile.
28 | """
29 | flag_2d = False
30 | if in_arr.ndim == 2:
31 | arr = np.expand_dims(in_arr, 1).copy()  # copy so the NaN fill below can't mutate the input through the view
32 | flag_2d = True
33 | else:
34 | arr = in_arr.copy()
35 | if (
36 | axis != 0
37 | or method not in ["linear", "nearest", "lowest", "highest"]
38 | or arr.ndim != 3
39 | ):
40 | if errors == "rollover":
41 | return np.nanpercentile(arr, q=q, method=method, axis=axis)
42 | else:
43 | raise ValueError("input not supported by internal percentile function")
44 | # valid (non NaN) observations along the first axis
45 | valid_obs = np.sum(np.isfinite(arr), axis=0)
46 | # replace NaN with maximum
47 | max_val = np.nanmax(arr)
48 | arr[np.isnan(arr)] = max_val
49 | # sort - former NaNs will move to the end
50 | arr = np.sort(arr, axis=0)
51 |
52 | # loop over requested quantiles
53 | if type(q) is list:
54 | qs = []
55 | qs.extend(q)
56 | elif type(q) is range:
57 | qs = list(q)
58 | else:
59 | qs = [q]
60 | if len(qs) < 2:
61 | quant_arr = np.zeros(shape=(arr.shape[1], arr.shape[2]))
62 | else:
63 | quant_arr = np.zeros(shape=(len(qs), arr.shape[1], arr.shape[2]))
64 |
65 | result = []
66 | # note this is vectorized for a single quantile but each quantile step is separate
67 | for i in range(len(qs)):
68 | quant = qs[i]
69 | # desired position as well as floor and ceiling of it
70 | k_arr = (valid_obs - 1) * (quant / 100.0)
71 | f_arr = np.floor(k_arr).astype(np.int32)
72 | c_arr = np.ceil(k_arr).astype(np.int32)
73 | fc_equal_k_mask = f_arr == c_arr
74 |
75 | if method == "linear":
76 | # linear interpolation (like numpy percentile) takes the fractional part of desired position
77 | floor_val = _zvalue_from_index(arr=arr, ind=f_arr) * (c_arr - k_arr)
78 | ceil_val = _zvalue_from_index(arr=arr, ind=c_arr) * (k_arr - f_arr)
79 |
80 | quant_arr = floor_val + ceil_val
81 | quant_arr[fc_equal_k_mask] = _zvalue_from_index(
82 | arr=arr, ind=k_arr.astype(np.int32)
83 | )[
84 | fc_equal_k_mask
85 | ] # if floor == ceiling take floor value
86 | elif method == 'nearest':
87 | f_arr = np.around(k_arr).astype(np.int32)
88 | quant_arr = _zvalue_from_index(arr=arr, ind=f_arr)
89 | elif method == 'lowest':
90 | f_arr = np.floor(k_arr).astype(np.int32)
91 | quant_arr = _zvalue_from_index(arr=arr, ind=f_arr)
92 | elif method == 'highest':
93 | f_arr = np.ceil(k_arr).astype(np.int32)  # np.ceiling does not exist
94 | quant_arr = _zvalue_from_index(arr=arr, ind=f_arr)
95 | else:
96 | raise ValueError("interpolation method not supported")
97 |
98 | if flag_2d:
99 | result.append(quant_arr[0])
100 | else:
101 | result.append(quant_arr)
102 | if len(result) == 1:
103 | return result[0]
104 | else:
105 | return np.asarray(result)
106 |
107 |
108 | def nan_quantile(arr, q, method="linear", axis=0, errors="raise"):
109 | """Same as nan_percentile but accepts q in range [0, 1].
110 | Args are more limited than numpy's. If errors="rollover", unsupported inputs fall back to np.nanpercentile.
111 | """
112 | return nan_percentile(arr, q * 100, method=method, axis=axis, errors=errors)
113 |
114 |
115 | def trimmed_mean(data, percent, axis=0):
116 | limit = int(np.ceil(percent * data.shape[axis]))
117 | sorted_data = np.sort(data, axis=axis)
118 | trimmed = sorted_data.take(
119 | indices=range(limit, data.shape[axis] - limit), axis=axis
120 | )
121 | return np.mean(trimmed, axis=axis)
122 |
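123 | """
124 | # Illustrative comparison sketch (an addition, not original source):
125 | import numpy as np
126 |
127 | arr = np.random.rand(100, 4, 3)
128 | arr[5, 0, 0] = np.nan
129 | fast = nan_percentile(arr.copy(), q=[10, 50, 90])  # vectorized along axis 0
130 | ref = np.nanpercentile(arr, q=[10, 50, 90], axis=0)  # general but slower reference
131 | # per the docstring caveat above, results are close to numpy but not guaranteed identical
132 | """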
--------------------------------------------------------------------------------
/autots/tools/probabilistic.py:
--------------------------------------------------------------------------------
1 | """
2 | Point to Probabilistic
3 | """
4 |
5 | import pandas as pd
6 | import numpy as np
7 | from autots.tools.impute import fake_date_fill
8 | from autots.tools.percentile import nan_quantile
9 |
10 | try:
11 | from scipy.stats import percentileofscore
12 | except Exception:
13 | pass
14 |
15 |
16 | def percentileofscore_appliable(x, a, kind='rank'):
17 | return percentileofscore(a, score=x, kind=kind)
18 |
19 |
20 | def historic_quantile(df_train, prediction_interval: float = 0.9, nan_flag=None):
21 | """
22 | Computes the difference between the median and the prediction interval range in historic data.
23 |
24 | Args:
25 | df_train (pd.DataFrame): a dataframe of training data
26 | prediction_interval (float): the desired forecast interval range
27 |
28 | Returns:
29 | lower, upper (np.array): two 1D arrays
30 | """
31 | quantiles = [0, 1 - prediction_interval, 0.5, prediction_interval, 1]
32 | # save compute time by using the non-nan version if possible
33 | if not isinstance(nan_flag, bool):
34 | if isinstance(df_train, pd.DataFrame):
35 | nan_flag = np.isnan(np.min(df_train.to_numpy()))
36 | else:
37 | nan_flag = np.isnan(np.min(np.array(df_train)))
38 | if nan_flag:
39 | bins = nan_quantile(df_train.astype(float), quantiles, axis=0)
40 | else:
41 | bins = np.quantile(df_train.astype(float), quantiles, axis=0, keepdims=False)
42 | upper = bins[3] - bins[2]
43 | if 0 in upper:
44 | upper = np.where(upper != 0, upper, (bins[4] - bins[2]) / 4)  # assign result; it was previously discarded
45 | lower = bins[2] - bins[1]
46 | if 0 in lower:
47 | lower = np.where(lower != 0, lower, (bins[2] - bins[0]) / 4)  # assign result; it was previously discarded
48 | return lower, upper
49 |
50 |
51 | def inferred_normal(train, forecast, n: int = 5, prediction_interval: float = 0.9):
52 | """A corruption of Bayes theorem.
53 | It will be sensitive to the transformations of the data."""
54 | prior_mu = train.mean().to_numpy()
55 | prior_sigma = train.std().replace(0, 1).to_numpy()
56 | idx = forecast.index
57 | columns = forecast.columns
58 | from scipy.stats import norm
59 |
60 | p_int = 1 - ((1 - prediction_interval) / 2)
61 | adj = norm.ppf(p_int)
62 | upper_forecast, lower_forecast = [], []
63 |
64 | for row in forecast.values:
65 | data_mu = row
66 | reshape_row = data_mu.reshape(1, -1)
67 | post_mu = (
68 | (prior_mu / prior_sigma**2) + ((n * data_mu) / prior_sigma**2)
69 | ) / ((1 / prior_sigma**2) + (n / prior_sigma**2))
70 | lower = pd.DataFrame(post_mu - adj * prior_sigma, index=columns).transpose()
71 | lower = lower.where(lower <= data_mu, reshape_row, axis=1)
72 | upper = pd.DataFrame(post_mu + adj * prior_sigma, index=columns).transpose()
73 | upper = upper.where(upper >= data_mu, reshape_row, axis=1)
74 | lower_forecast.append(lower)
75 | upper_forecast.append(upper)
76 | lower_forecast = pd.concat(lower_forecast, axis=0)
77 | upper_forecast = pd.concat(upper_forecast, axis=0)
78 | lower_forecast.index = idx
79 | upper_forecast.index = idx
80 | return upper_forecast, lower_forecast
81 |
82 |
83 | """
84 | post_mu = ((prior_mu/prior_sigma ** 2) + ((n * data_mu)/data_sigma ** 2))/
85 | ((1/prior_sigma ** 2) + (n/data_sigma ** 2))
86 | post_sigma = sqrt(1/((1/prior_sigma ** 2) + (n/data_sigma ** 2)))
87 | """
88 |
89 |
90 | def Variable_Point_to_Probability(train, forecast, alpha=0.3, beta=1):
91 | """Data driven placeholder for model error estimation.
92 |
93 | ErrorRange = beta * (En + alpha * En-1 [cum sum of En])
94 | En = abs(0.5 - QTP) * D
95 | D = abs(Xn - ((Avg % Change of Train * Xn-1) + Xn-1))
96 | Xn = Forecast Value
97 | QTP = Percentile of Score in All Percent Changes of Train
98 | Score = Percent Change (from Xn-1 to Xn)
99 |
100 | Args:
101 | train (pandas.DataFrame): DataFrame of time series where index is DatetimeIndex
102 | forecast (pandas.DataFrame): DataFrame of forecast time series
103 | in which the index is a DatetimeIndex and columns/series aligned with train.
104 | Forecast must be > 1 in length.
105 | alpha (float): parameter which affects the broadening of the error range over time
106 | Usually 0 < alpha < 1 (although it can be larger than 1)
107 | beta (float): parameter which affects the overall width of the error bar
108 | Usually 0 < beta < 1 (although it can be larger than 1)
109 |
110 | Returns:
111 | ErrorRange (pandas.DataFrame): error width for each value of forecast.
112 | """
113 | column_order = train.columns.intersection(forecast.columns)
114 | initial_length = len(forecast.columns)
115 | forecast = forecast[column_order] # align columns
116 | aligned_length = len(forecast.columns)
117 | train = train[column_order]
118 | if aligned_length != initial_length:
119 | print("Forecast columns do not match train, some series may be lost")
120 |
121 | train = train.replace(0, np.nan)
122 |
123 | train = fake_date_fill(train, back_method='keepNA')
124 |
125 | percent_changes = train.pct_change()
126 |
127 | median_change = percent_changes.median()
128 | # median_change = (1 + median_change)
129 | # median_change[median_change <= 0 ] = 0.01 # HANDLE GOING BELOW ZERO
130 |
131 | diffs = abs(forecast - (forecast + forecast * median_change).ffill().shift(1))
132 |
133 | forecast_percent_changes = forecast.replace(0, np.nan).pct_change()
134 |
135 | quantile_differences = pd.DataFrame()
136 | for column in forecast.columns:
137 | percentile_distribution = percent_changes[column].dropna()
138 |
139 | quantile_difference = abs(
140 | (
141 | 50
142 | - forecast_percent_changes[column].apply(
143 | percentileofscore_appliable, a=percentile_distribution, kind='rank'
144 | )
145 | )
146 | / 100
147 | )
148 | quantile_differences = pd.concat(
149 | [quantile_differences, quantile_difference], axis=1
150 | )
151 |
152 | En = quantile_differences * diffs
153 | Enneg1 = En.cumsum().shift(1).fillna(0)
154 | ErrorRange = beta * (En + alpha * Enneg1)
155 | ErrorRange = ErrorRange.bfill().ffill()
156 |
157 | return ErrorRange
158 |
159 |
160 | def Point_to_Probability(
161 | train, forecast, prediction_interval=0.9, method: str = 'historic_quantile'
162 | ):
163 | """Data driven placeholder for model error estimation.
164 |
165 | Catlin Point to Probability method ('a mixture of dark magic and gum disease')
166 |
167 | Args:
168 | train (pandas.DataFrame): DataFrame of time series where index is DatetimeIndex
169 | forecast (pandas.DataFrame): DataFrame of forecast time series
170 | in which the index is a DatetimeIndex and columns/series aligned with train.
171 | Forecast must be > 1 in length.
172 | prediction_interval (float): confidence or perhaps credible interval
173 | method (str): spell to cast to create dark magic.
174 | 'historic_quantile', 'inferred_normal', 'variable_pct_change'
175 | gum disease available separately upon request.
176 |
177 | Returns:
178 | upper_error, lower_error (two pandas.DataFrames for upper and lower bound respectively)
179 | """
180 | if method == 'historic_quantile':
181 | lower, upper = historic_quantile(train, prediction_interval)
182 | upper_forecast = forecast.astype(float) + upper
183 | lower_forecast = forecast.astype(float) - lower
184 | return upper_forecast, lower_forecast
185 | if method == 'inferred_normal':
186 | return inferred_normal(
187 | train, forecast, n=5, prediction_interval=prediction_interval
188 | )
189 | if method == 'variable_pct_change':
190 | beta = np.exp(prediction_interval * 10)
191 | alpha = 0.3
192 | errorranges = Variable_Point_to_Probability(
193 | train, forecast, alpha=alpha, beta=beta
194 | )
195 | # make symmetric error ranges
196 | errorranges = errorranges / 2
197 |
198 | upper_forecast = forecast + errorranges
199 | lower_forecast = forecast - errorranges
200 | return upper_forecast, lower_forecast
201 |
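202 | """
203 | # Illustrative usage sketch (an addition, not original source):
204 | import numpy as np
205 | import pandas as pd
206 |
207 | idx = pd.date_range("2022-01-01", periods=120, freq="D")
208 | train = pd.DataFrame({'a': np.random.rand(100)}, index=idx[:100])
209 | forecast = pd.DataFrame({'a': np.random.rand(20)}, index=idx[100:])
210 | upper, lower = Point_to_Probability(
211 |     train, forecast, prediction_interval=0.9, method='historic_quantile'
212 | )
213 | """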
--------------------------------------------------------------------------------
/autots/tools/profile.py:
--------------------------------------------------------------------------------
1 | """
2 | Profiling
3 | """
4 |
5 | import numpy as np
6 | import pandas as pd
7 | from autots.tools.seasonal import (
8 | date_part,
9 | create_changepoint_features,
10 | half_yr_spacing,
11 | )
12 | from autots.models.basics import BasicLinearModel
13 |
14 |
15 | def data_profile(df):
16 | """Legacy profiler.
17 | Input: a pd DataFrame of columns which are time series, and a datetime index
18 |
19 | Output: a pd DataFrame of column per time series, with rows which are statistics
20 | """
21 |
22 | a = pd.DataFrame(df.min(skipna=True)).transpose()
23 | b = pd.DataFrame(df.mean(skipna=True)).transpose()
24 | c = pd.DataFrame(df.median(skipna=True)).transpose()
25 | d = pd.DataFrame(df.max(skipna=True)).transpose()
26 | e = pd.DataFrame(df.notna().idxmax()).transpose()
27 | f = pd.DataFrame(df.notna()[::-1].idxmax()).transpose()
28 | g = f - e
29 | h = pd.DataFrame(df.isnull().sum() * 100 / len(df)).transpose()
30 | profile_df = pd.concat([a, b, c, d, e, f, g, h], ignore_index=True, sort=True)
31 | profile_df.index = [
32 | 'min',
33 | 'mean',
34 | 'median',
35 | 'max',
36 | 'FirstDate',
37 | 'LastDate',
38 | 'LengthDays',
39 | "PercentNA",
40 | ]
41 |
42 | return profile_df
43 |
44 |
45 | def summarize_series(df):
46 | """Summarize time series data.
47 |
48 | Args:
49 | df (pd.DataFrame): wide style data with datetimeindex
50 | """
51 | df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
52 | df_sum.loc["count_non_zero"] = (df != 0).sum()
53 | df_sum.loc["cv_squared"] = (
54 | df_sum.loc["std"] / df_sum.loc["mean"].replace(0, 1)
55 | ) ** 2
56 | df_sum.loc["adi"] = (df.shape[0] / df_sum.loc["count_non_zero"].replace(0, 1)) ** 2
57 | first_non_nan_index = df.replace(0, np.nan).reset_index(drop=True).notna().idxmax()
58 | try:
59 | df_sum.loc["autocorr_1"] = np.diag(
60 | np.corrcoef(df.bfill().T, df.shift(1).bfill().T)[
61 | : df.shape[1], df.shape[1] :
62 | ]
63 | )
64 | except Exception as e:
65 | print(f"summarize_series autocorr_1 failed with {repr(e)}")
66 | df_sum.loc["null_percentage"] = (first_non_nan_index / df.shape[0]).fillna(1)
67 | diffs = df.diff().iloc[1:] # Exclude the first row with NaN
68 | zero_diffs = (diffs == 0).sum()
69 | total_diffs = df.shape[0] - 1 # Number of differences per series
70 | total_diffs = total_diffs if total_diffs > 0 else 1
71 | zero_diff_proportions = zero_diffs / total_diffs
72 | df_sum.loc['zero_diff_proportion'] = zero_diff_proportions
73 | try:
74 | mod = BasicLinearModel(changepoint_spacing=None)
75 | mod.fit(df.ffill().bfill())
76 | summary = mod.coefficient_summary(df.ffill().bfill())
77 | df_sum = pd.concat([df_sum, summary.transpose()])
78 | except Exception as e:
79 | df_sum.loc["season_trend_percent"] = 0
80 | print(f"summarize_series BasicLinearModel decomposition failed with {repr(e)}")
81 | return df_sum
82 |
83 |
84 | def profile_time_series(
85 | df,
86 | adi_threshold=1.3,
87 | cvar_threshold=0.5,
88 | flat_threshold=0.92,
89 | new_product_threshold='auto',
90 | seasonal_threshold=0.5,
91 | ):
92 | """
93 | Profiles time series data into categories:
94 | smooth, intermittent, erratic, lumpy, flat, new_product
95 |
96 | Args:
97 | df (pd.DataFrame): Wide format DataFrame with datetime index and each column as a time series.
98 | new_product_threshold (float or 'auto'): one of the more finicky thresholds; the fraction of null or zero data from the start of a series needed to declare it a new product
99 | seasonal_threshold (float): series whose season_trend_percent exceeds this are labeled 'seasonal'
100 | Returns:
101 | pd.DataFrame: DataFrame with 'SERIES' and 'PROFILE' columns.
102 | """
103 |
104 | metrics_df = summarize_series(df).transpose()
105 |
106 | # Initialize demand profile as 'smooth'
107 | metrics_df['PROFILE'] = 'smooth'
108 |
109 | if new_product_threshold == "auto":
110 | half_yr_space = half_yr_spacing(df)
111 | new_product_threshold = 1 - (half_yr_space * 0.65 / df.shape[0])
112 | if new_product_threshold < 0.85:
113 | new_product_threshold = 0.85
114 | if new_product_threshold > 0.99:
115 | new_product_threshold = 0.99
116 | # Apply conditions to classify the demand profiles
117 | metrics_df.loc[
118 | (metrics_df['adi'] >= adi_threshold)
119 | & (metrics_df['cv_squared'] < cvar_threshold),
120 | 'PROFILE',
121 | ] = 'intermittent'
122 | metrics_df.loc[
123 | (metrics_df['adi'] < adi_threshold)
124 | & (metrics_df['cv_squared'] >= cvar_threshold),
125 | 'PROFILE',
126 | ] = 'erratic'
127 | metrics_df.loc[
128 | (metrics_df['adi'] >= adi_threshold)
129 | & (metrics_df['cv_squared'] >= cvar_threshold),
130 | 'PROFILE',
131 | ] = 'lumpy'
132 | metrics_df.loc[
133 | metrics_df['zero_diff_proportion'] >= flat_threshold, 'PROFILE'
134 | ] = 'flat'
135 | metrics_df.loc[
136 | metrics_df['null_percentage'] >= new_product_threshold, 'PROFILE'
137 | ] = 'new_product'
138 | metrics_df.loc[
139 | metrics_df['season_trend_percent'] > seasonal_threshold, 'PROFILE'
140 | ] = "seasonal"
141 |
142 | # Reset index to get 'SERIES' column
143 | intermittence_df = (
144 | metrics_df[['PROFILE']].reset_index().rename(columns={'index': 'SERIES'})
145 | )
146 |
147 | return intermittence_df
148 |
149 |
150 | # burst, stationary, seasonality
151 |
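152 | """
153 | # Illustrative usage sketch (an addition, not original source):
154 | import numpy as np
155 | import pandas as pd
156 |
157 | idx = pd.date_range("2022-01-01", periods=300, freq="D")
158 | df = pd.DataFrame({
159 |     'smooth': np.sin(np.arange(300) / 10) + 5,
160 |     'flat': np.ones(300),
161 | }, index=idx)
162 | profile_time_series(df)  # -> DataFrame with 'SERIES' and 'PROFILE' columns
163 | """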
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | *_googleid*
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/.nojekyll
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = ./build/
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-minimal
--------------------------------------------------------------------------------
/docs/_static/autots_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/_static/autots_logo.png
--------------------------------------------------------------------------------
/docs/build/doctrees/environment.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/environment.pickle
--------------------------------------------------------------------------------
/docs/build/doctrees/index.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/index.doctree
--------------------------------------------------------------------------------
/docs/build/doctrees/source/autots.datasets.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/source/autots.datasets.doctree
--------------------------------------------------------------------------------
/docs/build/doctrees/source/autots.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/source/autots.doctree
--------------------------------------------------------------------------------
/docs/build/doctrees/source/autots.evaluator.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/source/autots.evaluator.doctree
--------------------------------------------------------------------------------
/docs/build/doctrees/source/autots.models.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/source/autots.models.doctree
--------------------------------------------------------------------------------
/docs/build/doctrees/source/autots.templates.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/source/autots.templates.doctree
--------------------------------------------------------------------------------
/docs/build/doctrees/source/autots.tools.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/source/autots.tools.doctree
--------------------------------------------------------------------------------
/docs/build/doctrees/source/intro.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/source/intro.doctree
--------------------------------------------------------------------------------
/docs/build/doctrees/source/modules.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/source/modules.doctree
--------------------------------------------------------------------------------
/docs/build/doctrees/source/tutorial.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/doctrees/source/tutorial.doctree
--------------------------------------------------------------------------------
/docs/build/html/.buildinfo:
--------------------------------------------------------------------------------
1 | # Sphinx build info version 1
2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
3 | config: 9cb0541968e2a7fa2638183ce74b6ee3
4 | tags: 645f666f9bcd5a90fca523b33c5a78b7
5 |
--------------------------------------------------------------------------------
/docs/build/html/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/html/.nojekyll
--------------------------------------------------------------------------------
/docs/build/html/_sources/index.rst.txt:
--------------------------------------------------------------------------------
1 | AutoTS
2 | ===================
3 |
4 | `autots`_ is an automated time series forecasting package for Python.
5 |
6 | Installation
7 | ------------
8 |
9 | .. code:: sh
10 |
11 | pip install autots
12 |
13 |
14 | Requirements: Python 3.9+, numpy, pandas, statsmodels, and scikit-learn.
15 |
16 | Getting Started
17 | ===================
18 | .. toctree::
19 | :maxdepth: 2
20 |
21 | source/intro
22 | source/tutorial
23 |
24 | Modules API
25 | ===================
26 | .. toctree::
27 | :maxdepth: 2
28 |
29 | source/modules
30 |
31 | Indices and tables
32 | ==================
33 |
34 | * :ref:`genindex`
35 | * :ref:`modindex`
36 | * :ref:`search`
37 |
--------------------------------------------------------------------------------
/docs/build/html/_sources/source/autots.datasets.rst.txt:
--------------------------------------------------------------------------------
1 | autots.datasets package
2 | =======================
3 |
4 | Submodules
5 | ----------
6 |
7 | autots.datasets.fred module
8 | ---------------------------
9 |
10 | .. automodule:: autots.datasets.fred
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | Module contents
16 | ---------------
17 |
18 | .. automodule:: autots.datasets
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
--------------------------------------------------------------------------------
/docs/build/html/_sources/source/autots.evaluator.rst.txt:
--------------------------------------------------------------------------------
1 | autots.evaluator package
2 | ========================
3 |
4 | Submodules
5 | ----------
6 |
7 | autots.evaluator.anomaly\_detector module
8 | -----------------------------------------
9 |
10 | .. automodule:: autots.evaluator.anomaly_detector
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | autots.evaluator.auto\_model module
16 | -----------------------------------
17 |
18 | .. automodule:: autots.evaluator.auto_model
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | autots.evaluator.auto\_ts module
24 | --------------------------------
25 |
26 | .. automodule:: autots.evaluator.auto_ts
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | autots.evaluator.benchmark module
32 | ---------------------------------
33 |
34 | .. automodule:: autots.evaluator.benchmark
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | autots.evaluator.event\_forecasting module
40 | ------------------------------------------
41 |
42 | .. automodule:: autots.evaluator.event_forecasting
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 | autots.evaluator.metrics module
48 | -------------------------------
49 |
50 | .. automodule:: autots.evaluator.metrics
51 | :members:
52 | :undoc-members:
53 | :show-inheritance:
54 |
55 | autots.evaluator.validation module
56 | ----------------------------------
57 |
58 | .. automodule:: autots.evaluator.validation
59 | :members:
60 | :undoc-members:
61 | :show-inheritance:
62 |
63 | Module contents
64 | ---------------
65 |
66 | .. automodule:: autots.evaluator
67 | :members:
68 | :undoc-members:
69 | :show-inheritance:
70 |
--------------------------------------------------------------------------------
/docs/build/html/_sources/source/autots.models.rst.txt:
--------------------------------------------------------------------------------
1 | autots.models package
2 | =====================
3 |
4 | Submodules
5 | ----------
6 |
7 | autots.models.arch module
8 | -------------------------
9 |
10 | .. automodule:: autots.models.arch
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | autots.models.base module
16 | -------------------------
17 |
18 | .. automodule:: autots.models.base
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | autots.models.basics module
24 | ---------------------------
25 |
26 | .. automodule:: autots.models.basics
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | autots.models.cassandra module
32 | ------------------------------
33 |
34 | .. automodule:: autots.models.cassandra
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | autots.models.composite module
40 | ------------------------------
41 |
42 | .. automodule:: autots.models.composite
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 | autots.models.dnn module
48 | ------------------------
49 |
50 | .. automodule:: autots.models.dnn
51 | :members:
52 | :undoc-members:
53 | :show-inheritance:
54 |
55 | autots.models.ensemble module
56 | -----------------------------
57 |
58 | .. automodule:: autots.models.ensemble
59 | :members:
60 | :undoc-members:
61 | :show-inheritance:
62 |
63 | autots.models.gluonts module
64 | ----------------------------
65 |
66 | .. automodule:: autots.models.gluonts
67 | :members:
68 | :undoc-members:
69 | :show-inheritance:
70 |
71 | autots.models.greykite module
72 | -----------------------------
73 |
74 | .. automodule:: autots.models.greykite
75 | :members:
76 | :undoc-members:
77 | :show-inheritance:
78 |
79 | autots.models.matrix\_var module
80 | --------------------------------
81 |
82 | .. automodule:: autots.models.matrix_var
83 | :members:
84 | :undoc-members:
85 | :show-inheritance:
86 |
87 | autots.models.mlensemble module
88 | -------------------------------
89 |
90 | .. automodule:: autots.models.mlensemble
91 | :members:
92 | :undoc-members:
93 | :show-inheritance:
94 |
95 | autots.models.model\_list module
96 | --------------------------------
97 |
98 | .. automodule:: autots.models.model_list
99 | :members:
100 | :undoc-members:
101 | :show-inheritance:
102 |
103 | autots.models.neural\_forecast module
104 | -------------------------------------
105 |
106 | .. automodule:: autots.models.neural_forecast
107 | :members:
108 | :undoc-members:
109 | :show-inheritance:
110 |
111 | autots.models.prophet module
112 | ----------------------------
113 |
114 | .. automodule:: autots.models.prophet
115 | :members:
116 | :undoc-members:
117 | :show-inheritance:
118 |
119 | autots.models.pytorch module
120 | ----------------------------
121 |
122 | .. automodule:: autots.models.pytorch
123 | :members:
124 | :undoc-members:
125 | :show-inheritance:
126 |
127 | autots.models.sklearn module
128 | ----------------------------
129 |
130 | .. automodule:: autots.models.sklearn
131 | :members:
132 | :undoc-members:
133 | :show-inheritance:
134 |
135 | autots.models.statsmodels module
136 | --------------------------------
137 |
138 | .. automodule:: autots.models.statsmodels
139 | :members:
140 | :undoc-members:
141 | :show-inheritance:
142 |
143 | autots.models.tfp module
144 | ------------------------
145 |
146 | .. automodule:: autots.models.tfp
147 | :members:
148 | :undoc-members:
149 | :show-inheritance:
150 |
151 | autots.models.tide module
152 | -------------------------
153 |
154 | .. automodule:: autots.models.tide
155 | :members:
156 | :undoc-members:
157 | :show-inheritance:
158 |
159 | Module contents
160 | ---------------
161 |
162 | .. automodule:: autots.models
163 | :members:
164 | :undoc-members:
165 | :show-inheritance:
166 |
--------------------------------------------------------------------------------
/docs/build/html/_sources/source/autots.rst.txt:
--------------------------------------------------------------------------------
1 | autots package
2 | ==============
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 | :maxdepth: 4
9 |
10 | autots.datasets
11 | autots.evaluator
12 | autots.models
13 | autots.templates
14 | autots.tools
15 |
16 | Module contents
17 | ---------------
18 |
19 | .. automodule:: autots
20 | :members:
21 | :undoc-members:
22 | :show-inheritance:
23 |
--------------------------------------------------------------------------------
/docs/build/html/_sources/source/autots.templates.rst.txt:
--------------------------------------------------------------------------------
1 | autots.templates package
2 | ========================
3 |
4 | Submodules
5 | ----------
6 |
7 | autots.templates.general module
8 | -------------------------------
9 |
10 | .. automodule:: autots.templates.general
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | Module contents
16 | ---------------
17 |
18 | .. automodule:: autots.templates
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
--------------------------------------------------------------------------------
/docs/build/html/_sources/source/autots.tools.rst.txt:
--------------------------------------------------------------------------------
1 | autots.tools package
2 | ====================
3 |
4 | Submodules
5 | ----------
6 |
7 | autots.tools.anomaly\_utils module
8 | ----------------------------------
9 |
10 | .. automodule:: autots.tools.anomaly_utils
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | autots.tools.calendar module
16 | ----------------------------
17 |
18 | .. automodule:: autots.tools.calendar
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | autots.tools.cointegration module
24 | ---------------------------------
25 |
26 | .. automodule:: autots.tools.cointegration
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | autots.tools.constraint module
32 | ------------------------------
33 |
34 | .. automodule:: autots.tools.constraint
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | autots.tools.cpu\_count module
40 | ------------------------------
41 |
42 | .. automodule:: autots.tools.cpu_count
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 | autots.tools.fast\_kalman module
48 | --------------------------------
49 |
50 | .. automodule:: autots.tools.fast_kalman
51 | :members:
52 | :undoc-members:
53 | :show-inheritance:
54 |
55 | autots.tools.fft module
56 | -----------------------
57 |
58 | .. automodule:: autots.tools.fft
59 | :members:
60 | :undoc-members:
61 | :show-inheritance:
62 |
63 | autots.tools.fir\_filter module
64 | -------------------------------
65 |
66 | .. automodule:: autots.tools.fir_filter
67 | :members:
68 | :undoc-members:
69 | :show-inheritance:
70 |
71 | autots.tools.hierarchial module
72 | -------------------------------
73 |
74 | .. automodule:: autots.tools.hierarchial
75 | :members:
76 | :undoc-members:
77 | :show-inheritance:
78 |
79 | autots.tools.holiday module
80 | ---------------------------
81 |
82 | .. automodule:: autots.tools.holiday
83 | :members:
84 | :undoc-members:
85 | :show-inheritance:
86 |
87 | autots.tools.impute module
88 | --------------------------
89 |
90 | .. automodule:: autots.tools.impute
91 | :members:
92 | :undoc-members:
93 | :show-inheritance:
94 |
95 | autots.tools.kalman module
96 | --------------------------
97 |
98 | .. automodule:: autots.tools.kalman
99 | :members:
100 | :undoc-members:
101 | :show-inheritance:
102 |
103 | autots.tools.lunar module
104 | -------------------------
105 |
106 | .. automodule:: autots.tools.lunar
107 | :members:
108 | :undoc-members:
109 | :show-inheritance:
110 |
111 | autots.tools.percentile module
112 | ------------------------------
113 |
114 | .. automodule:: autots.tools.percentile
115 | :members:
116 | :undoc-members:
117 | :show-inheritance:
118 |
119 | autots.tools.probabilistic module
120 | ---------------------------------
121 |
122 | .. automodule:: autots.tools.probabilistic
123 | :members:
124 | :undoc-members:
125 | :show-inheritance:
126 |
127 | autots.tools.profile module
128 | ---------------------------
129 |
130 | .. automodule:: autots.tools.profile
131 | :members:
132 | :undoc-members:
133 | :show-inheritance:
134 |
135 | autots.tools.regressor module
136 | -----------------------------
137 |
138 | .. automodule:: autots.tools.regressor
139 | :members:
140 | :undoc-members:
141 | :show-inheritance:
142 |
143 | autots.tools.seasonal module
144 | ----------------------------
145 |
146 | .. automodule:: autots.tools.seasonal
147 | :members:
148 | :undoc-members:
149 | :show-inheritance:
150 |
151 | autots.tools.shaping module
152 | ---------------------------
153 |
154 | .. automodule:: autots.tools.shaping
155 | :members:
156 | :undoc-members:
157 | :show-inheritance:
158 |
159 | autots.tools.thresholding module
160 | --------------------------------
161 |
162 | .. automodule:: autots.tools.thresholding
163 | :members:
164 | :undoc-members:
165 | :show-inheritance:
166 |
167 | autots.tools.transform module
168 | -----------------------------
169 |
170 | .. automodule:: autots.tools.transform
171 | :members:
172 | :undoc-members:
173 | :show-inheritance:
174 |
175 | autots.tools.wavelet module
176 | ---------------------------
177 |
178 | .. automodule:: autots.tools.wavelet
179 | :members:
180 | :undoc-members:
181 | :show-inheritance:
182 |
183 | autots.tools.window\_functions module
184 | -------------------------------------
185 |
186 | .. automodule:: autots.tools.window_functions
187 | :members:
188 | :undoc-members:
189 | :show-inheritance:
190 |
191 | Module contents
192 | ---------------
193 |
194 | .. automodule:: autots.tools
195 | :members:
196 | :undoc-members:
197 | :show-inheritance:
198 |
--------------------------------------------------------------------------------
/docs/build/html/_sources/source/intro.rst.txt:
--------------------------------------------------------------------------------
1 | Intro
2 | ===================
3 | .. mdinclude:: ../../README.md
4 |
--------------------------------------------------------------------------------
/docs/build/html/_sources/source/modules.rst.txt:
--------------------------------------------------------------------------------
1 | autots
2 | ======
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | autots
8 |
--------------------------------------------------------------------------------
/docs/build/html/_sources/source/tutorial.rst.txt:
--------------------------------------------------------------------------------
1 | Tutorial
2 | ===================
3 | .. mdinclude:: ../../extended_tutorial.md
4 |
--------------------------------------------------------------------------------
/docs/build/html/_static/autots_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/html/_static/autots_logo.png
--------------------------------------------------------------------------------
/docs/build/html/_static/custom.css:
--------------------------------------------------------------------------------
1 | /* This file intentionally left blank. */
2 |
--------------------------------------------------------------------------------
/docs/build/html/_static/doctools.js:
--------------------------------------------------------------------------------
1 | /*
2 | * doctools.js
3 | * ~~~~~~~~~~~
4 | *
5 | * Base JavaScript utilities for all Sphinx HTML documentation.
6 | *
7 | * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 | "use strict";
12 |
13 | const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
14 | "TEXTAREA",
15 | "INPUT",
16 | "SELECT",
17 | "BUTTON",
18 | ]);
19 |
20 | const _ready = (callback) => {
21 | if (document.readyState !== "loading") {
22 | callback();
23 | } else {
24 | document.addEventListener("DOMContentLoaded", callback);
25 | }
26 | };
27 |
28 | /**
29 | * Small JavaScript module for the documentation.
30 | */
31 | const Documentation = {
32 | init: () => {
33 | Documentation.initDomainIndexTable();
34 | Documentation.initOnKeyListeners();
35 | },
36 |
37 | /**
38 | * i18n support
39 | */
40 | TRANSLATIONS: {},
41 | PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
42 | LOCALE: "unknown",
43 |
44 | // gettext and ngettext don't access this so that the functions
45 | // can safely bound to a different name (_ = Documentation.gettext)
46 | gettext: (string) => {
47 | const translated = Documentation.TRANSLATIONS[string];
48 | switch (typeof translated) {
49 | case "undefined":
50 | return string; // no translation
51 | case "string":
52 | return translated; // translation exists
53 | default:
54 | return translated[0]; // (singular, plural) translation tuple exists
55 | }
56 | },
57 |
58 | ngettext: (singular, plural, n) => {
59 | const translated = Documentation.TRANSLATIONS[singular];
60 | if (typeof translated !== "undefined")
61 | return translated[Documentation.PLURAL_EXPR(n)];
62 | return n === 1 ? singular : plural;
63 | },
64 |
65 | addTranslations: (catalog) => {
66 | Object.assign(Documentation.TRANSLATIONS, catalog.messages);
67 | Documentation.PLURAL_EXPR = new Function(
68 | "n",
69 | `return (${catalog.plural_expr})`
70 | );
71 | Documentation.LOCALE = catalog.locale;
72 | },
73 |
74 | /**
75 | * helper function to focus on search bar
76 | */
77 | focusSearchBar: () => {
78 | document.querySelectorAll("input[name=q]")[0]?.focus();
79 | },
80 |
81 | /**
82 | * Initialise the domain index toggle buttons
83 | */
84 | initDomainIndexTable: () => {
85 | const toggler = (el) => {
86 | const idNumber = el.id.substr(7);
87 | const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
88 | if (el.src.substr(-9) === "minus.png") {
89 | el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
90 | toggledRows.forEach((el) => (el.style.display = "none"));
91 | } else {
92 | el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
93 | toggledRows.forEach((el) => (el.style.display = ""));
94 | }
95 | };
96 |
97 | const togglerElements = document.querySelectorAll("img.toggler");
98 | togglerElements.forEach((el) =>
99 | el.addEventListener("click", (event) => toggler(event.currentTarget))
100 | );
101 | togglerElements.forEach((el) => (el.style.display = ""));
102 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
103 | },
104 |
105 | initOnKeyListeners: () => {
106 | // only install a listener if it is really needed
107 | if (
108 | !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
109 | !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
110 | )
111 | return;
112 |
113 | document.addEventListener("keydown", (event) => {
114 | // bail for input elements
115 | if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
116 | // bail with special keys
117 | if (event.altKey || event.ctrlKey || event.metaKey) return;
118 |
119 | if (!event.shiftKey) {
120 | switch (event.key) {
121 | case "ArrowLeft":
122 | if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
123 |
124 | const prevLink = document.querySelector('link[rel="prev"]');
125 | if (prevLink && prevLink.href) {
126 | window.location.href = prevLink.href;
127 | event.preventDefault();
128 | }
129 | break;
130 | case "ArrowRight":
131 | if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
132 |
133 | const nextLink = document.querySelector('link[rel="next"]');
134 | if (nextLink && nextLink.href) {
135 | window.location.href = nextLink.href;
136 | event.preventDefault();
137 | }
138 | break;
139 | }
140 | }
141 |
142 | // some keyboard layouts may need Shift to get /
143 | switch (event.key) {
144 | case "/":
145 | if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
146 | Documentation.focusSearchBar();
147 | event.preventDefault();
148 | }
149 | });
150 | },
151 | };
152 |
153 | // quick alias for translations
154 | const _ = Documentation.gettext;
155 |
156 | _ready(Documentation.init);
157 |
--------------------------------------------------------------------------------
/docs/build/html/_static/documentation_options.js:
--------------------------------------------------------------------------------
1 | const DOCUMENTATION_OPTIONS = {
2 | VERSION: '0.6.21',
3 | LANGUAGE: 'en',
4 | COLLAPSE_INDEX: false,
5 | BUILDER: 'html',
6 | FILE_SUFFIX: '.html',
7 | LINK_SUFFIX: '.html',
8 | HAS_SOURCE: true,
9 | SOURCELINK_SUFFIX: '.txt',
10 | NAVIGATION_WITH_KEYS: false,
11 | SHOW_SEARCH_SUMMARY: true,
12 | ENABLE_SEARCH_SHORTCUTS: true,
13 | };
--------------------------------------------------------------------------------
/docs/build/html/_static/file.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/html/_static/file.png
--------------------------------------------------------------------------------
/docs/build/html/_static/language_data.js:
--------------------------------------------------------------------------------
1 | /*
2 | * language_data.js
3 | * ~~~~~~~~~~~~~~~~
4 | *
5 | * This script contains the language-specific data used by searchtools.js,
6 | * namely the list of stopwords, stemmer, scorer and splitter.
7 | *
8 | * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
9 | * :license: BSD, see LICENSE for details.
10 | *
11 | */
12 |
13 | var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
14 |
15 |
16 | /* Non-minified version is copied as a separate JS file, if available */
17 |
18 | /**
19 | * Porter Stemmer
20 | */
21 | var Stemmer = function() {
22 |
23 | var step2list = {
24 | ational: 'ate',
25 | tional: 'tion',
26 | enci: 'ence',
27 | anci: 'ance',
28 | izer: 'ize',
29 | bli: 'ble',
30 | alli: 'al',
31 | entli: 'ent',
32 | eli: 'e',
33 | ousli: 'ous',
34 | ization: 'ize',
35 | ation: 'ate',
36 | ator: 'ate',
37 | alism: 'al',
38 | iveness: 'ive',
39 | fulness: 'ful',
40 | ousness: 'ous',
41 | aliti: 'al',
42 | iviti: 'ive',
43 | biliti: 'ble',
44 | logi: 'log'
45 | };
46 |
47 | var step3list = {
48 | icate: 'ic',
49 | ative: '',
50 | alize: 'al',
51 | iciti: 'ic',
52 | ical: 'ic',
53 | ful: '',
54 | ness: ''
55 | };
56 |
57 | var c = "[^aeiou]"; // consonant
58 | var v = "[aeiouy]"; // vowel
59 | var C = c + "[^aeiouy]*"; // consonant sequence
60 | var V = v + "[aeiou]*"; // vowel sequence
61 |
62 | var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
63 | var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
64 | var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
65 | var s_v = "^(" + C + ")?" + v; // vowel in stem
66 |
67 | this.stemWord = function (w) {
68 | var stem;
69 | var suffix;
70 | var firstch;
71 | var origword = w;
72 |
73 | if (w.length < 3)
74 | return w;
75 |
76 | var re;
77 | var re2;
78 | var re3;
79 | var re4;
80 |
81 | firstch = w.substr(0,1);
82 | if (firstch == "y")
83 | w = firstch.toUpperCase() + w.substr(1);
84 |
85 | // Step 1a
86 | re = /^(.+?)(ss|i)es$/;
87 | re2 = /^(.+?)([^s])s$/;
88 |
89 | if (re.test(w))
90 | w = w.replace(re,"$1$2");
91 | else if (re2.test(w))
92 | w = w.replace(re2,"$1$2");
93 |
94 | // Step 1b
95 | re = /^(.+?)eed$/;
96 | re2 = /^(.+?)(ed|ing)$/;
97 | if (re.test(w)) {
98 | var fp = re.exec(w);
99 | re = new RegExp(mgr0);
100 | if (re.test(fp[1])) {
101 | re = /.$/;
102 | w = w.replace(re,"");
103 | }
104 | }
105 | else if (re2.test(w)) {
106 | var fp = re2.exec(w);
107 | stem = fp[1];
108 | re2 = new RegExp(s_v);
109 | if (re2.test(stem)) {
110 | w = stem;
111 | re2 = /(at|bl|iz)$/;
112 | re3 = new RegExp("([^aeiouylsz])\\1$");
113 | re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
114 | if (re2.test(w))
115 | w = w + "e";
116 | else if (re3.test(w)) {
117 | re = /.$/;
118 | w = w.replace(re,"");
119 | }
120 | else if (re4.test(w))
121 | w = w + "e";
122 | }
123 | }
124 |
125 | // Step 1c
126 | re = /^(.+?)y$/;
127 | if (re.test(w)) {
128 | var fp = re.exec(w);
129 | stem = fp[1];
130 | re = new RegExp(s_v);
131 | if (re.test(stem))
132 | w = stem + "i";
133 | }
134 |
135 | // Step 2
136 | re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
137 | if (re.test(w)) {
138 | var fp = re.exec(w);
139 | stem = fp[1];
140 | suffix = fp[2];
141 | re = new RegExp(mgr0);
142 | if (re.test(stem))
143 | w = stem + step2list[suffix];
144 | }
145 |
146 | // Step 3
147 | re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
148 | if (re.test(w)) {
149 | var fp = re.exec(w);
150 | stem = fp[1];
151 | suffix = fp[2];
152 | re = new RegExp(mgr0);
153 | if (re.test(stem))
154 | w = stem + step3list[suffix];
155 | }
156 |
157 | // Step 4
158 | re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
159 | re2 = /^(.+?)(s|t)(ion)$/;
160 | if (re.test(w)) {
161 | var fp = re.exec(w);
162 | stem = fp[1];
163 | re = new RegExp(mgr1);
164 | if (re.test(stem))
165 | w = stem;
166 | }
167 | else if (re2.test(w)) {
168 | var fp = re2.exec(w);
169 | stem = fp[1] + fp[2];
170 | re2 = new RegExp(mgr1);
171 | if (re2.test(stem))
172 | w = stem;
173 | }
174 |
175 | // Step 5
176 | re = /^(.+?)e$/;
177 | if (re.test(w)) {
178 | var fp = re.exec(w);
179 | stem = fp[1];
180 | re = new RegExp(mgr1);
181 | re2 = new RegExp(meq1);
182 | re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
183 | if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
184 | w = stem;
185 | }
186 | re = /ll$/;
187 | re2 = new RegExp(mgr1);
188 | if (re.test(w) && re2.test(w)) {
189 | re = /.$/;
190 | w = w.replace(re,"");
191 | }
192 |
193 | // and turn initial Y back to y
194 | if (firstch == "y")
195 | w = firstch.toLowerCase() + w.substr(1);
196 | return w;
197 | }
198 | }
199 |
200 |
--------------------------------------------------------------------------------
/docs/build/html/_static/minus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/html/_static/minus.png
--------------------------------------------------------------------------------
/docs/build/html/_static/plus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/html/_static/plus.png
--------------------------------------------------------------------------------
/docs/build/html/_static/pygments.css:
--------------------------------------------------------------------------------
1 | pre { line-height: 125%; }
2 | td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
3 | span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
4 | td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
5 | span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
6 | .highlight .hll { background-color: #ffffcc }
7 | .highlight { background: #eeffcc; }
8 | .highlight .c { color: #408090; font-style: italic } /* Comment */
9 | .highlight .err { border: 1px solid #FF0000 } /* Error */
10 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */
11 | .highlight .o { color: #666666 } /* Operator */
12 | .highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */
13 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */
14 | .highlight .cp { color: #007020 } /* Comment.Preproc */
15 | .highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */
16 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */
17 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */
18 | .highlight .gd { color: #A00000 } /* Generic.Deleted */
19 | .highlight .ge { font-style: italic } /* Generic.Emph */
20 | .highlight .gr { color: #FF0000 } /* Generic.Error */
21 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
22 | .highlight .gi { color: #00A000 } /* Generic.Inserted */
23 | .highlight .go { color: #333333 } /* Generic.Output */
24 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */
25 | .highlight .gs { font-weight: bold } /* Generic.Strong */
26 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
27 | .highlight .gt { color: #0044DD } /* Generic.Traceback */
28 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */
29 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */
30 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */
31 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */
32 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */
33 | .highlight .kt { color: #902000 } /* Keyword.Type */
34 | .highlight .m { color: #208050 } /* Literal.Number */
35 | .highlight .s { color: #4070a0 } /* Literal.String */
36 | .highlight .na { color: #4070a0 } /* Name.Attribute */
37 | .highlight .nb { color: #007020 } /* Name.Builtin */
38 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */
39 | .highlight .no { color: #60add5 } /* Name.Constant */
40 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */
41 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */
42 | .highlight .ne { color: #007020 } /* Name.Exception */
43 | .highlight .nf { color: #06287e } /* Name.Function */
44 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */
45 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */
46 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */
47 | .highlight .nv { color: #bb60d5 } /* Name.Variable */
48 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */
49 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */
50 | .highlight .mb { color: #208050 } /* Literal.Number.Bin */
51 | .highlight .mf { color: #208050 } /* Literal.Number.Float */
52 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */
53 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */
54 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */
55 | .highlight .sa { color: #4070a0 } /* Literal.String.Affix */
56 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */
57 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */
58 | .highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */
59 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */
60 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */
61 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */
62 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */
63 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */
64 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */
65 | .highlight .sr { color: #235388 } /* Literal.String.Regex */
66 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */
67 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */
68 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */
69 | .highlight .fm { color: #06287e } /* Name.Function.Magic */
70 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */
71 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */
72 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */
73 | .highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */
74 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */
--------------------------------------------------------------------------------
/docs/build/html/_static/sphinx_highlight.js:
--------------------------------------------------------------------------------
1 | /* Highlighting utilities for Sphinx HTML documentation. */
2 | "use strict";
3 |
4 | const SPHINX_HIGHLIGHT_ENABLED = true
5 |
6 | /**
7 | * highlight a given string on a node by wrapping it in
8 | * span elements with the given class name.
9 | */
10 | const _highlight = (node, addItems, text, className) => {
11 | if (node.nodeType === Node.TEXT_NODE) {
12 | const val = node.nodeValue;
13 | const parent = node.parentNode;
14 | const pos = val.toLowerCase().indexOf(text);
15 | if (
16 | pos >= 0 &&
17 | !parent.classList.contains(className) &&
18 | !parent.classList.contains("nohighlight")
19 | ) {
20 | let span;
21 |
22 | const closestNode = parent.closest("body, svg, foreignObject");
23 | const isInSVG = closestNode && closestNode.matches("svg");
24 | if (isInSVG) {
25 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
26 | } else {
27 | span = document.createElement("span");
28 | span.classList.add(className);
29 | }
30 |
31 | span.appendChild(document.createTextNode(val.substr(pos, text.length)));
32 | const rest = document.createTextNode(val.substr(pos + text.length));
33 | parent.insertBefore(
34 | span,
35 | parent.insertBefore(
36 | rest,
37 | node.nextSibling
38 | )
39 | );
40 | node.nodeValue = val.substr(0, pos);
41 | /* There may be more occurrences of search term in this node. So call this
42 | * function recursively on the remaining fragment.
43 | */
44 | _highlight(rest, addItems, text, className);
45 |
46 | if (isInSVG) {
47 | const rect = document.createElementNS(
48 | "http://www.w3.org/2000/svg",
49 | "rect"
50 | );
51 | const bbox = parent.getBBox();
52 | rect.x.baseVal.value = bbox.x;
53 | rect.y.baseVal.value = bbox.y;
54 | rect.width.baseVal.value = bbox.width;
55 | rect.height.baseVal.value = bbox.height;
56 | rect.setAttribute("class", className);
57 | addItems.push({ parent: parent, target: rect });
58 | }
59 | }
60 | } else if (node.matches && !node.matches("button, select, textarea")) {
61 | node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
62 | }
63 | };
64 | const _highlightText = (thisNode, text, className) => {
65 | let addItems = [];
66 | _highlight(thisNode, addItems, text, className);
67 | addItems.forEach((obj) =>
68 | obj.parent.insertAdjacentElement("beforebegin", obj.target)
69 | );
70 | };
71 |
72 | /**
73 | * Small JavaScript module for the documentation.
74 | */
75 | const SphinxHighlight = {
76 |
77 | /**
78 | * highlight the search words provided in localstorage in the text
79 | */
80 | highlightSearchWords: () => {
81 | if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
82 |
83 | // get and clear terms from localstorage
84 | const url = new URL(window.location);
85 | const highlight =
86 | localStorage.getItem("sphinx_highlight_terms")
87 | || url.searchParams.get("highlight")
88 | || "";
89 | localStorage.removeItem("sphinx_highlight_terms")
90 | url.searchParams.delete("highlight");
91 | window.history.replaceState({}, "", url);
92 |
93 | // get individual terms from highlight string
94 | const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
95 | if (terms.length === 0) return; // nothing to do
96 |
97 | // There should never be more than one element matching "div.body"
98 | const divBody = document.querySelectorAll("div.body");
99 | const body = divBody.length ? divBody[0] : document.querySelector("body");
100 | window.setTimeout(() => {
101 | terms.forEach((term) => _highlightText(body, term, "highlighted"));
102 | }, 10);
103 |
104 | const searchBox = document.getElementById("searchbox");
105 | if (searchBox === null) return;
106 | searchBox.appendChild(
107 | document
108 | .createRange()
109 | .createContextualFragment(
110 | '<p class="highlight-link">' +
111 | '<a href="javascript:SphinxHighlight.hideSearchWords()">' +
112 | _("Hide Search Matches") +
113 | "</a></p>"
114 | )
115 | );
116 | },
117 |
118 | /**
119 | * helper function to hide the search marks again
120 | */
121 | hideSearchWords: () => {
122 | document
123 | .querySelectorAll("#searchbox .highlight-link")
124 | .forEach((el) => el.remove());
125 | document
126 | .querySelectorAll("span.highlighted")
127 | .forEach((el) => el.classList.remove("highlighted"));
128 | localStorage.removeItem("sphinx_highlight_terms")
129 | },
130 |
131 | initEscapeListener: () => {
132 | // only install a listener if it is really needed
133 | if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
134 |
135 | document.addEventListener("keydown", (event) => {
136 | // bail for input elements
137 | if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
138 | // bail with special keys
139 | if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
140 | if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
141 | SphinxHighlight.hideSearchWords();
142 | event.preventDefault();
143 | }
144 | });
145 | },
146 | };
147 |
148 | _ready(() => {
149 | /* Do not call highlightSearchWords() when we are on the search page.
150 | * It will highlight words from the *previous* search query.
151 | */
152 | if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords();
153 | SphinxHighlight.initEscapeListener();
154 | });
155 |
--------------------------------------------------------------------------------
/docs/build/html/index.html:
--------------------------------------------------------------------------------
[Built Sphinx HTML page; markup was stripped in extraction. Recoverable text: title "AutoTS — AutoTS 0.6.21 documentation"; "autots is an automated time series forecasting package for Python."; headings: Installation ("Requirements: Python 3.9+, numpy, pandas, statsmodels, and scikit-learn."), Getting Started, Modules API, Indices and tables.]
--------------------------------------------------------------------------------
/docs/build/html/objects.inv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/docs/build/html/objects.inv
--------------------------------------------------------------------------------
/docs/build/html/search.html:
--------------------------------------------------------------------------------
[Built Sphinx search page; markup was stripped in extraction. Recoverable text: title "Search — AutoTS 0.6.21 documentation"; heading "Search"; note "Searching for multiple words only shows matches that contain all words."]
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 | sys.path.insert(0, os.path.abspath('..'))
16 |
17 |
18 | # -- Project information -----------------------------------------------------
19 | from datetime import date
20 | project = 'AutoTS'
21 | copyright = u'%s, Colin Catlin' % date.today().year
22 | author = 'Colin Catlin'
23 |
24 | # The full version, including alpha/beta/rc tags
25 | # import AutoTS
26 | # from AutoTS import __version__
27 | # release = __version__
28 | release = "0.6.21"
29 |
30 | # -- General configuration ---------------------------------------------------
31 |
32 | # Add any Sphinx extension module names here, as strings. They can be
33 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 | # ones.
35 | # Add napoleon to the extensions list
36 | # 'recommonmark',
37 | extensions = ['sphinx.ext.napoleon', 'sphinx.ext.autodoc', 'm2r2', 'sphinx.ext.githubpages', "sphinxcontrib.googleanalytics"]
38 |
39 | source_suffix = ['.rst', '.md']
40 |
41 | # Add any paths that contain templates here, relative to this directory.
42 | templates_path = ['_templates']
43 |
44 | # List of patterns, relative to source directory, that match files and
45 | # directories to ignore when looking for source files.
46 | # This pattern also affects html_static_path and html_extra_path.
47 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
48 |
49 |
50 | # -- Options for HTML output -------------------------------------------------
51 |
52 | # The theme to use for HTML and HTML Help pages. See the documentation for
53 | # a list of builtin themes.
54 | #
55 | html_theme = 'alabaster'
56 |
57 | # Add any paths that contain custom static files (such as style sheets) here,
58 | # relative to this directory. They are copied after the builtin static files,
59 | # so a file named "default.css" will overwrite the builtin "default.css".
60 | html_static_path = ['_static']
61 |
62 | # The name of the Pygments (syntax highlighting) style to use.
63 | pygments_style = 'sphinx'
64 |
65 | html_theme_options = {
66 | "show_powered_by": False,
67 | 'logo': 'autots_logo.png',
68 | 'description': 'Automated Forecasting',
69 | "github_user": "winedarksea",
70 | "github_repo": "autots",
71 | "github_banner": False,
72 | "show_related": False,
73 | "note_bg": "#FFF59C",
74 | }
75 |
76 | # pip install sphinxcontrib-googleanalytics
77 | googleanalytics_id = "G-P2KLF8302E"
78 | # this will give a warning but works at least with pydata theme
79 | html_theme_options["analytics"] = {"google_analytics_id": googleanalytics_id}
80 | # html_theme_options['analytics_id'] = googleanalytics_id
81 |
82 | # Output file base name for HTML help builder.
83 | htmlhelp_basename = "autotsdoc"
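84 |
85 | # Usage sketch (an illustrative note, not part of the upstream file): with the
86 | # extensions above installed (e.g. pip install sphinx m2r2
87 | # sphinxcontrib-googleanalytics), the HTML docs build from this directory with
88 | # the same call make.bat issues:
89 | #   sphinx-build -M html . build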
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
[HTML markup was stripped in extraction; no text content is recoverable from this stub page.]
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | AutoTS
2 | ===================
3 |
4 | `autots`_ is an automated time series forecasting package for Python.
5 |
6 | Installation
7 | ------------
8 |
9 | .. code:: sh
10 |
11 | pip install autots
12 |
13 |
14 | Requirements: Python 3.9+, numpy, pandas, statsmodels, and scikit-learn.
15 |
16 | Getting Started
17 | ===================
18 | .. toctree::
19 | :maxdepth: 2
20 |
21 | source/intro
22 | source/tutorial
23 |
24 | Modules API
25 | ===================
26 | .. toctree::
27 | :maxdepth: 2
28 |
29 | source/modules
30 |
31 | Indices and tables
32 | ==================
33 |
34 | * :ref:`genindex`
35 | * :ref:`modindex`
36 | * :ref:`search`
37 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/source/autots.datasets.rst:
--------------------------------------------------------------------------------
1 | autots.datasets package
2 | =======================
3 |
4 | Submodules
5 | ----------
6 |
7 | autots.datasets.fred module
8 | ---------------------------
9 |
10 | .. automodule:: autots.datasets.fred
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | Module contents
16 | ---------------
17 |
18 | .. automodule:: autots.datasets
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
--------------------------------------------------------------------------------
/docs/source/autots.evaluator.rst:
--------------------------------------------------------------------------------
1 | autots.evaluator package
2 | ========================
3 |
4 | Submodules
5 | ----------
6 |
7 | autots.evaluator.anomaly\_detector module
8 | -----------------------------------------
9 |
10 | .. automodule:: autots.evaluator.anomaly_detector
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | autots.evaluator.auto\_model module
16 | -----------------------------------
17 |
18 | .. automodule:: autots.evaluator.auto_model
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | autots.evaluator.auto\_ts module
24 | --------------------------------
25 |
26 | .. automodule:: autots.evaluator.auto_ts
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | autots.evaluator.benchmark module
32 | ---------------------------------
33 |
34 | .. automodule:: autots.evaluator.benchmark
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | autots.evaluator.event\_forecasting module
40 | ------------------------------------------
41 |
42 | .. automodule:: autots.evaluator.event_forecasting
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 | autots.evaluator.metrics module
48 | -------------------------------
49 |
50 | .. automodule:: autots.evaluator.metrics
51 | :members:
52 | :undoc-members:
53 | :show-inheritance:
54 |
55 | autots.evaluator.validation module
56 | ----------------------------------
57 |
58 | .. automodule:: autots.evaluator.validation
59 | :members:
60 | :undoc-members:
61 | :show-inheritance:
62 |
63 | Module contents
64 | ---------------
65 |
66 | .. automodule:: autots.evaluator
67 | :members:
68 | :undoc-members:
69 | :show-inheritance:
70 |
--------------------------------------------------------------------------------
/docs/source/autots.models.rst:
--------------------------------------------------------------------------------
1 | autots.models package
2 | =====================
3 |
4 | Submodules
5 | ----------
6 |
7 | autots.models.arch module
8 | -------------------------
9 |
10 | .. automodule:: autots.models.arch
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | autots.models.base module
16 | -------------------------
17 |
18 | .. automodule:: autots.models.base
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | autots.models.basics module
24 | ---------------------------
25 |
26 | .. automodule:: autots.models.basics
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | autots.models.cassandra module
32 | ------------------------------
33 |
34 | .. automodule:: autots.models.cassandra
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | autots.models.composite module
40 | ------------------------------
41 |
42 | .. automodule:: autots.models.composite
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 | autots.models.dnn module
48 | ------------------------
49 |
50 | .. automodule:: autots.models.dnn
51 | :members:
52 | :undoc-members:
53 | :show-inheritance:
54 |
55 | autots.models.ensemble module
56 | -----------------------------
57 |
58 | .. automodule:: autots.models.ensemble
59 | :members:
60 | :undoc-members:
61 | :show-inheritance:
62 |
63 | autots.models.gluonts module
64 | ----------------------------
65 |
66 | .. automodule:: autots.models.gluonts
67 | :members:
68 | :undoc-members:
69 | :show-inheritance:
70 |
71 | autots.models.greykite module
72 | -----------------------------
73 |
74 | .. automodule:: autots.models.greykite
75 | :members:
76 | :undoc-members:
77 | :show-inheritance:
78 |
79 | autots.models.matrix\_var module
80 | --------------------------------
81 |
82 | .. automodule:: autots.models.matrix_var
83 | :members:
84 | :undoc-members:
85 | :show-inheritance:
86 |
87 | autots.models.mlensemble module
88 | -------------------------------
89 |
90 | .. automodule:: autots.models.mlensemble
91 | :members:
92 | :undoc-members:
93 | :show-inheritance:
94 |
95 | autots.models.model\_list module
96 | --------------------------------
97 |
98 | .. automodule:: autots.models.model_list
99 | :members:
100 | :undoc-members:
101 | :show-inheritance:
102 |
103 | autots.models.neural\_forecast module
104 | -------------------------------------
105 |
106 | .. automodule:: autots.models.neural_forecast
107 | :members:
108 | :undoc-members:
109 | :show-inheritance:
110 |
111 | autots.models.prophet module
112 | ----------------------------
113 |
114 | .. automodule:: autots.models.prophet
115 | :members:
116 | :undoc-members:
117 | :show-inheritance:
118 |
119 | autots.models.pytorch module
120 | ----------------------------
121 |
122 | .. automodule:: autots.models.pytorch
123 | :members:
124 | :undoc-members:
125 | :show-inheritance:
126 |
127 | autots.models.sklearn module
128 | ----------------------------
129 |
130 | .. automodule:: autots.models.sklearn
131 | :members:
132 | :undoc-members:
133 | :show-inheritance:
134 |
135 | autots.models.statsmodels module
136 | --------------------------------
137 |
138 | .. automodule:: autots.models.statsmodels
139 | :members:
140 | :undoc-members:
141 | :show-inheritance:
142 |
143 | autots.models.tfp module
144 | ------------------------
145 |
146 | .. automodule:: autots.models.tfp
147 | :members:
148 | :undoc-members:
149 | :show-inheritance:
150 |
151 | autots.models.tide module
152 | -------------------------
153 |
154 | .. automodule:: autots.models.tide
155 | :members:
156 | :undoc-members:
157 | :show-inheritance:
158 |
159 | Module contents
160 | ---------------
161 |
162 | .. automodule:: autots.models
163 | :members:
164 | :undoc-members:
165 | :show-inheritance:
166 |
--------------------------------------------------------------------------------
/docs/source/autots.rst:
--------------------------------------------------------------------------------
1 | autots package
2 | ==============
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 | :maxdepth: 4
9 |
10 | autots.datasets
11 | autots.evaluator
12 | autots.models
13 | autots.templates
14 | autots.tools
15 |
16 | Module contents
17 | ---------------
18 |
19 | .. automodule:: autots
20 | :members:
21 | :undoc-members:
22 | :show-inheritance:
23 |
--------------------------------------------------------------------------------
/docs/source/autots.templates.rst:
--------------------------------------------------------------------------------
1 | autots.templates package
2 | ========================
3 |
4 | Submodules
5 | ----------
6 |
7 | autots.templates.general module
8 | -------------------------------
9 |
10 | .. automodule:: autots.templates.general
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | Module contents
16 | ---------------
17 |
18 | .. automodule:: autots.templates
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
--------------------------------------------------------------------------------
/docs/source/autots.tools.rst:
--------------------------------------------------------------------------------
1 | autots.tools package
2 | ====================
3 |
4 | Submodules
5 | ----------
6 |
7 | autots.tools.anomaly\_utils module
8 | ----------------------------------
9 |
10 | .. automodule:: autots.tools.anomaly_utils
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | autots.tools.calendar module
16 | ----------------------------
17 |
18 | .. automodule:: autots.tools.calendar
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | autots.tools.cointegration module
24 | ---------------------------------
25 |
26 | .. automodule:: autots.tools.cointegration
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | autots.tools.constraint module
32 | ------------------------------
33 |
34 | .. automodule:: autots.tools.constraint
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | autots.tools.cpu\_count module
40 | ------------------------------
41 |
42 | .. automodule:: autots.tools.cpu_count
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 | autots.tools.fast\_kalman module
48 | --------------------------------
49 |
50 | .. automodule:: autots.tools.fast_kalman
51 | :members:
52 | :undoc-members:
53 | :show-inheritance:
54 |
55 | autots.tools.fft module
56 | -----------------------
57 |
58 | .. automodule:: autots.tools.fft
59 | :members:
60 | :undoc-members:
61 | :show-inheritance:
62 |
63 | autots.tools.fir\_filter module
64 | -------------------------------
65 |
66 | .. automodule:: autots.tools.fir_filter
67 | :members:
68 | :undoc-members:
69 | :show-inheritance:
70 |
71 | autots.tools.hierarchial module
72 | -------------------------------
73 |
74 | .. automodule:: autots.tools.hierarchial
75 | :members:
76 | :undoc-members:
77 | :show-inheritance:
78 |
79 | autots.tools.holiday module
80 | ---------------------------
81 |
82 | .. automodule:: autots.tools.holiday
83 | :members:
84 | :undoc-members:
85 | :show-inheritance:
86 |
87 | autots.tools.impute module
88 | --------------------------
89 |
90 | .. automodule:: autots.tools.impute
91 | :members:
92 | :undoc-members:
93 | :show-inheritance:
94 |
95 | autots.tools.kalman module
96 | --------------------------
97 |
98 | .. automodule:: autots.tools.kalman
99 | :members:
100 | :undoc-members:
101 | :show-inheritance:
102 |
103 | autots.tools.lunar module
104 | -------------------------
105 |
106 | .. automodule:: autots.tools.lunar
107 | :members:
108 | :undoc-members:
109 | :show-inheritance:
110 |
111 | autots.tools.percentile module
112 | ------------------------------
113 |
114 | .. automodule:: autots.tools.percentile
115 | :members:
116 | :undoc-members:
117 | :show-inheritance:
118 |
119 | autots.tools.probabilistic module
120 | ---------------------------------
121 |
122 | .. automodule:: autots.tools.probabilistic
123 | :members:
124 | :undoc-members:
125 | :show-inheritance:
126 |
127 | autots.tools.profile module
128 | ---------------------------
129 |
130 | .. automodule:: autots.tools.profile
131 | :members:
132 | :undoc-members:
133 | :show-inheritance:
134 |
135 | autots.tools.regressor module
136 | -----------------------------
137 |
138 | .. automodule:: autots.tools.regressor
139 | :members:
140 | :undoc-members:
141 | :show-inheritance:
142 |
143 | autots.tools.seasonal module
144 | ----------------------------
145 |
146 | .. automodule:: autots.tools.seasonal
147 | :members:
148 | :undoc-members:
149 | :show-inheritance:
150 |
151 | autots.tools.shaping module
152 | ---------------------------
153 |
154 | .. automodule:: autots.tools.shaping
155 | :members:
156 | :undoc-members:
157 | :show-inheritance:
158 |
159 | autots.tools.thresholding module
160 | --------------------------------
161 |
162 | .. automodule:: autots.tools.thresholding
163 | :members:
164 | :undoc-members:
165 | :show-inheritance:
166 |
167 | autots.tools.transform module
168 | -----------------------------
169 |
170 | .. automodule:: autots.tools.transform
171 | :members:
172 | :undoc-members:
173 | :show-inheritance:
174 |
175 | autots.tools.wavelet module
176 | ---------------------------
177 |
178 | .. automodule:: autots.tools.wavelet
179 | :members:
180 | :undoc-members:
181 | :show-inheritance:
182 |
183 | autots.tools.window\_functions module
184 | -------------------------------------
185 |
186 | .. automodule:: autots.tools.window_functions
187 | :members:
188 | :undoc-members:
189 | :show-inheritance:
190 |
191 | Module contents
192 | ---------------
193 |
194 | .. automodule:: autots.tools
195 | :members:
196 | :undoc-members:
197 | :show-inheritance:
198 |
--------------------------------------------------------------------------------
/docs/source/intro.rst:
--------------------------------------------------------------------------------
1 | Intro
2 | ===================
3 | .. mdinclude:: ../../README.md
4 |
--------------------------------------------------------------------------------
/docs/source/modules.rst:
--------------------------------------------------------------------------------
1 | autots
2 | ======
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | autots
8 |
--------------------------------------------------------------------------------
/docs/source/tutorial.rst:
--------------------------------------------------------------------------------
1 | Tutorial
2 | ===================
3 | .. mdinclude:: ../../extended_tutorial.md
4 |
--------------------------------------------------------------------------------
/img/autots_1280.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/img/autots_1280.png
--------------------------------------------------------------------------------
/img/autots_logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/img/autots_logo.jpg
--------------------------------------------------------------------------------
/img/autots_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/winedarksea/AutoTS/7258a7a655b040a4fbe11ac6439c30141252992c/img/autots_logo.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=61.0"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "autots"
7 | version = "0.6.21"
8 | authors = [
9 | { name="Colin Catlin", email="colin.catlin@gmail.com" },
10 | ]
11 | description = "Automated Time Series Forecasting"
12 | readme = "README.md"
13 | requires-python = ">=3.9"
14 | license = {text = "MIT License"}
15 | classifiers = [
16 | "Programming Language :: Python :: 3",
17 | "License :: OSI Approved :: MIT License",
18 | "Operating System :: OS Independent",
19 | ]
20 | dependencies = [
21 | "numpy>=1.14.6",
22 | "pandas>=0.25.0",
23 | "statsmodels>=0.10.0",
24 | "scikit-learn>=0.20.0",
25 | ]
26 | [project.optional-dependencies]
27 | additional = [
28 | "holidays>=0.9",
29 | 'prophet>=0.4.0',
30 | 'fredapi',
31 | 'tensorflow',
32 | 'xgboost>=1.6',
33 | 'lightgbm',
34 | 'joblib',
35 | 'scipy',
36 | 'arch',
37 | 'numexpr',
38 | 'bottleneck',
39 | 'yfinance',
40 | 'pytrends',
41 | 'matplotlib',
42 | 'requests',
43 | 'seaborn',
44 | ]
45 |
46 | [project.urls]
47 | "Homepage" = "https://github.com/winedarksea/AutoTS"
48 | "Bug Tracker" = "https://github.com/winedarksea/AutoTS/issues"
49 |
50 | [tool.setuptools.packages]
51 | find = {} # Scan the project directory with the default parameters
52 | [tool.setuptools]
53 | include-package-data = true
54 |
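55 | # Install sketch based on the metadata above (illustrative comment, not part
56 | # of the upstream file): the core package is
57 | #   pip install autots
58 | # and the optional-dependencies group defined above is pulled in with
59 | #   pip install "autots[additional]"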
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 |
3 | required = [
4 | "numpy>=1.14.6",
5 | "pandas>=0.25.0",
6 | "statsmodels>=0.10.0",
7 | "scikit-learn>=0.20.0",
8 | ]
9 |
10 | extras = {
11 | 'additional': [
12 | "holidays>=0.9",
13 | 'prophet>=0.4.0',
14 | 'fredapi',
15 | 'tensorflow',
16 | 'xgboost',
17 | 'lightgbm',
18 | 'joblib',
19 | 'scipy',
20 | 'arch',
21 | 'numexpr',
22 | 'bottleneck',
23 | 'yfinance',
24 | 'pytrends',
25 | 'matplotlib',
26 | 'requests',
27 | ]
28 | }
29 |
30 | with open("README.md", "r") as fh:
31 | long_description = fh.read()
32 |
33 | setuptools.setup(
34 | name="AutoTS",
35 | version="0.6.21",
36 | author="Colin Catlin",
37 | author_email="colin.catlin@syllepsis.live",
38 | description="Automated Time Series Forecasting",
39 | long_description=long_description,
40 | long_description_content_type="text/markdown",
41 | url="https://github.com/winedarksea/AutoTS",
42 | packages=setuptools.find_packages(),
43 | license="MIT",
44 | include_package_data=True,
45 | install_requires=required,
46 | extras_require=extras,
47 | classifiers=[
48 | "Programming Language :: Python :: 3",
49 | "License :: OSI Approved :: MIT License",
50 | "Operating System :: OS Independent",
51 | ],
52 | python_requires='>=3.9',
53 | )
54 |
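55 | # Developer-install sketch (illustrative comment, mirrors the extras above):
56 | # from a local checkout,
57 | #   pip install -e ".[additional]"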
--------------------------------------------------------------------------------
/tests/test_anomalies.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Test anomalies.
3 | Created on Mon Jul 18 16:27:48 2022
4 |
5 | @author: Colin
6 | """
7 | import unittest
8 | import numpy as np
9 | import pandas as pd
10 | from autots.tools.anomaly_utils import available_methods, fast_methods
11 | from autots.evaluator.anomaly_detector import AnomalyDetector, HolidayDetector
12 | from autots.datasets import load_live_daily
13 |
14 |
15 | def dict_loop(params):
16 | if 'transform_dict' in params.keys():
17 | x = params.get('transform_dict', {})
18 | if isinstance(x, dict):
19 | x = x.get('transformations', {})
20 | return x
21 | elif 'anomaly_detector_params' in params.keys():
22 | x = params.get('anomaly_detector_params', {})
23 | if isinstance(x, dict):
24 | x = x.get('transform_dict', {})
25 | if isinstance(x, dict):
26 | x = x.get('transformations', {})
27 | return x
28 | return {}
29 |
30 |
31 | class TestAnomalies(unittest.TestCase):
32 | @classmethod
33 | def setUpClass(cls):
34 | wiki_pages = [
35 | "Standard_deviation", # anti-holiday
36 | "Christmas",
37 | "Thanksgiving", # specific holiday
38 | "all",
39 | ]
40 | cls.df = load_live_daily(
41 | long=False,
42 | fred_series=None,
43 | tickers=None,
44 | trends_list=None,
45 | earthquake_min_magnitude=None,
46 | weather_stations=None,
47 | london_air_stations=None,
48 | gov_domain_list=None,
49 | weather_event_types=None,
50 | wikipedia_pages=wiki_pages,
51 | caiso_query=None,
52 | sleep_seconds=10,
53 | ).fillna(0).replace(np.inf, 0)
54 |
55 | def test_anomaly_holiday_detectors(self):
56 | print("Starting test_anomaly_holiday_detectors")
57 | """Combininng these to minimize live data download."""
58 | tried = []
59 | while not all(x in tried for x in available_methods):
60 | params = AnomalyDetector.get_new_params(method="deep")
61 | # remove 'Slice' as it messes up assertions
62 | while 'Slice' in dict_loop(params).values():
63 | params = AnomalyDetector.get_new_params(method="deep")
64 | with self.subTest(i=params['method']):
65 | tried.append(params['method'])
66 | mod = AnomalyDetector(output='multivariate', **params)
67 | num_cols = 2
68 | mod.detect(self.df[np.random.choice(self.df.columns, num_cols, replace=False)])
69 | # detected = mod.anomalies
70 | # print(params)
71 | # mod.plot()
72 | self.assertEqual(mod.anomalies.shape, (self.df.shape[0], num_cols), msg=f"from params {params}")
73 |
74 | mod = AnomalyDetector(output='univariate', **params)
75 | mod.detect(self.df[np.random.choice(self.df.columns, num_cols, replace=False)])
76 | self.assertEqual(mod.anomalies.shape, (self.df.shape[0], 1))
77 | # mod.plot()
78 |
79 | from prophet import Prophet
80 |
81 | tried = []
82 | forecast_length = 28
83 | holidays_detected = 0
84 | full_dates = self.df.index.union(pd.date_range(self.df.index.max(), freq="D", periods=forecast_length))
85 |
86 | while not all(x in tried for x in fast_methods):
87 | params = HolidayDetector.get_new_params(method="fast")
88 | while 'Slice' in dict_loop(params).values():
89 | params = HolidayDetector.get_new_params(method="fast")
90 | with self.subTest(i=params["anomaly_detector_params"]['method']):
91 | tried.append(params['anomaly_detector_params']['method'])
92 | mod = HolidayDetector(**params)
93 | mod.detect(self.df.copy())
94 | prophet_holidays = mod.dates_to_holidays(full_dates, style="prophet")
95 |
96 | for series in self.df.columns:
97 | # series = "wiki_George_Washington"
98 | holiday_subset = prophet_holidays[prophet_holidays['series'] == series]
99 | if holiday_subset.shape[0] >= 1:
100 | holidays_detected = 1
101 | m = Prophet(holidays=holiday_subset)
102 | # m = Prophet()
103 | m.fit(pd.DataFrame({'ds': self.df.index, 'y': self.df[series]}))
104 | future = m.make_future_dataframe(forecast_length)
105 | fcst = m.predict(future).set_index('ds') # noqa
106 | # m.plot_components(fcst)
107 | # mod.plot()
108 | temp = mod.dates_to_holidays(full_dates, style="flag")
109 | temp = mod.dates_to_holidays(full_dates, style="series_flag")
110 | temp = mod.dates_to_holidays(full_dates, style="impact")
111 | temp = mod.dates_to_holidays(full_dates, style="long") # noqa
112 | # this is a weak test, but will capture some functionality
113 | self.assertEqual(holidays_detected, 1, "no methods detected holidays")
114 |
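A minimal standalone sketch of the AnomalyDetector flow the test above exercises, on synthetic data rather than the live download; it assumes only the calls already used in the test:

import numpy as np
import pandas as pd
from autots.evaluator.anomaly_detector import AnomalyDetector

idx = pd.date_range("2022-01-01", periods=100, freq="D")
df = pd.DataFrame({"x": np.random.randn(100)}, index=idx)
df.iloc[50, 0] = 25.0  # inject one obvious spike

params = AnomalyDetector.get_new_params(method="deep")
mod = AnomalyDetector(output="multivariate", **params)
mod.detect(df)
print(mod.anomalies.shape)  # (100, 1): one row per date, one column per series
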
--------------------------------------------------------------------------------
/tests/test_calendar_holiday.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Test calendars
3 | """
4 | import unittest
5 | import numpy as np
6 | import pandas as pd
7 | from autots import load_daily
8 | from autots.tools.calendar import gregorian_to_chinese, gregorian_to_islamic, gregorian_to_hebrew, gregorian_to_hindu
9 | from autots.tools.lunar import moon_phase
10 | from autots.tools.holiday import holiday_flag
11 | from autots.tools.seasonal import date_part
12 |
13 |
14 | class TestCalendar(unittest.TestCase):
15 |
16 | def test_chinese(self):
17 | print("Starting test_chinese")
18 | input_dates = [
19 | "2014-01-01", "2022-06-30", "2026-02-28", "2000-02-05", "2040-04-15"
20 | ]
21 | result = gregorian_to_chinese(input_dates)
22 | result1 = result.iloc[0][['lunar_year', 'lunar_month', 'lunar_day']].tolist()
23 | result2 = result.iloc[1][['lunar_year', 'lunar_month', 'lunar_day']].tolist()
24 | result3 = result.iloc[2][['lunar_year', 'lunar_month', 'lunar_day']].tolist()
25 | result4 = result.iloc[3][['lunar_year', 'lunar_month', 'lunar_day']].tolist()
26 | result5 = result.iloc[4][['lunar_year', 'lunar_month', 'lunar_day']].tolist()
27 | self.assertEqual(result1, [2013, 12, 1])
28 | self.assertEqual(result2, [2022, 6, 2])
29 | self.assertEqual(result3, [2026, 1, 12])
30 | self.assertEqual(result4, [2000, 1, 1])
31 | self.assertEqual(result5, [2040, 3, 5])
32 |
33 | def test_islamic(self):
34 | print("Starting test_islamic")
35 | input_dates = [
36 | "2014-01-01", "2022-06-30", "2030-02-28", "2000-12-31", "2040-04-15"
37 | ]
38 | result = gregorian_to_islamic(input_dates)
39 | result1 = result.iloc[0][['year', 'month', 'day']].tolist()
40 | result2 = result.iloc[1][['year', 'month', 'day']].tolist()
41 | result3 = result.iloc[2][['year', 'month', 'day']].tolist()
42 | result4 = result.iloc[3][['year', 'month', 'day']].tolist()
43 | result5 = result.iloc[4][['year', 'month', 'day']].tolist()
44 | self.assertEqual(result1, [1435, 2, 29])
45 | self.assertEqual(result2, [1443, 12, 1])
46 | self.assertEqual(result3, [1451, 10, 25])
47 | self.assertEqual(result4, [1421, 10, 5])
48 | self.assertEqual(result5, [1462, 4, 3])
49 |
50 | def test_lunar(self):
51 | print("Starting test_lunar")
52 | self.assertAlmostEqual(moon_phase(pd.Timestamp("2022-07-18")), 0.686, 3)
53 | self.assertAlmostEqual(moon_phase(pd.Timestamp("1995-11-07")), 0.998, 3)
54 | self.assertAlmostEqual(moon_phase(pd.Timestamp("2035-02-08")), 0.002, 3)
55 |
56 | def test_hebrew(self):
57 | print("Starting test_hebrew")
58 | input_dates = [
59 | "2014-01-01", "2022-06-30", "2030-02-28", "2000-12-31", "2040-04-15"
60 | ]
61 | result = gregorian_to_hebrew(input_dates)
62 | result1 = result.iloc[0][['year', 'month', 'day']].tolist()
63 | result2 = result.iloc[1][['year', 'month', 'day']].tolist()
64 | result3 = result.iloc[2][['year', 'month', 'day']].tolist()
65 | result4 = result.iloc[3][['year', 'month', 'day']].tolist()
66 | result5 = result.iloc[4][['year', 'month', 'day']].tolist()
67 | self.assertEqual(result1, [5774, 10, 29])
68 | self.assertEqual(result2, [5782, 4, 1])
69 | self.assertEqual(result3, [5790, 12, 25])
70 | self.assertEqual(result4, [5761, 10, 5])
71 | self.assertEqual(result5, [5800, 2, 2])
72 |
73 | def test_hindu(self):
74 | # Diwali in 2021 was on November 4, 2021
75 | date = pd.to_datetime(['2021-11-04'])
76 | result = gregorian_to_hindu(date)
77 | # expected_month_name = 'Kartika'
78 | # expected_lunar_day = 30 # Amavasya is typically the 30th day
79 | # self.assertEqual(result.iloc[0]['hindu_month_name'], expected_month_name)
80 | # self.assertEqual(result.iloc[0]['lunar_day'], expected_lunar_day)
81 |
82 | # Diwali in 2024 was on October 31, 2024
83 | date = pd.to_datetime(['2024-10-31'])
84 | result = gregorian_to_hindu(date) # noqa
85 | # expected_month_name = 'Kartika'
86 | # expected_lunar_day = 30 # Amavasya is typically the 30th day
87 | # self.assertEqual(result.iloc[0]['hindu_month_name'], expected_month_name)
88 | # self.assertEqual(result.iloc[0]['lunar_day'], expected_lunar_day)
89 |
90 |
91 | class TestHolidayFlag(unittest.TestCase):
92 |
93 | def test_holiday_flag(self):
94 | print("Starting test_holiday_flag")
95 | input_dates = pd.date_range("2022-01-01", "2023-01-01", freq='D')
96 | flag_1 = holiday_flag(input_dates, country="US", encode_holiday_type=False, holidays_subdiv="PR")
97 | self.assertAlmostEqual(flag_1.loc["2022-07-04"].iloc[0], 1.0)
98 | self.assertAlmostEqual(flag_1.loc["2022-12-25"].iloc[0], 1.0)
99 | self.assertAlmostEqual(flag_1.loc["2022-12-13"].iloc[0], 0.0)
100 |
101 | flag_2 = holiday_flag(input_dates, country="US", encode_holiday_type=True, holidays_subdiv=None)
102 | self.assertAlmostEqual(flag_2.loc["2022-12-25", 'Christmas Day'], 1.0)
103 | self.assertAlmostEqual(flag_2.loc["2022-12-13", "Christmas Day"], 0.0)
104 |
105 | df = load_daily(long=False)
106 | hflag = holiday_flag(df.index, country="US")
107 | test_result = hflag[(hflag.index.month == 7) & (hflag.index.day == 4)].mean()
108 | self.assertEqual(test_result.iloc[0], 1)
109 |
110 |
111 | class TestSeasonal(unittest.TestCase):
112 |
113 | def test_date_part(self):
114 | print("Starting test_date_part")
115 | input_dates = pd.date_range("2021-01-01", "2023-01-01", freq='D')
116 | date_part_df = date_part(
117 | input_dates, method=['simple_binarized', 365.25, 'quarter'],
118 | set_index=True, holiday_country=["US"], holiday_countries_used=True
119 | )
120 | # assert all numeric and not NaN
121 | self.assertEqual(np.sum(date_part_df.isnull().to_numpy()), 0, msg="date part generating NaN")
122 | self.assertEqual(date_part_df.select_dtypes("number").shape, date_part_df.shape)
123 | # assert column names match expected
124 | expected_cols = [
125 | 'day',
126 | 'weekend',
127 | 'epoch',
128 | 'month_1',
129 | 'month_2',
130 | 'month_3',
131 | 'month_4',
132 | 'month_5',
133 | 'month_6',
134 | 'month_7',
135 | 'month_8',
136 | 'month_9',
137 | 'month_10',
138 | 'month_11',
139 | 'month_12',
140 | 'weekday_0',
141 | 'weekday_1',
142 | 'weekday_2',
143 | 'weekday_3',
144 | 'weekday_4',
145 | 'weekday_5',
146 | 'weekday_6',
147 | 'seasonality365.25_0',
148 | 'seasonality365.25_1',
149 | 'seasonality365.25_2',
150 | 'seasonality365.25_3',
151 | 'seasonality365.25_4',
152 | 'seasonality365.25_5',
153 | 'seasonality365.25_6',
154 | 'seasonality365.25_7',
155 | 'seasonality365.25_8',
156 | 'seasonality365.25_9',
157 | 'seasonality365.25_10',
158 | 'seasonality365.25_11',
159 | 'quarter_1',
160 | 'quarter_2',
161 | 'quarter_3',
162 | 'quarter_4',
163 | 'Christmas Day',
164 | 'Christmas Day (Observed)',
165 | 'Columbus Day',
166 | 'Independence Day',
167 | 'Independence Day (Observed)',
168 | 'Juneteenth National Independence Day',
169 | 'Juneteenth National Independence Day (Observed)',
170 | 'Labor Day',
171 | 'Martin Luther King Jr. Day',
172 | 'Memorial Day',
173 | "New Year's Day",
174 | "New Year's Day (Observed)",
175 | 'Thanksgiving',
176 | 'Veterans Day',
177 | 'Veterans Day (Observed)',
178 | "Washington's Birthday",
179 | ]
180 | self.assertCountEqual(date_part_df.columns.tolist(), expected_cols)
181 |
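For reference, a small sketch of date_part on its own; "simple_binarized" is assumed valid as a standalone method string, as it appears inside the composite method list above:

import pandas as pd
from autots.tools.seasonal import date_part

idx = pd.date_range("2022-01-01", periods=14, freq="D")
features = date_part(idx, method="simple_binarized", set_index=True)
print(features.columns.tolist())  # 'day', 'weekend', 'epoch', plus month_* and weekday_* dummies
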
--------------------------------------------------------------------------------
/tests/test_constraint.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Test constraint."""
3 | import unittest
4 | import numpy as np
5 | import pandas as pd
6 | from autots import load_daily, ModelPrediction
7 |
8 |
9 | class TestConstraint(unittest.TestCase):
10 |
11 | def test_constraint(self):
12 | df = load_daily(long=False)
13 | if "USW00014771_PRCP" in df.columns:
14 | # too close to zero, causes one test to fail
15 | df["USW00014771_PRCP"] = df["USW00014771_PRCP"] + 1
16 | forecast_length = 30
17 | constraint_types = {
18 | "empty": {
19 | "constraints": None,
20 | },
21 | "old_style": {
22 | "constraint_method": "quantile",
23 | "constraint_regularization": 0.99,
24 | "upper_constraint": 0.5,
25 | "lower_constraint": 0.1,
26 | "bounds": True,
27 | },
28 | "quantile": {
29 | "constraints": [{
30 | "constraint_method": "quantile",
31 | "constraint_value": 0.98,
32 | "constraint_direction": "upper",
33 | "constraint_regularization": 1.0,
34 | "bounds": False,
35 | },]
36 | },
37 | "last_value": {
38 | "constraints": [{
39 | "constraint_method": "last_window",
40 | "constraint_value": 0.0,
41 | "constraint_direction": "upper",
42 | "constraint_regularization": 1.0,
43 | "bounds": True,
44 | },
45 | {
46 | "constraint_method": "last_window",
47 | "constraint_value": 0.0,
48 | "constraint_direction": "lower",
49 | "constraint_regularization": 1.0,
50 | "bounds": True,
51 | },
52 | ]
53 | },
54 | "example": {"constraints": [
55 | { # don't exceed historic max
56 | "constraint_method": "quantile",
57 | "constraint_value": 1.0,
58 | "constraint_direction": "upper",
59 | "constraint_regularization": 1.0,
60 | "bounds": True,
61 | },
62 | { # don't exceed 2% growth by end of forecast horizon
63 | "constraint_method": "slope",
64 | "constraint_value": {"slope": 0.02, "window": 10, "window_agg": "max", "threshold": 0.01},
65 | "constraint_direction": "upper",
66 | "constraint_regularization": 0.9,
67 | "bounds": False,
68 | },
69 | { # don't go below the last 10 values - 10%
70 | "constraint_method": "last_window",
71 | "constraint_value": {"window": 10, "threshold": -0.1},
72 | "constraint_direction": "lower",
73 | "constraint_regularization": 1.0,
74 | "bounds": False,
75 | },
76 | { # don't go below zero
77 | "constraint_method": "absolute",
78 | "constraint_value": 0, # can also be an array or Series
79 | "constraint_direction": "lower",
80 | "constraint_regularization": 1.0,
81 | "bounds": True,
82 | },
83 | { # don't go below historic min - 1 st dev
84 | "constraint_method": "stdev_min",
85 | "constraint_value": 1.0,
86 | "constraint_direction": "lower",
87 | "constraint_regularization": 1.0,
88 | "bounds": True,
89 | },
90 | { # don't go above historic mean + 3 st devs, soft limit
91 | "constraint_method": "stdev",
92 | "constraint_value": 3.0,
93 | "constraint_direction": "upper",
94 | "constraint_regularization": 0.5,
95 | "bounds": True,
96 | },
97 | { # use a log curve shaped by the historic min/max growth rate to limit
98 | "constraint_method": "historic_growth",
99 | "constraint_value": 1.0,
100 | "constraint_direction": "upper",
101 | "constraint_regularization": 1.0,
102 | "bounds": True,
103 | },
104 | { # use a log curve shaped by the historic min/max growth rate to limit
105 | "constraint_method": "historic_growth",
106 | "constraint_value": {'threshold': 2.0, 'window': 360},
107 | "constraint_direction": "upper",
108 | "constraint_regularization": 1.0,
109 | "bounds": True,
110 | },
111 | { # like slope but steps
112 | 'constraint_method': 'historic_diff',
113 | 'constraint_direction': 'upper',
114 | 'constraint_regularization': 1.0,
115 | 'constraint_value': 1.0,
116 | },
117 | ]},
118 | "dampening": {
119 | "constraints": [{
120 | "constraint_method": "dampening",
121 | "constraint_value": 0.98,
122 | "bounds": True,
123 | },]
124 | },
125 | }
126 | for key, constraint in constraint_types.items():
127 | with self.subTest(i=key):
128 | model = ModelPrediction(
129 | forecast_length=forecast_length,
130 | transformation_dict={
131 | "fillna": "median",
132 | "transformations": {"0": "SinTrend", "1": "QuantileTransformer", "2": "bkfilter"},
133 | "transformation_params": {"0": {}, "1": {"output_distribution": "uniform", "n_quantiles": 1000}, "2": {}}
134 | },
135 | model_str="SeasonalityMotif",
136 | parameter_dict={
137 | "window": 7, "point_method": "midhinge",
138 | "distance_metric": "canberra", "k": 10,
139 | "datepart_method": "common_fourier",
140 | },
141 | no_negatives=True,
142 | )
143 | prediction = model.fit_predict(df, forecast_length=forecast_length)
144 | # apply an artificially low value
145 | prediction.forecast.iloc[0, 0] = -10
146 | prediction.forecast.iloc[0, -1] = df.iloc[:, -1].max() * 1.1
147 | prediction.plot(df, df.columns[-1])
148 | prediction.plot(df, df.columns[0])
149 |
150 | prediction.apply_constraints(
151 | df_train=df,
152 | **constraint
153 | )
154 | prediction.plot(df, df.columns[-1])
155 | prediction.plot(df, df.columns[0])
156 | # assuming all history was positive as example data currently is
157 | if key in ["empty", "dampening"]:
158 | self.assertTrue(prediction.forecast.min().min() == -10)
159 | else:
160 | self.assertTrue((prediction.forecast.sum() > 0).all())
161 |
162 | if key in ["old_style", "quantile"]:
163 | pred_max = prediction.forecast.iloc[:, -1].max()
164 | hist_max = df.iloc[:, -1].max()
165 | print(pred_max)
166 | print(hist_max)
167 | self.assertTrue(pred_max <= hist_max)
168 | if key in ["last_value"]:
169 | self.assertTrue(prediction.forecast.iloc[0, :].max() == df.iloc[-1, :].max())
170 | # test for nulls
171 | self.assertTrue(prediction.forecast.isnull().sum().sum() == 0)
172 |
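A minimal sketch of the constraint API outside the model loop, built only from calls the tests already make (PredictionObject from test_metrics.py, the "constraints" list form from above):

import pandas as pd
from autots.models.base import PredictionObject

pred = PredictionObject()
pred.forecast = pd.DataFrame({"a": [-5.0, 3.0, 12.0]})
pred.upper_forecast = pred.forecast + 2
pred.lower_forecast = pred.forecast - 2
train = pd.DataFrame({"a": [0.0, 4.0, 8.0, 10.0]})

pred = pred.apply_constraints(
    constraints=[{
        "constraint_method": "absolute",
        "constraint_value": 0,         # floor at zero
        "constraint_direction": "lower",
        "constraint_regularization": 1.0,
        "bounds": True,                # also clip the upper/lower forecasts
    }],
    df_train=train,
)
print(pred.forecast.min().min())  # >= 0 once the lower bound is applied
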
--------------------------------------------------------------------------------
/tests/test_event_forecasting.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Tests for event risk forecasting.
3 | """
4 | import unittest
5 | import numpy as np
6 | from autots import (
7 | load_weekly,
8 | load_daily,
9 | EventRiskForecast,
10 | )
11 |
12 |
13 | class TestEventRisk(unittest.TestCase):
14 |
15 | def test_event_risk(self):
16 | """This at least assures no changes in behavior go unnoticed, hopefully."""
17 | print("Starting test_event_risk")
18 | forecast_length = 6
19 | df_full = load_weekly(long=False)
20 | df = df_full[0: (df_full.shape[0] - forecast_length)]
21 | df_test = df[(df.shape[0] - forecast_length):]
22 |
23 | upper_limit = 0.8
24 | # if using manual array limits, historic limit must be defined separately (if used)
25 | lower_limit = np.ones((forecast_length, df.shape[1]))
26 | historic_lower_limit = np.ones(df.shape)
27 |
28 | model = EventRiskForecast(
29 | df,
30 | forecast_length=forecast_length,
31 | upper_limit=upper_limit,
32 | lower_limit=lower_limit,
33 | model_forecast_kwargs={
34 | "max_generations": 6,
35 | "verbose": 1,
36 | "transformer_list": "no_expanding",
37 | }
38 | )
39 | # .fit() is optional if model_name, model_param_dict, model_transform_dict are already defined (overwrites)
40 | model.fit()
41 | risk_df_upper, risk_df_lower = model.predict()
42 | historic_upper_risk_df, historic_lower_risk_df = model.predict_historic(lower_limit=historic_lower_limit)
43 | model.plot(1)
44 |
45 | # also eval summed version
46 | threshold = 0.1
47 | eval_upper = EventRiskForecast.generate_historic_risk_array(df_test, model.upper_limit_2d, direction="upper")
48 | pred_upper = np.where(model.upper_risk_array > threshold, 1, 0)
49 |
50 | self.assertTrue(risk_df_lower.shape == (forecast_length, df.shape[1]))
51 | self.assertFalse(risk_df_upper.isnull().all().all())
52 | self.assertFalse(risk_df_lower.isnull().all().all())
53 | self.assertTrue(historic_upper_risk_df.shape == df.shape)
54 | self.assertTrue(historic_lower_risk_df.shape == df.shape)
55 | self.assertFalse(historic_lower_risk_df.isnull().all().all())
56 | self.assertGreaterEqual(np.sum(pred_upper), 1)
57 | self.assertTrue(eval_upper.shape == pred_upper.shape)
58 |
59 | def test_event_risk_univariate(self):
60 | """This at least assures no changes in behavior go unnoticed, hopefully."""
61 | print("Starting test_event_risk_univariate")
62 | df = load_daily(long=False)
63 | df = df.iloc[:, 0:1]
64 | upper_limit = None
65 | lower_limit = {
66 | "model_name": "ARIMA",
67 | "model_param_dict": {'p': 1, "d": 0, "q": 1},
68 | "model_transform_dict": {},
69 | "prediction_interval": 0.5,
70 | }
71 | forecast_length = 6
72 |
73 | model = EventRiskForecast(
74 | df,
75 | forecast_length=forecast_length,
76 | upper_limit=upper_limit,
77 | lower_limit=lower_limit,
78 | prediction_interval=[0.9, 0.8, 0.7, 0.6, 0.5],
79 | model_forecast_kwargs={
80 | "max_generations": 6,
81 | "verbose": 2,
82 | "n_jobs": "auto",
83 | "random_seed": 321,
84 | "transformer_list": "no_expanding",
85 | },
86 | )
87 | model.fit(model_list="fast")
88 | risk_df_upper, risk_df_lower = model.predict()
89 | historic_upper_risk_df, historic_lower_risk_df = model.predict_historic()
90 | model.plot(0)
91 |
92 | self.assertTrue(risk_df_upper.isnull().all().all())
93 | self.assertTrue(risk_df_lower.shape == (forecast_length, df.shape[1]))
94 | self.assertFalse(risk_df_lower.isnull().all().all())
95 | self.assertTrue(historic_lower_risk_df.shape == df.shape)
96 |
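The three limit forms these tests exercise, side by side; note the float form is presumably interpreted relative to the training history (the tests only check shapes, so that reading is an assumption, not asserted behavior):

import numpy as np

forecast_length, n_series = 6, 3
scalar_limit = 0.8                                  # single threshold applied to all series
array_limit = np.ones((forecast_length, n_series))  # explicit per-step, per-series limits
model_limit = {                                     # forecast-model-defined limit
    "model_name": "ARIMA",
    "model_param_dict": {"p": 1, "d": 0, "q": 1},
    "model_transform_dict": {},
    "prediction_interval": 0.5,
}
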
--------------------------------------------------------------------------------
/tests/test_impute.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed May 4 21:26:27 2022
4 |
5 | @author: Colin
6 | """
7 | import unittest
8 | import numpy as np
9 | import pandas as pd
10 | from autots.tools.impute import FillNA
11 |
12 |
13 | class TestImpute(unittest.TestCase):
14 |
15 | def test_impute(self):
16 | print("Starting test_impute")
17 | df_nan = pd.DataFrame({
18 | 'a': [5, 10, 15, np.nan, 10],
19 | 'b': [5, 50, 15, np.nan, 10],
20 | })
21 | filled = FillNA(df_nan, method='ffill', window=10)
22 | self.assertTrue((filled.values.flatten() == np.array([5, 5, 10, 50, 15, 15, 15, 15, 10, 10])).all())
23 |
24 | filled = FillNA(df_nan, method='mean')
25 | self.assertTrue((filled.values.flatten() == np.array([5, 5, 10, 50, 15, 15, 10, 20, 10, 10])).all())
26 |
27 | filled = FillNA(df_nan, method='median')
28 | self.assertTrue((filled.values.flatten() == np.array([5, 5, 10, 50, 15, 15, 10, 12.5, 10, 10])).all())
29 |
30 | df_nan = pd.DataFrame({
31 | 'a': [5, 10, 15, np.nan, 10],
32 | 'b': [5, 50, 15, np.nan, 10],
33 | })
34 | filled = FillNA(df_nan, method='fake_date')
35 | self.assertTrue((filled.values.flatten() == np.array([5, 5, 5, 5, 10, 50, 15, 15, 10, 10])).all())
36 |
37 | df_nan = pd.DataFrame({
38 | 'a': [5, 10, 15, np.nan, 10],
39 | 'b': [5, 50, 15, np.nan, 10],
40 | })
41 | filled = FillNA(df_nan, method='fake_date_slice')
42 | self.assertTrue((filled.values.flatten() == np.array([5, 5, 10, 50, 15, 15, 10, 10])).all())
43 |
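The same FillNA calls on a one-column frame, as a quick reference for what each method does (values per the assertions above):

import numpy as np
import pandas as pd
from autots.tools.impute import FillNA

df = pd.DataFrame({"a": [1.0, np.nan, 3.0]})
print(FillNA(df, method="ffill"))   # NaN -> 1.0, the prior value carried forward
print(FillNA(df, method="median"))  # NaN -> 2.0, the column median
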
--------------------------------------------------------------------------------
/tests/test_metrics.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Test metrics."""
3 | import unittest
4 | import numpy as np
5 | import pandas as pd
6 | from autots.models.base import PredictionObject
7 |
8 |
9 | class TestMetrics(unittest.TestCase):
10 |
11 | def test_metrics(self):
12 | """This at least assures no changes in behavior go unnoticed, hopefully."""
13 | def custom_metric(A, F, df_train=None, prediction_interval=None):
14 | submission = F
15 | objective = A
16 | abs_err = np.nansum(np.abs(submission - objective))
17 | err = np.nansum((submission - objective))
18 | score = abs_err + abs(err)
19 | epsilon = 1
20 | big_sum = (
21 | np.nan_to_num(objective, nan=0.0, posinf=0.0, neginf=0.0).sum().sum()
22 | + epsilon
23 | )
24 | score /= big_sum
25 | return score
26 |
27 | predictions = PredictionObject()
28 | predictions.forecast = pd.DataFrame({
29 | 'a': [-10, 10, 10, -10, 0], # perfect forecast
30 | 'b': [0, 0, 0, 10, 10],
31 | 'c': [np.nan, np.nan, np.nan, np.nan, np.nan] # all NaN
32 | })
33 | predictions.upper_forecast = pd.DataFrame({
34 | 'a': [10, 20, -10, 10, 20],
35 | 'b': [0, 0, 0, 10, 10],
36 | 'c': [0, np.nan, np.nan, np.nan, np.nan]
37 | })
38 | predictions.lower_forecast = pd.DataFrame({
39 | 'a': [-10, 0, 10, 10, 0],
40 | 'b': [-10, -10, -10, -10, -5],
41 | 'c': [np.nan, np.nan, np.nan, np.nan, np.nan]
42 | })
43 | actual = pd.DataFrame({
44 | 'a': [-10, 10, 10, -10, 0],
45 | 'b': [0, 10, 0, 0, 10],
46 | 'c': [np.nan, np.nan, np.nan, np.nan, np.nan]
47 | })
48 |
49 | output_res = predictions.evaluate(
50 | actual,
51 | series_weights={'a': 10, 'b': 1, 'c': 1},
52 | df_train=actual, # just used for SPL scaling
53 | per_timestamp_errors=False,
54 | custom_metric=custom_metric,
55 | )
56 |
57 | known_avg_metrics = pd.Series(
58 | [
59 | 40., 2., 3.162, 0.533, 4.000, 0, 0, -3.333, 1.240, 3.333, 1.240,
60 | 0.572, 0.467, 0.467, 3.333, 0.533, 1.509, 1.250, 4.934, 1.778, 2.699, 0.267, 0.8, 0.952,
61 | ],
62 | index=[
63 | 'smape', 'mae', 'rmse', 'made', 'mage', 'mate', 'matse', 'underestimate', 'mle',
64 | 'overestimate', 'imle', 'spl', 'containment', 'contour', 'maxe',
65 | 'oda', 'dwae', 'mqae', 'ewmae', 'uwmse', 'smoothness', "wasserstein", "dwd", "custom",
66 | ]
67 | )
68 | known_avg_metrics_weighted = pd.Series(
69 | [
70 | 6.667, 0.333, 0.527, 0.089, 4.000, 0, 0, -0.833, 0.207, 0.833, 0.207,
71 | 0.623, 0.567, 0.717, 0.833, 0.883, 1.127, 0.208, 1.234, 0.444, 2.893, 0.044, 0.133, 0.952,
72 | ],
73 | index=[
74 | 'smape', 'mae', 'rmse', 'made', 'mage', 'mate', 'matse', 'underestimate', 'mle',
75 | 'overestimate', 'imle', 'spl', 'containment', 'contour', 'maxe',
76 | 'oda', 'dwae', 'mqae', 'ewmae', 'uwmse', 'smoothness', "wasserstein", "dwd", "custom",
77 | ]
78 | )
79 | b_avg_metrics = pd.Series(
80 | [
81 | 80., 4., 6.325, 1.067, 4.000, 0, 0, -10.0, 2.480, 10.0, 2.480, 0.44,
82 | 0.8, 0.6, 10.0, 0.60, 2.527, 2.50, 14.803, 5.333, 2.140, 0.533, 1.600, 0.952,
83 | ],
84 | index=[
85 | 'smape', 'mae', 'rmse', 'made', 'mage', 'mate', 'matse', 'underestimate',
86 | 'mle', 'overestimate', 'imle', 'spl', 'containment', 'contour',
87 | 'maxe', 'oda', 'dwae', 'mqae', 'ewmae', 'uwmse', 'smoothness', "wasserstein", "dwd", "custom",
88 | ]
89 | )
90 |
91 | pred_avg_metrics = output_res.avg_metrics.round(3)
92 | pred_weighted_avg = output_res.avg_metrics_weighted.round(3)
93 | b_avg = output_res.per_series_metrics['b'].round(3)
94 | self.assertTrue((pred_avg_metrics == known_avg_metrics).all())
95 | self.assertTrue((pred_weighted_avg == known_avg_metrics_weighted).all())
96 | self.assertTrue((b_avg == b_avg_metrics).all())
97 |
98 | # No custom
99 | output_res = predictions.evaluate(
100 | actual,
101 | series_weights={'a': 10, 'b': 1, 'c': 1},
102 | df_train=actual, # just used for SPL scaling
103 | per_timestamp_errors=False,
104 | custom_metric=None,
105 | )
106 | pred_avg_metrics = output_res.avg_metrics.round(3)
107 | self.assertEqual(pred_avg_metrics["custom"], 0.0)
108 |
109 |
110 | class TestConstraint(unittest.TestCase):
111 |
112 | def test_constraints(self):
113 | """This at least assures no changes in behavior go unnoticed, hopefully."""
114 | predictions = PredictionObject()
115 | predictions.forecast = pd.DataFrame({
116 | 'a': [-10, 10, 10, -10, 0],
117 | 'b': [0, 0, 0, 10, 10],
118 | }).astype(float)
119 | df_train = predictions.forecast.copy() + 1
120 | predictions.upper_forecast = pd.DataFrame({
121 | 'a': [10, 20, -10, 10, 20],
122 | 'b': [0, 0, 0, 10, 10],
123 | }).astype(float)
124 | predictions.lower_forecast = pd.DataFrame({
125 | 'a': [-10, 0, 10, 10, 0],
126 | 'b': [-10, -10, -10, -10, -5],
127 | }).astype(float)
128 | predictions = predictions.apply_constraints(
129 | constraint_method="quantile", constraint_regularization=1,
130 | upper_constraint=None, lower_constraint=0.0,
131 | bounds=True, df_train=df_train
132 | )
133 | predictions = predictions.apply_constraints(
134 | constraint_method="absolute", constraint_regularization=1,
135 | upper_constraint=[5.0, 5.0], lower_constraint=None,
136 | bounds=False, df_train=df_train
137 | )
138 | self.assertTrue(10.0 == predictions.lower_forecast.max().max())
139 | predictions = predictions.apply_constraints(
140 | constraint_method="stdev", constraint_regularization=0.5,
141 | upper_constraint=0.5, lower_constraint=None,
142 | bounds=True, df_train=df_train
143 | )
144 | # test lower constraint
145 | self.assertTrue((df_train.min() == predictions.lower_forecast.min()).all())
146 | self.assertTrue((df_train.min() == predictions.forecast.min()).all())
147 | self.assertTrue((df_train.min() == predictions.upper_forecast.min()).all())
148 | # test upper constraint
149 | self.assertTrue(10.0 == predictions.forecast.max().sum())
150 | self.assertTrue((predictions.upper_forecast.round(2).max() == np.array([13.00, 8.87])).all())
151 |
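Any callable with the signature used above can be passed as custom_metric: actuals A, forecasts F, plus optional df_train and prediction_interval. A sketch of an alternative metric (a plain RMSE, not from the repo):

import numpy as np

def custom_rmse(A, F, df_train=None, prediction_interval=None):
    # NaN-aware root mean squared error over all series and timesteps
    return float(np.sqrt(np.nanmean((np.asarray(F, dtype=float) - np.asarray(A, dtype=float)) ** 2)))

# passed the same way: predictions.evaluate(actual, custom_metric=custom_rmse, ...)
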
--------------------------------------------------------------------------------
/tests/test_percentile.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import unittest
3 | from autots import load_linear
4 | from autots.tools.percentile import nan_quantile, nan_percentile
5 | import timeit
6 |
7 |
8 | class TestPercentile(unittest.TestCase):
9 |
10 | def test_percentile(self):
11 | print("Starting test_percentile")
12 | df = load_linear(long=False, shape=(300, 10000), introduce_nan=0.2, introduce_random=100)
13 | arr = df.to_numpy()
14 |
15 | old_func = np.nanpercentile(arr, q=range(0, 100), axis=0)
16 | arr = df.to_numpy()  # fresh array; nan_percentile may reorder its input in place
17 | new_func = nan_percentile(arr, q=range(0, 100))
18 | self.assertTrue(np.allclose(new_func, old_func))
19 |
20 | arr = df.to_numpy().copy()
21 | res1 = nan_quantile(arr, q=0.5).round(3)
22 | res2 = df.quantile(0.5).values.round(3)
23 | self.assertTrue(
24 | (res1 == res2).all()
25 | )
26 | self.assertTrue(
27 | np.allclose(nan_quantile(arr, q=0.5), np.nanquantile(arr, 0.5, axis=0))
28 | )
29 |
30 | start_time = timeit.default_timer()
31 | res1 = nan_percentile(arr, q=[10, 25, 50, 75, 90])
32 | runtime_custom = timeit.default_timer() - start_time
33 |
34 | start_time = timeit.default_timer()
35 | res2 = np.nanpercentile(arr, q=[10, 25, 50, 75, 90], axis=0)
36 | runtime_np = timeit.default_timer() - start_time
37 |
38 | self.assertTrue(np.allclose(res1, res2))
39 | self.assertTrue(
40 | runtime_custom < runtime_np,
41 | "Failed to assert custom percentile was faster than numpy percentile. Rerun may fix."
42 | )
43 |
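A compact equivalence check mirroring the test; the .copy() matters because nan_percentile appears to reorder its input in place (hence the repeated .to_numpy() calls above):

import numpy as np
from autots.tools.percentile import nan_percentile

arr = np.array([[1.0, np.nan], [3.0, 4.0], [5.0, 6.0]])
print(nan_percentile(arr.copy(), q=[50]))
print(np.nanpercentile(arr, q=[50], axis=0))  # should match
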
--------------------------------------------------------------------------------
/tests/test_regressor.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Tests."""
3 |
4 | import unittest
5 | from autots import create_lagged_regressor, load_daily, create_regressor
6 |
7 |
8 | class TestCreateLaggedRegressor(unittest.TestCase):
9 | def test_create_regressor(self):
10 | print("Starting test_create_regressor")
11 | df = load_daily(long=False)
12 | forecast_length = 5
13 | regr, fcst = create_lagged_regressor(
14 | df,
15 | forecast_length=forecast_length,
16 | summarize=None,
17 | backfill='bfill',
18 | fill_na='ffill')
19 |
20 | self.assertEqual(regr.shape, df.shape)
21 | self.assertEqual(fcst.shape[0], forecast_length)
22 | self.assertFalse(regr.isna().any().any())
23 | self.assertFalse(fcst.isna().any().any())
24 | self.assertTrue((df.index == regr.index).all())
25 |
26 | regr, fcst = create_regressor(
27 | df,
28 | forecast_length=forecast_length,
29 | summarize="auto",
30 | datepart_method="recurring",
31 | holiday_countries=["UK", "US"],
32 | backfill='ffill',
33 | fill_na='zero')
34 |
35 | self.assertEqual(regr.shape[0], df.shape[0])
36 | self.assertEqual(fcst.shape[0], forecast_length)
37 | self.assertFalse(regr.isna().any().any())
38 | self.assertFalse(fcst.isna().any().any())
39 | self.assertTrue((df.index == regr.index).all())
40 |
41 | regr, fcst = create_lagged_regressor(
42 | df,
43 | forecast_length=forecast_length,
44 | summarize="mean+std",
45 | backfill='ETS',
46 | fill_na='mean')
47 |
48 | self.assertEqual(regr.shape[1], 2)
49 | self.assertEqual(fcst.shape[0], forecast_length)
50 | self.assertFalse(regr.isna().any().any())
51 | self.assertFalse(fcst.isna().any().any())
52 | self.assertTrue((df.index == regr.index).all())
53 |
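How these regressors are typically consumed downstream; a minimal sketch assuming only AutoTS's future_regressor arguments, with most parameters left at defaults:

from autots import AutoTS, create_regressor, load_daily

df = load_daily(long=False)
forecast_length = 5
regr, regr_fcst = create_regressor(df, forecast_length=forecast_length, summarize="auto")

model = AutoTS(forecast_length=forecast_length, max_generations=1)
model = model.fit(df, future_regressor=regr)          # aligned with df's index
prediction = model.predict(future_regressor=regr_fcst)  # covers the forecast horizon
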
--------------------------------------------------------------------------------
/tests/test_seasonal.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Seasonal unittests.
4 |
5 | Created on Sat May 4 21:43:02 2024
6 |
7 | @author: Colin
8 | """
9 |
10 | import unittest
11 | import numpy as np
12 | import pandas as pd
13 | from autots.tools.seasonal import date_part, base_seasonalities, datepart_components, random_datepart, fourier_df
14 | from autots.tools.holiday import holiday_flag
15 | from autots.tools.wavelet import create_narrowing_wavelets, offset_wavelet
16 |
17 |
18 | class TestSeasonal(unittest.TestCase):
19 |
20 | def test_date_part(self):
21 | DTindex = pd.date_range("2020-01-01", "2024-01-01", freq="D")
22 | for method in base_seasonalities:
23 | df = date_part(DTindex, method=method, set_index=True)
24 | self.assertEqual(df.shape[0], DTindex.shape[0])
25 | self.assertGreater(df.shape[1], 1)
26 |
27 | def test_date_components(self):
28 | DTindex = pd.date_range("2023-01-01", "2024-01-01", freq="h")
29 | for method in datepart_components:
30 | df = date_part(DTindex, method=method, set_index=True)
31 | self.assertEqual(df.shape[0], DTindex.shape[0])
32 |
33 | def test_random_datepart(self):
34 | out = random_datepart()
35 | self.assertTrue(out)
36 |
37 | def test_fourier(self):
38 | DTindex = pd.date_range("2020-01-02", "2024-01-01", freq="D")
39 | order = 10
40 | df = fourier_df(DTindex, seasonality=365.25, order=order)
41 | self.assertEqual(df.shape[1], order * 2)
42 | self.assertEqual(df.shape[0], DTindex.shape[0])
43 | self.assertAlmostEqual(df.mean().sum(), 0.0)
44 |
45 | def test_wavelets_repeat(self):
46 | DTindex = pd.date_range("2020-01-01", "2024-01-01", freq="D")
47 | origin_ts = "2030-01-01"
48 | t = (DTindex - pd.Timestamp(origin_ts)).total_seconds() / 86400
49 |
50 | p = 7
51 | w_order = 7
52 | weekly_wavelets = offset_wavelet(
53 | p=p, # Weekly period
54 | t=t,  # the full 2020-2024 daily range
55 | # origin_ts=origin_ts,
56 | order=w_order, # One offset for each day of the week
57 | # frequency=2 * np.pi / p, # Frequency for weekly pattern
58 | sigma=0.5, # Smaller sigma for tighter weekly spread
59 | wavelet_type="ricker",
60 | )
61 | self.assertEqual(weekly_wavelets.shape[1], w_order)
62 |
63 | # Example for yearly seasonality
64 | p = 365.25
65 | y_order = 12
66 | yearly_wavelets = offset_wavelet(
67 | p=p, # Yearly period
68 | t=t,  # the full 2020-2024 daily range
69 | # origin_ts=origin_ts,
70 | order=y_order, # One offset for each month
71 | # frequency=2 * np.pi / p, # Frequency for yearly pattern
72 | sigma=2.0, # Larger sigma for broader yearly spread
73 | wavelet_type="morlet",
74 | )
75 | yearly_wavelets2 = offset_wavelet(
76 | p=p, # Yearly period
77 | t=t[-100:],  # just the last 100 days
78 | # origin_ts=origin_ts,
79 | order=y_order, # One offset for each month
80 | # frequency=2 * np.pi / p, # Frequency for yearly pattern
81 | sigma=2.0, # Larger sigma for broader yearly spread
82 | wavelet_type="morlet",
83 | )
84 | self.assertEqual(yearly_wavelets.shape[1], y_order)
85 | self.assertTrue(np.allclose(yearly_wavelets[-100:], yearly_wavelets2))
86 |
87 | def test_wavelet_continuous(self):
88 | DTindex = pd.date_range("2020-01-01", "2024-01-01", freq="D")
89 | origin_ts = "2020-01-01"
90 | t_full = (DTindex - pd.Timestamp(origin_ts)).total_seconds() / 86400
91 |
92 | p = 365.25 # Example period
93 | max_order = 5 # Example maximum order
94 |
95 | # Full set of wavelets
96 | wavelets = create_narrowing_wavelets(p, max_order, t_full)
97 |
98 | # Wavelets for the last 100 days
99 | t_subset = t_full[-100:]
100 | wavelet_short = create_narrowing_wavelets(p, max_order, t_subset)
101 |
102 | # Check if the last 100 days of the full series match the subset
103 | self.assertTrue(np.allclose(wavelets[-100:], wavelet_short))
104 |
105 | def test_holiday_flag(self):
106 | # hourly being trickier
107 | train_index = pd.date_range("2020-01-01", "2023-01-01", freq="h")
108 | pred_index = pd.date_range("2023-01-02", "2024-01-01", freq="h")
109 |
110 | train_holiday = holiday_flag(train_index, country=["US", "CA"], encode_holiday_type=True)
111 | pred_holiday = holiday_flag(pred_index, country=["US", "CA"], encode_holiday_type=True)
112 |
113 | self.assertCountEqual(train_holiday.columns.tolist(), pred_holiday.columns.tolist())
114 | self.assertGreaterEqual(train_holiday.sum().sum(), 24)
115 | self.assertGreaterEqual(pred_holiday.sum().sum(), 24)
116 |
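The fourier_df shape contract asserted above, shown directly; nothing here beyond the arguments the test already passes:

import pandas as pd
from autots.tools.seasonal import fourier_df

idx = pd.date_range("2020-01-01", periods=730, freq="D")
terms = fourier_df(idx, seasonality=365.25, order=3)
print(terms.shape)  # (730, 6): one sin and one cos column per order
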
--------------------------------------------------------------------------------
/tests/test_transforms.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Overall testing."""
3 | import unittest
4 | import numpy as np
5 | import pandas as pd
6 | from autots.datasets import (
7 | load_daily, load_monthly, load_artificial, load_sine
8 | )
9 | from autots.tools.transform import ThetaTransformer, FIRFilter
10 |
11 | class TestTransforms(unittest.TestCase):
12 |
13 | def test_theta(self):
14 | # Sample DataFrame with a DatetimeIndex and multiple time series columns
15 | dates = pd.date_range(start='2020-01-01', periods=100, freq='D')
16 | data = pd.DataFrame({
17 | 'series1': np.random.randn(100).cumsum(),
18 | 'series2': np.random.randn(100).cumsum(),
19 | }, index=dates)
20 |
21 | theta_values = [0, 1, 2]
22 | theta_transformer = ThetaTransformer(theta_values=theta_values)
23 |
24 | params = theta_transformer.get_new_params()
25 | self.assertTrue(params)
26 |
27 | theta_transformer.fit(data)
28 | transformed_data = theta_transformer.transform(data)
29 | reconstructed_data = theta_transformer.inverse_transform(transformed_data)
30 | self.assertTrue(np.allclose(data.values, reconstructed_data.values, atol=1e-8))
31 |
32 | def test_firfilter(self):
33 | df = load_daily(long=False)
34 | transformer = FIRFilter()
35 | transformed = transformer.fit_transform(df)
36 | inverse = transformer.inverse_transform(transformed) # noqa
37 |
38 | if False:  # flip to True for a manual visual check of the filter output
39 | col = df.columns[0]
40 | pd.concat([df[col], transformed[col].rename("transformed")], axis=1).plot()
41 |
42 | self.assertCountEqual(transformed.index.tolist(), df.index.tolist())
43 | self.assertCountEqual(transformed.columns.tolist(), df.columns.tolist())
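A small standalone sketch of the FIRFilter round trip the test checks, using only fit_transform / inverse_transform as above:

import numpy as np
import pandas as pd
from autots.tools.transform import FIRFilter

idx = pd.date_range("2021-01-01", periods=200, freq="D")
noisy = pd.DataFrame(
    {"y": np.sin(np.arange(200) / 10) + np.random.randn(200) * 0.3}, index=idx
)

smoother = FIRFilter()
smoothed = smoother.fit_transform(noisy)        # same index/columns, noise reduced
restored = smoother.inverse_transform(smoothed)
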
44 |
--------------------------------------------------------------------------------