├── .editorconfig ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── codeql-analysis.yml │ ├── linters.yml │ ├── metrics.yml │ ├── test-docs.yml │ ├── tests.yml │ └── website.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── SECURITY.md ├── codecov.yml ├── docs ├── CNAME ├── Makefile ├── README.md ├── make.bat ├── source │ ├── _static │ │ ├── css │ │ │ ├── bootstrap.css │ │ │ └── custom.css │ │ └── js │ │ │ └── scripts.js │ ├── _templates │ │ ├── index.html │ │ └── sidebar │ │ │ └── brand.html │ ├── code │ │ ├── components │ │ │ ├── base.rst │ │ │ ├── future_regressors │ │ │ │ └── index.rst │ │ │ ├── router.rst │ │ │ ├── seasonality │ │ │ │ └── index.rst │ │ │ └── trend │ │ │ │ └── index.rst │ │ ├── configure.rst │ │ ├── custom_loss_metrics.rst │ │ ├── data │ │ │ ├── process.rst │ │ │ ├── split.rst │ │ │ └── transform.rst │ │ ├── df_utils.rst │ │ ├── forecaster.rst │ │ ├── index.rst │ │ ├── logger.rst │ │ ├── np_types.rst │ │ ├── plot_forecast_matplotlib.rst │ │ ├── plot_forecast_plotly.rst │ │ ├── plot_model_parameters_matplotlib.rst │ │ ├── plot_model_parameters_plotly.rst │ │ ├── plot_utils.rst │ │ ├── time_dataset.rst │ │ ├── time_net.rst │ │ ├── torch_prophet.rst │ │ ├── uncertainty.rst │ │ ├── utils.rst │ │ ├── utils_metrics.rst │ │ └── utils_torch.rst │ ├── community │ │ └── contribute.md │ ├── conf.py │ ├── contents.rst │ ├── how-to-guides │ │ ├── application-examples │ │ │ ├── energy_hospital_load.ipynb │ │ │ ├── energy_solar_pv.ipynb │ │ │ └── energy_tool.ipynb │ │ ├── feature-guides │ │ │ ├── Live_plot_during_training.ipynb │ │ │ ├── Migration_from_Prophet.ipynb │ │ │ ├── collect_predictions.ipynb │ │ │ ├── conditional_seasonality_peyton.ipynb │ │ │ ├── figures │ │ │ │ ├── fig1_p.png │ │ │ │ └── fig2_p.png │ │ │ ├── global_local_modeling.ipynb │ │ │ ├── global_local_modeling_fut_regr.ipynb │ │ │ ├── 
global_local_trend.ipynb │ │ │ ├── hyperparameter-selection.md │ │ │ ├── mlflow.ipynb │ │ │ ├── network_architecture_visualization.ipynb │ │ │ ├── plotly.ipynb │ │ │ ├── prophet_to_torch_prophet.ipynb │ │ │ ├── season_multiplicative_air_travel.ipynb │ │ │ ├── sparse_autoregression_yosemite_temps.ipynb │ │ │ ├── sub_daily_data_yosemite_temps.ipynb │ │ │ ├── test_and_crossvalidate.ipynb │ │ │ └── uncertainty_quantification.ipynb │ │ └── index.rst │ ├── images │ │ ├── np_favicon.png │ │ ├── np_highres.svg │ │ ├── np_highres_docs.svg │ │ ├── plot_comp_ar_1.png │ │ ├── plot_comp_ar_2.png │ │ ├── plot_comp_events_1.png │ │ ├── plot_comp_events_2.png │ │ ├── plot_comp_events_3.png │ │ ├── plot_comp_future_reg_1.png │ │ ├── plot_comp_future_reg_2.png │ │ ├── plot_comp_lag_reg_1.png │ │ ├── plot_comp_seasonality_1.png │ │ ├── plot_comp_seasonality_2.png │ │ ├── plot_comp_simple_1.png │ │ ├── plot_comp_trend_1.png │ │ ├── plot_forecast_ar_1.png │ │ ├── plot_forecasts_simple_1.png │ │ ├── plot_param_ar_1.png │ │ ├── plot_param_events_1.png │ │ ├── plot_param_events_2.png │ │ ├── plot_param_events_3.png │ │ ├── plot_param_events_4.png │ │ ├── plot_param_future_reg_1.png │ │ ├── plot_param_future_reg_2.png │ │ ├── plot_param_lag_reg_1.png │ │ ├── plot_param_seasonality_1.png │ │ ├── plot_param_simple_1.png │ │ └── plot_param_trend_1.png │ ├── quickstart.ipynb │ ├── science-behind │ │ └── model-overview.md │ └── tutorials │ │ ├── index.rst │ │ ├── next-steps.md │ │ ├── tutorial01.ipynb │ │ ├── tutorial02.ipynb │ │ ├── tutorial03.ipynb │ │ ├── tutorial04.ipynb │ │ ├── tutorial05.ipynb │ │ ├── tutorial06.ipynb │ │ ├── tutorial07.ipynb │ │ ├── tutorial08.ipynb │ │ ├── tutorial09.ipynb │ │ └── tutorial10.ipynb └── zh │ ├── README.md │ ├── logo.png │ ├── 事件.md │ ├── 季节性.md │ ├── 快速开始.md │ ├── 未来回归项.md │ ├── 模型概述.md │ ├── 滞后回归项.md │ ├── 相较于prophet的改进.md │ ├── 自回归.md │ ├── 贡献.md │ ├── 超参数选取.md │ └── 趋势项.md ├── neuralprophet ├── __init__.py ├── __main__.py ├── _version.py ├── components 
│ ├── README.md │ ├── __init__.py │ ├── base.py │ ├── future_regressors │ │ ├── __init__.py │ │ ├── base.py │ │ ├── linear.py │ │ ├── neural_nets.py │ │ ├── shared_neural_nets.py │ │ └── shared_neural_nets_coef.py │ ├── router.py │ ├── seasonality │ │ ├── __init__.py │ │ ├── base.py │ │ └── fourier.py │ └── trend │ │ ├── __init__.py │ │ ├── base.py │ │ ├── linear.py │ │ ├── piecewise_linear.py │ │ └── static.py ├── configure.py ├── configure_components.py ├── custom_loss_metrics.py ├── data │ ├── process.py │ ├── split.py │ └── transform.py ├── df_utils.py ├── event_utils.py ├── forecaster.py ├── logger.py ├── np_types.py ├── plot_forecast_matplotlib.py ├── plot_forecast_plotly.py ├── plot_model_parameters_matplotlib.py ├── plot_model_parameters_plotly.py ├── plot_utils.py ├── time_dataset.py ├── time_net.py ├── torch_prophet.py ├── uncertainty.py ├── utils.py ├── utils_lightning.py ├── utils_metrics.py ├── utils_time_dataset.py └── utils_torch.py ├── notes ├── NeuralProphet_Facebook_Forecasting_Summit.pdf ├── NeuralProphet_Introduction.pdf └── development_timeline.md ├── poetry.lock ├── pyproject.toml ├── repro.np ├── scripts ├── install_hooks.bash ├── neuralprophet_dev_setup.py ├── pre_commit.bash └── pre_push.bash ├── setup.cfg └── tests ├── __init__.py ├── debug ├── debug-energy-price-daily.ipynb ├── debug-energy-price-hourly.ipynb ├── debug-yosemite.ipynb └── debug_glocal.py ├── metrics ├── .gitkeep └── compareMetrics.py ├── pytest.ini ├── test-data ├── air_passengers.csv ├── ercot_load.csv ├── tutorial01-full_kaggle_energy_hourly.csv ├── tutorial01_kaggle_energy_daily.csv ├── tutorial04_kaggle_energy_daily_temperature.csv ├── wp_log_peyton_manning.csv └── yosemite_temps.csv ├── test_cli.py ├── test_configure.py ├── test_event_utils.py ├── test_future_regressor_nn.py ├── test_glocal.py ├── test_integration.py ├── test_model_performance.py ├── test_plotting.py ├── test_regularization.py ├── test_save.py ├── test_train_config.py ├── test_uncertainty.py ├── 
test_unit.py ├── test_utils.py ├── test_wrapper.py └── utils ├── benchmark_time_dataset.py └── dataset_generators.py /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: https://EditorConfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | [*] 7 | indent_style = space 8 | indent_size = 4 9 | end_of_line = lf 10 | charset = utf-8 11 | trim_trailing_whitespace = true 12 | insert_final_newline = true 13 | 14 | [*.yml] 15 | indent_size = 2 16 | 17 | [Makefile] 18 | indent_style = tab 19 | 20 | [*.bat] 21 | indent_style = tab 22 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Jupyter notebook language stats 2 | 3 | # For text count 4 | # *.ipynb text 5 | 6 | # To ignore it use below 7 | *.ipynb linguist-documentation 8 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | 5 | --- 6 | 7 | **Prerequisites** 8 | 9 | * [ ] Put an X between the brackets on this line if you have done all of the following: 10 | * Reproduced the problem in a new virtualenv with only neuralprophet installed, directly from github: 11 | ```shell 12 | git clone 13 | cd neural_prophet 14 | pip install . 15 | ``` 16 | * Checked the Answered Questions on the Github Discussion board: https://github.com/ourownstory/neural_prophet/discussions 17 | If you have the same question but the Answer does not solve your issue, please continue the conversation there. 
18 | * Checked that your issue isn't already filed: https://github.com/ourownstory/neural_prophet/issues 19 | If you have the same issue but there is a twist to your situation, please add an explanation there. 20 | * Considered whether your bug might actually be solvable by getting a question answered: 21 | * Please [post a package use question](https://github.com/ourownstory/neural_prophet/discussions/categories/q-a-get-help-using-neuralprophet) 22 | * Please [post a forecasting best practice question](https://github.com/ourownstory/neural_prophet/discussions/categories/q-a-forecasting-best-practices) 23 | * Please [post an idea or feedback](https://github.com/ourownstory/neural_prophet/discussions/categories/ideas-feedback) 24 | 25 | **Describe the bug** 26 | 27 | A clear and concise description of what the bug is. 28 | 29 | **To Reproduce** 30 | 31 | Steps to reproduce the behavior: 32 | 1. With the data '...' 33 | 2. Setting the model hyperparameters '....' 34 | 3. After running these code lines '....' 35 | 4. When using this function '....' 36 | 5. See error 37 | 38 | **Expected behavior** 39 | 40 | A clear and concise description of what you expected to happen. 41 | 42 | 43 | **What actually happens** 44 | 45 | Describe what happens, and how often it happens. 46 | 47 | **Screenshots** 48 | 49 | If applicable, add screenshots and console printouts to help explain your problem. 50 | 51 | **Environment (please complete the following information):** 52 | 53 | - Python environment [e.g. Python 3.8, in standalone venv with no other packages] 54 | - NeuralProphet version and install method [e.g. 2.7, installed from PYPI with `pip install neuralprophet`] 55 | 56 | **Additional context** 57 | 58 | Add any other context about the problem here. 
59 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | 5 | --- 6 | 7 | **Prerequisites** 8 | 9 | * [ ] Put an X between the brackets on this line if you have done all of the following: 10 | * Checked the Answered Questions on the Github Discussion board: https://github.com/ourownstory/neural_prophet/discussions 11 | If you have the same question but the Answer does not solve your issue, please continue the conversation there. 12 | * Checked that your issue isn't already filed: https://github.com/ourownstory/neural_prophet/issues 13 | If you have the same issue but there is a twist to your situation, please add an explanation there. 14 | * Considered whether your issue might need further discussing before being defined as a feature request: 15 | Please [post an idea or feedback](https://github.com/ourownstory/neural_prophet/discussions/categories/ideas-feedback) 16 | 17 | **Is your feature request related to a problem? Please describe.** 18 | 19 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 20 | 21 | **Describe the solution you'd like** 22 | 23 | A clear and concise description of what you want to happen. 24 | 25 | **Describe alternatives you've considered** 26 | 27 | A clear and concise description of any alternative solutions or features you've considered. 28 | 29 | **Additional context** 30 | 31 | Add any other context or screenshots about the feature request here. 32 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## :microscope: Background 2 | 3 | - Why is this change needed? 
Is there a related issue or a new feature to be added? 4 | 5 | ## :crystal_ball: Key changes 6 | 7 | - Explain the main changes introduced by this pull request for the reviewer. 8 | 9 | ## :clipboard: Review Checklist 10 | - [ ] I have performed a self-review of my own code. 11 | - [ ] I have commented my code, added docstrings and data types to function definitions. 12 | - [ ] I have added pytests to check whether my feature / fix works. 13 | 14 | Please make sure to follow our best practices in the [Contributing guidelines](https://github.com/ourownstory/neural_prophet/blob/main/CONTRIBUTING.md). 15 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 
11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [main] 17 | schedule: 18 | # ┌───────────── minute (0 - 59) 19 | # │ ┌───────────── hour (0 - 23) 20 | # │ │ ┌───────────── day of the month (1 - 31) 21 | # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) 22 | # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) 23 | # │ │ │ │ │ 24 | # │ │ │ │ │ 25 | # │ │ │ │ │ 26 | # * * * * * 27 | - cron: "35 18 * * 2" 28 | 29 | jobs: 30 | analyze: 31 | name: Analyze 32 | runs-on: ubuntu-latest 33 | 34 | permissions: 35 | actions: read 36 | contents: read 37 | security-events: write 38 | 39 | steps: 40 | - name: Checkout repository 41 | uses: actions/checkout@v3 42 | 43 | # Initializes the CodeQL tools for scanning. 44 | - name: Initialize CodeQL 45 | uses: github/codeql-action/init@v2 46 | with: 47 | languages: python 48 | 49 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 50 | # If this step fails, then you should remove it and run the build manually (see below) 51 | - name: Autobuild 52 | uses: github/codeql-action/autobuild@v2 53 | 54 | # ℹ️ Command-line programs to run using the OS shell. 
55 | # 📚 https://git.io/JvXDl 56 | 57 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 58 | # and modify them (or add more) to build your code if your project 59 | # uses a compiled language 60 | 61 | #- run: | 62 | # make bootstrap 63 | # make release 64 | 65 | - name: Perform CodeQL Analysis 66 | uses: github/codeql-action/analyze@v2 67 | -------------------------------------------------------------------------------- /.github/workflows/linters.yml: -------------------------------------------------------------------------------- 1 | name: Linters 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | flake8: 17 | name: flake8 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v3 22 | - name: Set up Python 23 | uses: actions/setup-python@v5 24 | with: 25 | python-version: "3.12" 26 | - run: python -m pip install flake8 27 | - name: flake8 28 | uses: liskin/gh-problem-matcher-wrap@v2 29 | with: 30 | linters: flake8 31 | run: flake8 neuralprophet scripts 32 | 33 | ruff: 34 | runs-on: ubuntu-latest 35 | steps: 36 | - name: Checkout 37 | uses: actions/checkout@v3 38 | - name: ruff 39 | uses: chartboost/ruff-action@v1 40 | 41 | isort: 42 | runs-on: ubuntu-latest 43 | steps: 44 | - name: Checkout 45 | uses: actions/checkout@v3 46 | - name: Set up Python 47 | uses: actions/setup-python@v5 48 | with: 49 | python-version: "3.12" 50 | - run: python -m pip install isort 51 | - name: isort 52 | uses: liskin/gh-problem-matcher-wrap@v2 53 | with: 54 | linters: isort 55 | run: isort --check --diff neuralprophet tests scripts 56 | 57 | black: 58 | runs-on: ubuntu-latest 59 | steps: 60 | - name: Checkout 61 | uses: actions/checkout@v3 62 | - name: black 63 | uses: psf/black@stable 64 | with: 65 | jupyter: true 66 | 67 | pyright: 68 | runs-on: 
ubuntu-latest 69 | env: 70 | POETRY_VIRTUALENVS_CREATE: false 71 | steps: 72 | - name: Checkout 73 | uses: actions/checkout@v3 74 | - name: Set up Python 75 | uses: actions/setup-python@v5 76 | with: 77 | python-version: "3.12" 78 | - name: Cache poetry 79 | id: poetry-cache 80 | uses: actions/cache@v3 81 | with: 82 | path: ~/.local 83 | key: poetry 84 | - name: Setup poetry 85 | if: steps.poetry-cache.outputs.cache-hit != 'true' 86 | uses: snok/install-poetry@v1 87 | - name: Install dependencies 88 | run: poetry install --no-interaction --with=linters --without=dev,docs,pytest,metrics 89 | - name: pyright 90 | uses: jakebailey/pyright-action@v1 91 | -------------------------------------------------------------------------------- /.github/workflows/metrics.yml: -------------------------------------------------------------------------------- 1 | name: Measure Model Performance 2 | on: 3 | push: 4 | paths: # Run only when files under neuralprophet/ are modified 5 | - neuralprophet/** 6 | branches: # Run only on these branches 7 | - main 8 | - develop 9 | pull_request: # Run on pull requests 10 | branches: 11 | - main 12 | - develop 13 | workflow_dispatch: 14 | 15 | jobs: 16 | metrics: 17 | runs-on: ubuntu-latest # container: docker://ghcr.io/iterative/cml:0-dvc2-base1 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v3 21 | with: 22 | ref: ${{ github.event.pull_request.head.sha }} 23 | 24 | - name: Install Python 3.12 25 | uses: actions/setup-python@v5 26 | with: 27 | python-version: "3.12" 28 | 29 | - name: Setup NodeJS (for CML) 30 | uses: actions/setup-node@v3 # For CML 31 | with: 32 | node-version: '20' 33 | 34 | - name: Setup CML 35 | uses: iterative/setup-cml@v1 36 | 37 | - name: Install Poetry 38 | uses: snok/install-poetry@v1 39 | 40 | - name: Install Dependencies 41 | run: poetry install --no-interaction --no-root --with=pytest,metrics --without=dev,docs,linters 42 | 43 | - name: Install Project 44 | run: poetry install --no-interaction 
--with=pytest,metrics --without=dev,docs,linters 45 | 46 | - name: Train model 47 | run: poetry run pytest tests/test_model_performance.py -n 1 --durations=0 48 | 49 | - name: Download metrics from main 50 | uses: dawidd6/action-download-artifact@v2 51 | with: 52 | repo: ourownstory/neural_prophet 53 | branch: main 54 | name: metrics 55 | path: tests/metrics-main/ 56 | if_no_artifact_found: warn 57 | 58 | - name: Open Benchmark Report 59 | run: echo "## Model Benchmark" >> report.md 60 | 61 | - name: Write Benchmark Report 62 | run: poetry run python tests/metrics/compareMetrics.py >> report.md 63 | 64 | - name: Publish Report with CML 65 | env: 66 | REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} 67 | run: | 68 | echo "
Model training plots" >> report.md 69 | echo "" >> report.md 70 | echo "## Model Training" >> report.md 71 | echo "" >> report.md 72 | echo "### PeytonManning" >> report.md 73 | cml asset publish tests/metrics/PeytonManning.svg --md >> report.md 74 | echo "" >> report.md 75 | echo "### YosemiteTemps" >> report.md 76 | cml asset publish tests/metrics/YosemiteTemps.svg --md >> report.md 77 | echo "" >> report.md 78 | echo "### AirPassengers" >> report.md 79 | cml asset publish tests/metrics/AirPassengers.svg --md >> report.md 80 | echo "" >> report.md 81 | echo "### EnergyPriceDaily" >> report.md 82 | cml asset publish tests/metrics/EnergyPriceDaily.svg --md >> report.md 83 | echo "" >> report.md 84 | echo "
" >> report.md 85 | echo "" >> report.md 86 | cml comment update --target=pr report.md # Post reports as comments in GitHub PRs 87 | cml check create --title=ModelReport report.md # update status of check in PR 88 | 89 | - name: Upload metrics if on main 90 | if: github.ref == 'refs/heads/main' 91 | uses: actions/upload-artifact@v3 92 | with: 93 | name: metrics 94 | path: tests/metrics/ 95 | -------------------------------------------------------------------------------- /.github/workflows/test-docs.yml: -------------------------------------------------------------------------------- 1 | name: Test build docs 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - website 7 | pull_request: 8 | branches: 9 | - main 10 | - website 11 | 12 | defaults: 13 | run: 14 | shell: bash 15 | 16 | jobs: 17 | docs: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v3 22 | - name: Install Python 23 | uses: actions/setup-python@v4 24 | with: 25 | python-version: "3.12" 26 | - name: Install Poetry 27 | uses: abatilo/actions-poetry@v2 28 | - name: Setup Pandoc 29 | uses: r-lib/actions/setup-pandoc@v2 30 | - name: Setup Requirements 31 | run: poetry install --no-interaction --with=docs --without=dev,pytest,metrics,linters 32 | - name: Build with Sphinx 33 | run: poetry run sphinx-build docs/source _site -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Pytest and Coverage 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | branches: 8 | - main 9 | 10 | defaults: 11 | run: 12 | shell: bash 13 | 14 | jobs: 15 | tests: 16 | runs-on: ${{matrix.os}} 17 | strategy: 18 | matrix: 19 | os: [ubuntu-latest] 20 | python-version: ["3.9", "3.12"] 21 | include: 22 | - os: "ubuntu-22.04" 23 | python-version: "3.10" 24 | env: 25 | POETRY_VIRTUALENVS_CREATE: false 26 | steps: 27 | - name: Checkout 28 | 
uses: actions/checkout@v3 29 | - name: Install Python ${{ matrix.python-version }} 30 | uses: actions/setup-python@v4 31 | with: 32 | python-version: ${{ matrix.python-version }} 33 | - name: Install Poetry 34 | uses: snok/install-poetry@v1 35 | - name: Install dependencies 36 | run: poetry install --no-root --no-interaction --with=dev,pytest --without=docs,metrics,linters 37 | - name: Install Project 38 | run: poetry install --no-interaction --with=dev,pytest --without=docs,metrics,linters 39 | - name: Pytest 40 | run: poetry run pytest tests -v -n auto --durations=0 --cov=./ --cov-report=xml 41 | - name: Upload coverage to codecov 42 | uses: codecov/codecov-action@v3 43 | with: 44 | env_vars: OS,PYTHON 45 | files: ./coverage.xml 46 | name: Coverage Report with codecov overview 47 | verbose: true 48 | -------------------------------------------------------------------------------- /.github/workflows/website.yml: -------------------------------------------------------------------------------- 1 | name: Deploy website with GitHub Pages 2 | 3 | on: 4 | # Runs on pushes targeting the default branch 5 | push: 6 | branches: 7 | - website 8 | 9 | # Allows you to run this workflow manually from the Actions tab 10 | workflow_dispatch: 11 | 12 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 13 | permissions: 14 | contents: read 15 | pages: write 16 | id-token: write 17 | 18 | # Allow one concurrent deployment 19 | concurrency: 20 | group: "pages" 21 | cancel-in-progress: true 22 | 23 | jobs: 24 | # Build job 25 | build: 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@v3 30 | - name: Setup Pages 31 | uses: actions/configure-pages@v2 32 | - name: Setup Pandoc 33 | uses: r-lib/actions/setup-pandoc@v2 34 | - name: Setup Python 35 | uses: actions/setup-python@v4 36 | with: 37 | python-version: "3.10" 38 | - name: Setup Poetry 39 | uses: snok/install-poetry@v1 40 | - name: Setup Requirements 41 | run: poetry 
install --with=docs --without=dev,pytest,metrics,linters 42 | - name: Build with Sphinx 43 | run: poetry run sphinx-build docs/source _site 44 | - name: Upload artifact 45 | uses: actions/upload-pages-artifact@v1 46 | 47 | # Deployment job 48 | deploy: 49 | environment: 50 | name: github-pages 51 | url: ${{ steps.deployment.outputs.page_url }} 52 | runs-on: ubuntu-latest 53 | needs: build 54 | steps: 55 | - name: Deploy to GitHub Pages 56 | id: deployment 57 | uses: actions/deploy-pages@v1 58 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # CUSTOM 2 | __py* 3 | *.egg* 4 | *.idea* 5 | **/*.log 6 | **/*.pyc 7 | **/__pycache__/ 8 | **/*.gz 9 | **/*.whl 10 | **/*egg-info/ 11 | site/ 12 | *.ipynb_checkpoints* 13 | **/*.DS_Store 14 | tests/test-logs/ 15 | **/test_save_model.np 16 | lightning_logs/ 17 | logs/ 18 | *.ckpt 19 | *.pt 20 | tests/metrics/*.json 21 | tests/metrics/*.svg 22 | .vscode/launch.json 23 | .vscode/settings.json 24 | /source/ 25 | 26 | # Byte-compiled / optimized / DLL files 27 | __pycache__/ 28 | *.py[cod] 29 | *$py.class 30 | 31 | # C extensions 32 | *.so 33 | 34 | # Distribution / packaging 35 | .Python 36 | build/ 37 | develop-eggs/ 38 | dist/ 39 | downloads/ 40 | eggs/ 41 | .eggs/ 42 | lib/ 43 | lib64/ 44 | parts/ 45 | sdist/ 46 | var/ 47 | wheels/ 48 | pip-wheel-metadata/ 49 | share/python-wheels/ 50 | *.egg-info/ 51 | .installed.cfg 52 | *.egg 53 | MANIFEST 54 | 55 | # PyInstaller 56 | # Usually these files are written by a python script from a template 57 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
58 | *.manifest 59 | *.spec 60 | 61 | # Installer logs 62 | pip-log.txt 63 | pip-delete-this-directory.txt 64 | 65 | # Translations 66 | *.mo 67 | *.pot 68 | 69 | # Django stuff: 70 | *.log 71 | local_settings.py 72 | db.sqlite3 73 | db.sqlite3-journal 74 | 75 | # Flask stuff: 76 | instance/ 77 | .webassets-cache 78 | 79 | # Scrapy stuff: 80 | .scrapy 81 | 82 | # Sphinx documentation 83 | docs/_build/ 84 | 85 | # PyBuilder 86 | target/ 87 | 88 | # Jupyter Notebook 89 | .ipynb_checkpoints 90 | 91 | # IPython 92 | profile_default/ 93 | ipython_config.py 94 | 95 | # pyenv 96 | .python-version 97 | 98 | # pipenv 99 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 100 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 101 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 102 | # install all needed dependencies. 103 | #Pipfile.lock 104 | 105 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 106 | __pypackages__/ 107 | 108 | # Celery stuff 109 | celerybeat-schedule 110 | celerybeat.pid 111 | 112 | # SageMath parsed files 113 | *.sage.py 114 | 115 | # Environments 116 | .env 117 | .venv 118 | env/ 119 | venv/ 120 | ENV/ 121 | env.bak/ 122 | venv.bak/ 123 | 124 | # Spyder project settings 125 | .spyderproject 126 | .spyproject 127 | 128 | # Rope project settings 129 | .ropeproject 130 | 131 | # mkdocs documentation 132 | /site 133 | 134 | # mypy 135 | .mypy_cache/ 136 | .dmypy.json 137 | dmypy.json 138 | 139 | # Pyre type checker 140 | .pyre/ 141 | 142 | # Coverage 143 | /.coverage 144 | /coverage.xml 145 | 146 | # Docs 147 | docs/doctrees/ 148 | docs/html/ 149 | _site/ 150 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | We are kind, understanding, honest, and supportive to one another and everyone else. 4 | We love you, too. 5 | Come join our community to receive a hug! 6 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to NeuralProphet 2 | :+1::tada: First off, thanks for taking the time to contribute! :tada::+1: 3 | 4 | Welcome to the Prophet community and thank you for your contribution to its continued legacy. 5 | We compiled this page with practical instructions and further resources to help you get started. 6 | 7 | For an easy start, check out all open issues with the label https://github.com/ourownstory/neural_prophet/labels/good%20first%20issue. 8 | They can be done somewhat in isolation from other tasks and will take a couple hours up to a week of work to complete. We appreciate your help! 
9 | 10 | Please come join us on our [Slack](https://join.slack.com/t/neuralprophet/shared_invite/zt-sgme2rw3-3dCH3YJ_wgg01IXHoYaeCg), you can message any core dev there. 11 | 12 | ## Process 13 | If this is your first time contributing to NeuralProphet, please read our [wiki summary](https://github.com/ourownstory/neural_prophet/wiki#contributing-process) of the steps involved. 14 | 15 | ## Dev Install using Poetry 16 | Prerequisite: [install poetry](https://python-poetry.org/docs/#installing-with-the-official-installer). 17 | * Start by cloning the repo with `git clone `. 18 | * Make sure you have changed directories to your cloned neuralprophet github `cd neural_prophet`. There, run `poetry shell` to (create and) start a (new) poetry virtual environment. If you run `poetry env info --path` you should see the path to the venv. 19 | * To complete the venv setup, install neuralprophet (in editable mode by default) with `poetry install`. 20 | Note: poetry will automatically use the specific dependencies in the `poetry.lock` file for reproducibility. If you want to install the latest dependencies instead, run `poetry update`. This will update all dependencies and update the `poetry.lock` file. Be mindful to not track the `poetry.lock` file with git when commiting, unless the purpose of your pull request is to update it. 21 | 22 | Warning, you are still lacking some git hooks to auto-format your code pre-commit and to run pytests pre-push. 23 | Currently these need to be self-added. Simplified instructions to follow. 
24 | 25 | [Tutorial Link](https://realpython.com/dependency-management-python-poetry/) 26 | 27 | ## Dev Install (old) 28 | Before starting it's a good idea to first create and activate a new virtual environment: 29 | ``` 30 | python3 -m venv 31 | source /bin/activate 32 | ``` 33 | Now you can install neuralprophet: 34 | 35 | ``` 36 | git clone 37 | cd neural_prophet 38 | pip install -e ".[dev]" 39 | ``` 40 | 41 | Please don't forget to run the dev setup script to install the hooks for black and pytest, and set git to fast forward only: 42 | ``` 43 | neuralprophet_dev_setup.py 44 | git config pull.ff only 45 | ``` 46 | 47 | Notes: 48 | * Including the optional `-e` flag will install neuralprophet in "editable" mode, meaning that instead of copying the files into your virtual environment, a symlink will be created to the files where they are. 49 | * The `neuralprophet_dev_setup` command runs the dev-setup script which installs appropriate git hooks for Black (pre-commit) and PyTest (pre-push). 50 | * setting git to fast-forward only prevents accidental merges when using `git pull`. 51 | * To run tests without pushing (or when the hook installation fails), run from neuralprophet folder: `pytest -v` 52 | * To run black without commiting (or when the hook installation fails): `python3 -m black {source_file_or_directory}` 53 | * If running `neuralprophet_dev_setup.py` gives you a `no such file` error, try running `python ./scripts/neuralprophet_dev_setup.py` 54 | 55 | ## Writing documentation 56 | The NeuralProphet documentation website is hosted via GitHub Pages on www.neuralprohet.com. Have a look at the [wiki](https://github.com/ourownstory/neural_prophet/wiki#writing-documentation) on how to write and build documentation. 
57 | 58 | ## Best practices 59 | We follow a set of guidelines and methodologies to ensure that code is of high quality, maintainable, and easily understandable by others who may contribute to the project: 60 | * [Typing](https://github.com/ourownstory/neural_prophet/wiki#typing): Use type annotations across the project to improve code readability and maintainability 61 | * [Tests and Code Coverage](https://github.com/ourownstory/neural_prophet/wiki#testing-and-code-coverage): Run tests using 'PyTest' to ensure that the code is functioning as expected. 62 | * [Continuous Integration](https://github.com/ourownstory/neural_prophet/wiki#continous-integration): Github Actions is used to set up a CI pipeline 63 | * [Code Style](https://github.com/ourownstory/neural_prophet/wiki#style): Deploy Black, so there is no need to worry about code style and formatting. 64 | 65 | ## Prefixes and labels for pull requests and issues 66 | 67 | ### Prefixes for pull requests 68 | All pull requests (PR) should have one of the following prefixes: 69 | 70 | * [breaking] Breaking changes, which require user action (e.g. breaking API changes) 71 | * [major] Major features worth mentioning (e.g. uncertainty prediction) 72 | * [minor] Minor changes which are nice to know about (e.g. add sorting to labels in plots) 73 | * [fix] Bugfixes (e.g. fix for plots not showing up) 74 | * [docs] Documentation related changes (e.g. add tutorial for energy dataset) 75 | * [tests] Tests additions and changes (e.g. add tests for utils) 76 | * [devops] Github workflows (e.g. add pyright type checking Github action) 77 | 78 | Those prefixed are then used to generate the changelog and decide which version number change is necessary for a release. 
79 | 80 | ### Labels for pull requests 81 | Once your PR needs attention, please add an appropriate label: 82 | 83 | - https://github.com/ourownstory/neural_prophet/labels/status%3A%20blocked 84 | - https://github.com/ourownstory/neural_prophet/labels/status%3A%20in%20development 85 | - https://github.com/ourownstory/neural_prophet/labels/status%3A%20needs%20review 86 | - https://github.com/ourownstory/neural_prophet/labels/status%3A%20needs%20update 87 | - https://github.com/ourownstory/neural_prophet/labels/status%3A%20ready 88 | 89 | ### Issue labels 90 | 91 | Issues should always have a type and a priority. Other labels are optional. 92 | 93 | **Issue type** 94 | 95 | https://github.com/ourownstory/neural_prophet/labels/bug 96 | https://github.com/ourownstory/neural_prophet/labels/epic 97 | https://github.com/ourownstory/neural_prophet/labels/task 98 | (questions should be moved to [discussions](https://github.com/ourownstory/neural_prophet/discussions)) 99 | 100 | **Priorities** 101 | 102 | https://github.com/ourownstory/neural_prophet/labels/P1 103 | https://github.com/ourownstory/neural_prophet/labels/P2 104 | https://github.com/ourownstory/neural_prophet/labels/P3 105 | 106 | **Getting started** 107 | 108 | https://github.com/ourownstory/neural_prophet/labels/good%20first%20issue 109 | 110 | **Closed for reason** 111 | 112 | https://github.com/ourownstory/neural_prophet/labels/duplicate 113 | https://github.com/ourownstory/neural_prophet/labels/wontfix 114 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Oskar Triebe 4 | 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | prune scripts 2 | include LICENSE.md 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/ourownstory/neural_prophet?logo=github)](https://github.com/ourownstory/neural_prophet/releases) 2 | [![Pypi_Version](https://img.shields.io/pypi/v/neuralprophet.svg)](https://pypi.python.org/pypi/neuralprophet) 3 | [![Python Version](https://img.shields.io/badge/python-3.9+-blue?logo=python)](https://www.python.org/) 4 | [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) 5 | [![License](https://img.shields.io/badge/license-MIT-brightgreen)](https://opensource.org/licenses/MIT) 6 | 
[![Tests](https://github.com/ourownstory/neural_prophet/actions/workflows/tests.yml/badge.svg)](https://github.com/ourownstory/neural_prophet/actions/workflows/tests.yml) 7 | [![codecov](https://codecov.io/gh/ourownstory/neural_prophet/branch/master/graph/badge.svg?token=U5KXCL55DW)](https://codecov.io/gh/ourownstory/neural_prophet) 8 | [![Slack](https://img.shields.io/badge/slack-@neuralprophet-CF0E5B.svg?logo=slack&logoColor=white&labelColor=3F0E40)](https://neuralprophet.slack.com/join/shared_invite/zt-sgme2rw3-3dCH3YJ_wgg01IXHoYaeCg#/shared-invite/email) 9 | [![Downloads](https://static.pepy.tech/personalized-badge/neuralprophet?period=total&units=international_system&left_color=black&right_color=blue&left_text=Downloads)](https://pepy.tech/project/neuralprophet) 10 | 11 | ![NP-logo-wide_cut](https://user-images.githubusercontent.com/21246060/111388960-6c367e80-866d-11eb-91c1-46f2c0d21879.PNG) 12 | 13 | 14 | Please note that the project is still in beta phase. Please report any issues you encounter or suggestions you have. We will do our best to address them quickly. Contributions are very welcome! 15 | 16 | # NeuralProphet: human-centered forecasting 17 | NeuralProphet is an easy to learn framework for interpretable time series forecasting. 18 | NeuralProphet is built on PyTorch and combines Neural Networks and traditional time-series algorithms, inspired by [Facebook Prophet](https://github.com/facebook/prophet) and [AR-Net](https://github.com/ourownstory/AR-Net). 19 | - With a few lines of code, you can define, customize, visualize, and evaluate your own forecasting models. 20 | - It is designed for iterative human-in-the-loop model building. That means that you can build a first model quickly, interpret the results, improve, repeat. Due to the focus on interpretability and customization-ability, NeuralProphet may not be the most accurate model out-of-the-box; so, don't hesitate to adjust and iterate until you like your results. 
21 | - NeuralProphet is best suited for time series data that is of higher-frequency (sub-daily) and longer duration (at least two full periods/years). 22 | 23 | 24 | ## Documentation 25 | The [documentation page](https://neuralprophet.com) may not be entirely up to date. Docstrings should be reliable, please refer to those when in doubt. We are working on an improved documentation. We appreciate any help to improve and update the docs. 26 | 27 | For a visual introduction to NeuralProphet, [view this presentation](notes/NeuralProphet_Introduction.pdf). 28 | 29 | ## Contribute 30 | We compiled a [Contributing to NeuralProphet](CONTRIBUTING.md) page with practical instructions and further resources to help you become part of the family. 31 | 32 | ## Community 33 | #### Discussion and Help 34 | If you have any questions or suggestion, you can participate in [our community right here on Github](https://github.com/ourownstory/neural_prophet/discussions) 35 | 36 | #### Slack Chat 37 | We also have an active [Slack community](https://join.slack.com/t/neuralprophet/shared_invite/zt-sgme2rw3-3dCH3YJ_wgg01IXHoYaeCg). Come and join the conversation! 38 | 39 | ## Tutorials 40 | [![Open All Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ourownstory/neural_prophet) 41 | 42 | There are several [example notebooks](docs/source/tutorials) to help you get started. 43 | 44 | You can find the datasets used in the tutorials, including data preprocessing examples, in our [neuralprophet-data repository](https://github.com/ourownstory/neuralprophet-data). 45 | 46 | Please refer to our [documentation page](https://neuralprophet.com) for more resources. 
47 | 48 | ### Minimal example 49 | ```python 50 | from neuralprophet import NeuralProphet 51 | ``` 52 | After importing the package, you can use NeuralProphet in your code: 53 | ```python 54 | m = NeuralProphet() 55 | metrics = m.fit(df) 56 | forecast = m.predict(df) 57 | ``` 58 | You can visualize your results with the inbuilt plotting functions: 59 | ```python 60 | fig_forecast = m.plot(forecast) 61 | fig_components = m.plot_components(forecast) 62 | fig_model = m.plot_parameters() 63 | ``` 64 | If you want to forecast into the unknown future, extend the dataframe before predicting: 65 | ```python 66 | m = NeuralProphet().fit(df, freq="D") 67 | df_future = m.make_future_dataframe(df, periods=30) 68 | forecast = m.predict(df_future) 69 | fig_forecast = m.plot(forecast) 70 | ``` 71 | ## Install 72 | You can now install neuralprophet directly with pip: 73 | ```shell 74 | pip install neuralprophet 75 | ``` 76 | 77 | ### Install options 78 | 79 | If you plan to use the package in a Jupyter notebook, we recommended to install the 'live' version: 80 | ```shell 81 | pip install neuralprophet[live] 82 | ``` 83 | This will allow you to enable `plot_live_loss` in the `fit` function to get a live plot of train (and validation) loss. 84 | 85 | If you would like the most up to date version, you can instead install directly from github: 86 | ```shell 87 | git clone 88 | cd neural_prophet 89 | pip install . 90 | ``` 91 | 92 | Note for Windows users: Please use WSL2. 93 | 94 | ## Features 95 | ### Model components 96 | * Autoregression: Autocorrelation modelling - linear or NN (AR-Net). 97 | * Trend: Piecewise linear trend with optional automatic changepoint detection. 98 | * Seasonality: Fourier terms at different periods such as yearly, daily, weekly, hourly. 99 | * Lagged regressors: Lagged observations (e.g temperature sensor) - linear or NN. 100 | * Future regressors: In advance known features (e.g. temperature forecast) - linear or NN. 
101 | * Events: Country holidays & recurring custom events. 102 | * Global Modeling: Components can be local, global or 'glocal' (global + regularized local) 103 | 104 | 105 | ### Framework features 106 | * Multiple time series: Fit a global/glocal model with (partially) shared model parameters. 107 | * Uncertainty: Estimate values of specific quantiles - Quantile Regression. 108 | * Regularize modelling components. 109 | * Plotting of forecast components, model coefficients and more. 110 | * Time series crossvalidation utility. 111 | * Model checkpointing and validation. 112 | 113 | 114 | ### Coming soon:tm: 115 | 116 | * Cross-relation of lagged regressors. 117 | * Static metadata regression for multiple series 118 | * Logistic growth for trend component. 119 | 120 | For a list of past changes, please refer to the [releases page](https://github.com/ourownstory/neural_prophet/releases). 121 | 122 | ## Cite 123 | Please cite [NeuralProphet](https://arxiv.org/abs/2111.15397) in your publications if it helps your research: 124 | ``` 125 | @misc{triebe2021neuralprophet, 126 | title={NeuralProphet: Explainable Forecasting at Scale}, 127 | author={Oskar Triebe and Hansika Hewamalage and Polina Pilyugina and Nikolay Laptev and Christoph Bergmeir and Ram Rajagopal}, 128 | year={2021}, 129 | eprint={2111.15397}, 130 | archivePrefix={arXiv}, 131 | primaryClass={cs.LG} 132 | } 133 | ``` 134 | ### Many Thanks To Our Contributors: 135 | 136 | 137 | 138 | 139 | ## About 140 | NeuralProphet is an open-source community project, supported by awesome people like you. 141 | If you are interested in joining the project, please feel free to reach out to me (Oskar) - you can find my email on the [NeuralProphet Paper](https://arxiv.org/abs/2111.15397). 
142 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | This project supports but the most recent version with security updates. 6 | 7 | ## Reporting a Vulnerability 8 | 9 | Feel free to report any vulnerabilities. 10 | 11 | Please also let us know if you have any security concerns for your particular use of this forecasting tool. 12 | 13 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | # ignore files or directories to be scanned by codecov 2 | ignore: 3 | - "setup.py" 4 | - "./tests/" 5 | - "./neuralprophet/__init__.py" 6 | 7 | coverage: 8 | status: 9 | project: 10 | default: 11 | threshold: 1% 12 | -------------------------------------------------------------------------------- /docs/CNAME: -------------------------------------------------------------------------------- 1 | neuralprophet.com 2 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = . 10 | 11 | # Autosphinx https://github.com/executablebooks/sphinx-autobuild#using-with-makefile 12 | livehtml: 13 | sphinx-autobuild "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | # Put it first so that "make" without argument is like "make help". 
16 | help: 17 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 18 | 19 | .PHONY: help Makefile 20 | 21 | # Catch-all target: route all unknown targets to Sphinx using the new 22 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 23 | %: Makefile 24 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 25 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | Welcome to our documentation! 2 | In the source folder, you can find the quickstart tutorial and further [basic tutorials](/docs/source/tutorials/index.rst) to get you started with NeuralProphet. 3 | The [how-to-guides](/docs/source/how-to-guides/index.rst) folder contains more advanced notebooks for the different features of NeuralProphet and application examples. Feel free to explore! -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR= . 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.https://www.sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/source/_static/css/custom.css: -------------------------------------------------------------------------------- 1 | .my_line{ 2 | text-transform: uppercase; 3 | } 4 | 5 | .announcement{ 6 | background-color: #2d92ffff; 7 | max-height: 40px; 8 | font-size: 0.7em; } 9 | @media (min-width: 431px) { 10 | .announcement { 11 | font-size: 0.875em;; 12 | } 13 | } 14 | @media (min-width: 576px) { 15 | .announcement { 16 | font-size: 1em; 17 | } 18 | } 19 | 20 | .announcement-content{ 21 | padding: 0.01em; 22 | } 23 | 24 | .github-button-container{ 25 | text-align: center; 26 | margin-top: -20px; 27 | } 28 | 29 | /* change background color of div.nboutput.container div.output_area pre if dark mode is active */ 30 | @media (prefers-color-scheme: dark) { 31 | body:not([data-theme="light"]) div.nboutput.container div.output_area.stderr .highlight, 32 | body:not([data-theme="light"]) div.nboutput.container div.output_area.stderr pre { 33 | background: #824d4d !important; 34 | } 35 | } 36 | 37 | body[data-theme="dark"] div.nboutput.container div.output_area.stderr .highlight, 38 | body[data-theme="dark"] div.nboutput.container div.output_area.stderr pre{ 39 | background: #824d4d !important; 40 | } 41 | 42 | 43 | /* change font color and background color of table text if dark mode is active */ 44 | @media (prefers-color-scheme: dark) { 45 | body:not([data-theme="light"]) .jp-RenderedHTMLCommon table, 46 | body:not([data-theme="light"]) div.rendered_html table{ 47 | color: #ffffff !important; 48 | } 49 | body:not([data-theme="light"]) .jp-RenderedHTMLCommon tbody tr:nth-child(odd), 50 | 
body:not([data-theme="light"]) div.rendered_html tbody tr:nth-child(odd){ 51 | background: #444444 !important; 52 | } 53 | 54 | body:not([data-theme="light"]) .jp-RenderedHTMLCommon tbody tr:hover, 55 | body:not([data-theme="light"]) div.rendered_html tbody tr:hover{ 56 | background: rgba(66, 165, 245, 0.2) !important; 57 | } 58 | } 59 | 60 | 61 | body[data-theme="dark"] .jp-RenderedHTMLCommon table, 62 | body[data-theme="dark"] div.rendered_html table{ 63 | color: #ffffff !important; 64 | } 65 | body[data-theme="dark"] dHTMLCommon tbody tr:nth-child(odd), 66 | body[data-theme="dark"] div.rendered_html tbody tr:nth-child(odd){ 67 | background: #444444 !important; 68 | } 69 | 70 | body[data-theme="dark"] .jp-RenderedHTMLCommon tbody tr:hover, 71 | body[data-theme="dark"] div.rendered_html tbody tr:hover{ 72 | background: rgba(66, 165, 245, 0.2) !important; 73 | } 74 | -------------------------------------------------------------------------------- /docs/source/_static/js/scripts.js: -------------------------------------------------------------------------------- 1 | $(function () { 2 | // init feather icons 3 | feather.replace(); 4 | }); -------------------------------------------------------------------------------- /docs/source/_templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | NeuralProphet 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 |
25 |
26 | 39 |
40 |
41 |

Explainable Forecasting at Scale

42 |

NeuralProphet bridges the gap between traditional time-series models and deep learning methods. It's based on PyTorch and can be installed using pip.

43 | 44 | GitHub 45 | 46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
from neuralprophet import NeuralProphet
57 | import pandas as pd
58 | 
59 | df = pd.read_csv('toiletpaper_daily_sales.csv')
60 | 
61 | m = NeuralProphet()
62 | 
63 | metrics = m.fit(df, freq="D")
64 | 
65 | forecast = m.predict(df)
66 | 
67 |
68 |
69 |
70 |
71 |
72 | Star 74 | 75 |
76 |
77 |
78 |
79 | 80 | 81 |
82 |
83 |
84 |
Made with 🌲 at Stanford University
85 |
86 |
87 |
88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | -------------------------------------------------------------------------------- /docs/source/_templates/sidebar/brand.html: -------------------------------------------------------------------------------- 1 | 19 | 20 | 21 | 22 | 23 | {% if not theme_sidebar_hide_name %} 24 | {{ docstitle if docstitle else project }} 25 | {%- endif %} 26 | {% endblock brand_content %} 27 | 28 | 29 | -------------------------------------------------------------------------------- /docs/source/code/components/base.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.components.base 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/components/future_regressors/index.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.components.future_regressors.base 5 | :members: 6 | 7 | .. automodule:: neuralprophet.components.future_regressors.linear 8 | :members: 9 | 10 | .. automodule:: neuralprophet.components.future_regressors.neural_nets 11 | :members: 12 | 13 | .. automodule:: neuralprophet.components.future_regressors.shared_neural_nets_coef 14 | :members: 15 | 16 | .. automodule:: neuralprophet.components.future_regressors.shared_neural_nets 17 | :members: 18 | -------------------------------------------------------------------------------- /docs/source/code/components/router.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. 
automodule:: neuralprophet.components.router 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/components/seasonality/index.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.components.seasonality.base 5 | :members: 6 | 7 | .. automodule:: neuralprophet.components.seasonality.fourier 8 | :members: 9 | -------------------------------------------------------------------------------- /docs/source/code/components/trend/index.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.components.trend.base 5 | :members: 6 | 7 | .. automodule:: neuralprophet.components.trend.linear 8 | :members: 9 | 10 | .. automodule:: neuralprophet.components.trend.piecewise_linear 11 | :members: 12 | 13 | .. automodule:: neuralprophet.components.trend.static 14 | :members: 15 | -------------------------------------------------------------------------------- /docs/source/code/configure.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.configure 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/custom_loss_metrics.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. 
automodule:: neuralprophet.custom_loss_metrics 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/data/process.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.data.process 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/data/split.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.data.split 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/data/transform.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.data.transform 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/df_utils.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.df_utils 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/forecaster.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. toctree:: 5 | :hidden: 6 | :maxdepth: 1 7 | 8 | configure.py 9 | configure_components.py 10 | df_utils.py 11 | event_utils.py 12 | plot_forecast_plotly.py 13 | plot_forecast_matplotlib.py 14 | plot_model_parameters_plotly.py 15 | plot_model_parameters_matplotlib.py 16 | time_dataset.py 17 | time_net.py 18 | utils_time_dataset.py 19 | utils.py 20 | 21 | .. 
automodule:: neuralprophet.forecaster 22 | :members: 23 | -------------------------------------------------------------------------------- /docs/source/code/index.rst: -------------------------------------------------------------------------------- 1 | Code Documentation 2 | ========================== 3 | 4 | .. toctree:: 5 | :maxdepth: 1 6 | 7 | forecaster.py (NeuralProphet) 8 | configure.py 9 | configure_components.py 10 | time_dataset.py 11 | time_net.py 12 | torch_prophet.py 13 | uncertainty.py 14 | data/process.py 15 | data/split.py 16 | data/transform.py 17 | components/router.py 18 | components/base.py 19 | components/future_regressors 20 | components/seasonality 21 | components/trend 22 | plot_forecast_plotly.py 23 | plot_forecast_matplotlib.py 24 | plot_model_parameters_plotly.py 25 | plot_model_parameters_matplotlib.py 26 | utils.py 27 | utils_time_dataset.py 28 | df_utils.py 29 | hdays_utils.py 30 | plot_utils.py 31 | utils_metrics.py 32 | utils_torch.py 33 | custom_loss_metrics.py 34 | logger.py 35 | np_types.py 36 | 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /docs/source/code/logger.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.logger 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/np_types.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.np_types 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/plot_forecast_matplotlib.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. 
automodule:: neuralprophet.plot_forecast_matplotlib 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/plot_forecast_plotly.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.plot_forecast_plotly 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/plot_model_parameters_matplotlib.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.plot_model_parameters_matplotlib 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/plot_model_parameters_plotly.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.plot_model_parameters_plotly 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/plot_utils.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.plot_utils 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/time_dataset.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. 
automodule:: neuralprophet.time_dataset 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/time_net.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.time_net 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/torch_prophet.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.torch_prophet 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/uncertainty.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.uncertainty 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/utils.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.utils 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/utils_metrics.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. automodule:: neuralprophet.utils_metrics 5 | :members: -------------------------------------------------------------------------------- /docs/source/code/utils_torch.rst: -------------------------------------------------------------------------------- 1 | Core Module Documentation 2 | ========================== 3 | 4 | .. 
automodule:: neuralprophet.utils_torch 5 | :members: -------------------------------------------------------------------------------- /docs/source/community/contribute.md: -------------------------------------------------------------------------------- 1 | # Contribute 2 | 3 | We compiled a [Contributing to NeuralProphet](https://github.com/ourownstory/neural_prophet/blob/master/CONTRIBUTING.md) page with practical instructions and further resources to help you become part of the family. -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | 14 | 15 | import os 16 | import sys 17 | from typing import Any, Dict 18 | 19 | import sphinx_fontawesome # noqa: F401 20 | from sphinx.ext.autodoc import between 21 | 22 | # sys.path.insert(0, os.path.abspath('.')) 23 | sys.path.insert(0, os.path.abspath("../..")) 24 | 25 | 26 | # -- Project information ----------------------------------------------------- 27 | 28 | project = "NeuralProphet" 29 | copyright = "2024, Oskar Triebe" 30 | author = "Oskar Triebe" 31 | version = "1.0.0" 32 | release = "1.0.0rc8" 33 | 34 | # -- General configuration --------------------------------------------------- 35 | 36 | # Add any Sphinx extension module names here, as strings. 
They can be 37 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 38 | # ones. 39 | extensions = [ 40 | "myst_parser", 41 | "sphinx.ext.autodoc", 42 | "sphinx.ext.coverage", 43 | "sphinx.ext.napoleon", 44 | "nbsphinx", 45 | "nbsphinx_link", 46 | "sphinx_fontawesome", 47 | ] 48 | html_sourcelink_suffix = "" 49 | 50 | # Here to describe what format of files are parsed 51 | source_suffix = { 52 | ".rst": "restructuredtext", 53 | ".txt": "markdown", 54 | ".md": "markdown", 55 | } 56 | 57 | # Add any paths that contain templates here, relative to this directory. 58 | # Note: in use for custom sidebar and landing page 59 | templates_path = ["_templates"] 60 | 61 | # List of patterns, relative to source directory, that match files and 62 | # directories to ignore when looking for source files. 63 | # This pattern also affects html_static_path and html_extra_path. 64 | exclude_patterns = [] 65 | 66 | 67 | # -- Options for HTML output ------------------------------------------------- 68 | 69 | # The theme to use for HTML and HTML Help pages. See the documentation for 70 | # a list of builtin themes. 71 | # 72 | html_theme = "furo" 73 | html_favicon = "images/np_favicon.png" 74 | html_logo = "images/np_highres_docs.svg" 75 | # html_logo = "images/logo.png" 76 | font_stack = "-apple-system,'system-ui','Segoe UI',Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji'" 77 | font_stack_mono = "'SFMono-Regular',Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace" 78 | html_theme_options: Dict[str, Any] = { 79 | "sidebar_hide_name": True, 80 | "navigation_with_keys": True, 81 | "light_css_variables": { 82 | "font-stack": font_stack, 83 | "font-stack--monospace": font_stack_mono, 84 | }, 85 | } 86 | 87 | # Add any paths that contain custom static files (such as style sheets) here, 88 | # relative to this directory. They are copied after the builtin static files, 89 | # so a file named "default.css" will overwrite the builtin "default.css". 
90 | html_static_path = ["_static", "images/np_highres.svg"] 91 | 92 | # html_sidebars = { '**': [ 93 | # "_templates/sidebar/brand.html", 94 | # "sidebar/search.html", 95 | # "sidebar/scroll-start.html", 96 | # "sidebar/navigation.html", 97 | # "sidebar/ethical-ads.html", 98 | # "sidebar/scroll-end.html", 99 | # ] } 100 | 101 | nbsphinx_execute_arguments = [ 102 | "--InlineBackend.figure_formats={'svg'}", 103 | "--InlineBackend.rc=figure.dpi=96", 104 | ] 105 | 106 | # change index.rst to contents.rst for custom landing page feature 107 | root_doc = "contents" 108 | 109 | html_additional_pages = { 110 | "index": "index.html", 111 | } 112 | 113 | 114 | def setup(app): 115 | app.add_css_file("css/custom.css") # may also be an URL 116 | # Register a sphinx.ext.autodoc.between listener to ignore everything between lines that contain the word COMMENT 117 | app.connect("autodoc-process-docstring", between("^.*COMMENT.*$", exclude=True)) 118 | return app 119 | -------------------------------------------------------------------------------- /docs/source/contents.rst: -------------------------------------------------------------------------------- 1 | .. NeuralProphet documentation master file, created by 2 | sphinx-quickstart on Tue Oct 12 13:27:59 2021. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ========================================= 7 | NeuralProphet 8 | ========================================= 9 | 10 | Fusing traditional time series algorithms using standard deep learning methods, built on PyTorch, inspired by `Facebook Prophet `_ and `AR-Net `_. 11 | 12 | 13 | Simple Example 14 | ------------------ 15 | 16 | .. 
code-block:: pycon 17 | 18 | >>> from neuralprophet import NeuralProphet 19 | >>> m = NeuralProphet() 20 | >>> metrics = m.fit(df) 21 | >>> forecast = m.predict(df) 22 | >>> m.plot(forecast) 23 | 24 | Features 25 | ------------------ 26 | 27 | NeuralProphet provides many time series modeling and workflow features, in a simple package: 28 | 29 | - Support for global modeling of many time series. 30 | - Automatic selection of training related hyperparameters. 31 | - Plotting utilities for forecast components, model coefficients and final predictions. 32 | - Local context through Autoregression and lagged covariates. 33 | - Changing trends and smooth seasonality at different periods. 34 | - Modeling of event, holiday, and future regressor effects. 35 | - Many customization options, such as regularization. 36 | 37 | Resources 38 | ----------------- 39 | 40 | - `Read the paper `_ 41 | - `GitHub repository `_ 42 | 43 | 44 | .. toctree:: 45 | :hidden: 46 | :maxdepth: 1 47 | 48 | Home 49 | Quick Start Guide 50 | 51 | .. toctree:: 52 | :hidden: 53 | :maxdepth: 1 54 | :caption: Tutorials 55 | 56 | Tutorials 57 | 58 | .. toctree:: 59 | :hidden: 60 | :maxdepth: 1 61 | :caption: How To Guides 62 | 63 | Guides 64 | 65 | .. toctree:: 66 | :hidden: 67 | :maxdepth: 1 68 | :caption: Code Documentation 69 | 70 | NeuralProphet 71 | 72 | .. toctree:: 73 | :hidden: 74 | :maxdepth: 1 75 | :caption: About 76 | 77 | Model Overview 78 | Presentation 79 | 80 | .. 
toctree:: 81 | :hidden: 82 | :maxdepth: 1 83 | :caption: Community 84 | 85 | Contribute 86 | GitHub 87 | Slack 88 | 89 | -------------------------------------------------------------------------------- /docs/source/how-to-guides/feature-guides/figures/fig1_p.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/how-to-guides/feature-guides/figures/fig1_p.png -------------------------------------------------------------------------------- /docs/source/how-to-guides/feature-guides/figures/fig2_p.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/how-to-guides/feature-guides/figures/fig2_p.png -------------------------------------------------------------------------------- /docs/source/how-to-guides/index.rst: -------------------------------------------------------------------------------- 1 | Feature guides 2 | -------------- 3 | 4 | .. note:: 5 | The feature guides show how to use specific features of NeuralProphet in detail. For more basic examples, see the tutorial section. 6 | 7 | 8 | .. toctree:: 9 | :maxdepth: 1 10 | 11 | Collect Predictions 12 | Testing and Cross Validation 13 | Plotting 14 | Global Local Modelling 15 | Uncertainty Quantification 16 | Conditional Seasonality 17 | Multiplicative Seasonality 18 | Sparse Autoregression 19 | Subdaily data 20 | Hyperparameter Selection 21 | MLflow Integration 22 | Live Plotting during Training 23 | Network Architecture Visualization 24 | 25 | Application examples 26 | -------------------- 27 | 28 | .. note:: 29 | Here you can find examples of how to use NeuralProphet on different datasets. 30 | 31 | 32 | .. 
toctree:: 33 | :maxdepth: 1 34 | 35 | Power Demand: Forecasting Load for a Hospital in SF 36 | Renewable Energy: Forecasting Solar 37 | Forecasting energy load with visualization 38 | 39 | Migrate From Prophet 40 | -------------------- 41 | 42 | .. toctree:: 43 | :maxdepth: 1 44 | 45 | Migration from Prophet 46 | Prophet to TorchProphet -------------------------------------------------------------------------------- /docs/source/images/np_favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/np_favicon.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_ar_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_ar_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_ar_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_ar_2.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_events_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_events_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_events_2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_events_2.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_events_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_events_3.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_future_reg_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_future_reg_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_future_reg_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_future_reg_2.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_lag_reg_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_lag_reg_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_seasonality_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_seasonality_1.png 
-------------------------------------------------------------------------------- /docs/source/images/plot_comp_seasonality_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_seasonality_2.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_simple_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_simple_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_comp_trend_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_comp_trend_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_forecast_ar_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_forecast_ar_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_forecasts_simple_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_forecasts_simple_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_param_ar_1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_ar_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_param_events_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_events_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_param_events_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_events_2.png -------------------------------------------------------------------------------- /docs/source/images/plot_param_events_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_events_3.png -------------------------------------------------------------------------------- /docs/source/images/plot_param_events_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_events_4.png -------------------------------------------------------------------------------- /docs/source/images/plot_param_future_reg_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_future_reg_1.png -------------------------------------------------------------------------------- 
/docs/source/images/plot_param_future_reg_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_future_reg_2.png -------------------------------------------------------------------------------- /docs/source/images/plot_param_lag_reg_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_lag_reg_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_param_seasonality_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_seasonality_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_param_simple_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_simple_1.png -------------------------------------------------------------------------------- /docs/source/images/plot_param_trend_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/source/images/plot_param_trend_1.png -------------------------------------------------------------------------------- /docs/source/science-behind/model-overview.md: -------------------------------------------------------------------------------- 1 | # Overview of the NeuralProphet Model 2 | 3 | NeuralProphet is a Neural Network based 
PyTorch implementation of a user-friendly time series forecasting tool for practitioners. This is heavily inspired by [Prophet](https://facebook.github.io/prophet/), which is the popular forecasting tool developed by Facebook. 4 | 5 | NeuralProphet is developed in a fully modular architecture which makes it scalable to add any additional components in the future. Our vision is to develop a simple to use forecasting tool for users while retaining the original objectives of Prophet such as interpretability, configurability and providing much more such as the automatic differencing capabilities by using PyTorch as the backend. 6 | 7 | ## Time Series Components 8 | 9 | NeuralProphet is a decomposable time series model with the components, trend, seasonality, auto-regression, special events, 10 | future regressors and lagged regressors. Future regressors are external variables which have known future values for the forecast 11 | period whereas the lagged regressors are those external variables which only have values for the observed period. Trend can be 12 | modelled either as a linear or a piece-wise linear trend by using changepoints. Seasonality is modelled using fourier terms and thus can handle multiple seasonalities for high-frequency data. Auto-regression is handled using an implementation of [AR-Net](https://github.com/ourownstory/AR-Net), an Auto-Regressive Feed-Forward Neural Network for time series. 13 | 14 | Lagged regressors are also modelled using separate Feed-Forward Neural Networks. Future regressors and special events are both modelled as covariates of the model with dedicated coefficients. For more details, refer to the documentation of the individual components. 15 | 16 | ## Data Preprocessing 17 | 18 | We perform a few data pre-processing steps in the model. For the observed values of the time series, users can specify whether they would like the values to be normalized. By default, the `y` values would be min-max normalized. 
If the user specifically, sets the `normalize_y` argument to `true`, the data is z-score normalized. Normalization can be performed for covariates as well. The default mode for normalization of covariates is `auto`. In this mode, apart from binary features such as events, all others are 19 | z-score normalized. 20 | 21 | We also perform an imputation in-case there are missing values in the data. However, imputation is only done if auto-regression is enabled in the model. In case of auto-regression, users may also choose not to impute any missing values and/or even drop missing values from the data, which should be done with caution as it may affect the model performance. Otherwise, the missing values do not really matter for the regression model. No special imputation is done for binary data. They are simply taken as `0` for the missing dates. For the numeric data, including the `y` values, normalization is a two-step process. First, small gaps are filled with a linear imputation and then the larger gaps are filled with rolling averages. When auto-regression is enabled, the observed `y` values are preprocessed in a moving window format to learn from lagged values. This is done for lagged regressors as well. 22 | 23 | ## When to Use NeuralProphet 24 | 25 | NeuralProphet can produce both single step and multi step-ahead forecasts. NeuralProphet can build models based on a single time series or even from a group of time series. The latter is a recent addition to our forecasting tool widely known as global forecasting models. 26 | 27 | NeuralProphet helps build forecasting models for scenarios where there are other external factors which can drive the behaviour of the target series over time. Using such external information can heavily improve forecasting models rather than relying only on the autocorrelation of the series. 
NeuralProphet tool is suitable for forecasting practitioners that wish to gain insights into the overall modelling process by visualizing the forecasts, the individual components as well as the underlying coefficients of the model. Through our descriptive plots, users can visualize the interaction of the individual components. They also have the power to control these coefficients as required by introducing sparsity through regularization. They can combine the components additively or multiplicatively as per their domain knowledge. 28 | 29 | This is an ongoing effort. Therefore, NeuralProphet will be equipped with even much more features in the upcoming releases. 30 | -------------------------------------------------------------------------------- /docs/source/tutorials/index.rst: -------------------------------------------------------------------------------- 1 | New Tutorials 2 | ------------- 3 | 4 | .. note:: 5 | The new tutorial section is still under development and certain sections might be empty or incomplete. We are working on it and would be very glad if you provide us with feedback. See `Github issue`_ for further information. 6 | 7 | .. _Github issue: https://github.com/ourownstory/neural_prophet/issues/767 8 | 9 | 10 | .. toctree:: 11 | :maxdepth: 1 12 | 13 | 01: The Basics 14 | 02: Trends 15 | 03: Seasonality 16 | 04: Auto Regression 17 | 05: Lagged Regressors 18 | 06: Future Regressors 19 | 07: Events and Holidays 20 | 08: Uncertainty 21 | 09: Global Model 22 | 10: Validation and Reproducibility 23 | 11: Next Steps 24 | -------------------------------------------------------------------------------- /docs/source/tutorials/next-steps.md: -------------------------------------------------------------------------------- 1 | # Next steps 2 | 3 | 1. Browse the [feature guides and application examples](../how-to-guides/index.rst) 4 | 2. Read about the [science behind](../science-behind/model-overview.md) NeuralProphet 5 | 3. 
Explore the [source code](https://github.com/ourownstory/neural_prophet) and [API reference](../code/forecaster.rst) of NeuralProphet 6 | 4. Join the community on [Github](https://github.com/ourownstory/neural_prophet) or [Slack](https://join.slack.com/t/neuralprophet/shared_invite/zt-1iyfs2pld-vtnegAX4CtYg~6E~V8miXw) 7 | -------------------------------------------------------------------------------- /docs/zh/README.md: -------------------------------------------------------------------------------- 1 | # neuralprophet-doc-zh 2 | NeuralProphet中文文档 3 | 4 | # 说明 5 | 6 | ![logo](logo.png) 7 | 8 | 9 | 10 | NeuralProphet是一个Facebook Prophet和AR-Net的启发,在PyTorch上构建的基于神经网络的时间序列模型,目前处于开发阶段。NeuralProphet是在一个完全模块化的架构中开发的,这使得它可以在未来添加任何额外的组件。我们的愿景是为用户开发一个简单易用的预测工具,同时保留Prophet的原始目标,如可解释性、可配置性,并通过使用PyTorch作为后端提供更多的功能,如自动差分功能。 11 | 12 | ------ 13 | 14 | 本文档是对其官方文档的中文翻译。水平有限,如有翻译不当之处敬请指正! 15 | 16 | 建议在**语雀**上阅读,效果好。 17 | 18 | - 本文档在语雀地址:https://www.yuque.com/alipayqgthu1irbf/neuralprophet 19 | 20 | - 本文档GitHub地址:https://github.com/SharkFin-top/neuralprophet-doc-zh 21 | 22 | 23 | ------ 24 | 25 | NeuralProphet官方地址: 26 | 27 | - GitHub:https://github.com/ourownstory/neural_prophet 28 | - Document:http://neuralprophet.com/ 29 | 30 | ------ 31 | 32 | PS: 封面图是我自己理解,在Facebook Prophet基础上设计的logo,还不错吧 ヾ(◍°∇°◍)ノ゙ 33 | 34 | ------ 35 | 36 | 此外, 37 | 38 | Prophet中文文档:https://github.com/SharkFin-top/prophet-doc-zh 39 | 40 | 更多内容:https://www.yuque.com/alipayqgthu1irbf/sharkfin 41 | -------------------------------------------------------------------------------- /docs/zh/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/docs/zh/logo.png -------------------------------------------------------------------------------- /docs/zh/事件.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/model/events/ 2 | 
3 | # 为事件建模 4 | 5 | 在预测问题中,我们经常需要考虑反复出现的特殊事件。`neural_prophet`支持这些事件。这些事件既可以以加法格式添加,也可以以乘法格式添加。 6 | 7 | 为了将事件信息提供给模型,用户必须创建一个dataframe ,该数据框架有对应事件日期的列`ds`和包含指定日期事件名称的列`event`。在下面的例子中,我们创建了名为 `history_events_df` 的dataframe ,其中包含这些事件信息。 8 | 9 | ```python 10 | playoffs_history = pd.DataFrame({ 11 | 'event': 'playoff', 12 | 'ds': pd.to_datetime(['2008-01-13', '2009-01-03', '2010-01-16', 13 | '2010-01-24', '2010-02-07', '2011-01-08', 14 | '2013-01-12', '2014-01-12', '2014-01-19', 15 | '2014-02-02', '2015-01-11', '2016-01-17']), 16 | }) 17 | 18 | superbowls_history = pd.DataFrame({ 19 | 'event': 'superbowl', 20 | 'ds': pd.to_datetime(['2010-02-07', '2014-02-02']), 21 | }) 22 | history_events_df = pd.concat((playoffs_history, superbowls_history)) 23 | ``` 24 | 25 | `history_events_df`dataframe 的前几行如下。 26 | 27 | | | event | ds | 28 | | ---- | ------- | ------------------- | 29 | | 0 | playoff | 2008-01-13 00:00:00 | 30 | | 1 | playoff | 2009-01-03 00:00:00 | 31 | | 2 | playoff | 2010-01-16 00:00:00 | 32 | | 3 | playoff | 2010-01-24 00:00:00 | 33 | | 4 | playoff | 2010-02-07 00:00:00 | 34 | | 5 | playoff | 2011-01-08 00:00:00 | 35 | 36 | 为了进行预测,我们还需要提供这些事件的未来日期,用于训练模型。你可以将这些内容包含在之前为拟合模型而创建的同一事件dataframe 中,或者包含在一个新的dataframe 中,如下所示。 37 | 38 | ```python 39 | playoffs_future = pd.DataFrame({ 40 | 'event': 'playoff', 41 | 'ds': pd.to_datetime(['2016-01-21', '2016-02-07']) 42 | }) 43 | 44 | superbowl_future = pd.DataFrame({ 45 | 'event': 'superbowl', 46 | 'ds': pd.to_datetime(['2016-01-23', '2016-02-07']) 47 | }) 48 | 49 | future_events_df = pd.concat((playoffs_future, superbowl_future)) 50 | ``` 51 | 52 | 一旦事件dataframes 被创建,就应该创建`NeuralProphet`对象,并添加事件配置。这是用`NeuralProphet`类的`add_events`函数完成的。 53 | 54 | ```python 55 | m = NeuralProphet( 56 | n_forecasts=10, 57 | yearly_seasonality=False, 58 | weekly_seasonality=False, 59 | daily_seasonality=False, 60 | ) 61 | m = m.add_events(["superbowl", "playoff"]) 62 | ``` 63 | 64 | 之后,我们需要将之前创建的dataframes 中的事件数据转换为模型所期望的binary 
输入数据，这可以通过调用`create_df_with_events`函数来完成，
`superbowl` 和`playoff` 事件,将对三个特殊事件进行建模,即事件日期、前一天和第二天。这些事件在组件图中可见,如下图所示。 126 | 127 | ![plot-comp-3](http://neuralprophet.com/images/plot_comp_events_3.png) 128 | 129 | 在参数图中,现在会有`superbowl_+1`和`superbowl_-1`,它们对应`superbowl`事件之后和之前一天的系数。`季后赛`事件也有同样的新系数。 130 | 131 | ![plot-param-3](http://neuralprophet.com/images/plot_param_events_3.png) 132 | 133 | 如果你想为各个事件定义不同的窗口,也可以按以下方式进行。 134 | 135 | ```python 136 | m = m.add_events("superbowl", lower_window=-1, upper_window=1) 137 | m = m.add_events("playoff", upper_window=2) 138 | ``` 139 | 140 | 在上面的例子中,对于 `playoff` 事件,指定的活动日期以及下面两个日期被认为是三个不同的特别活动。 141 | 142 | ## 国家法定假期 143 | 144 | 除了用户指定的事件外,`neural_prophet`还支持标准的特定国家的假期。如果你想添加特定国家的假期,你只需要调用`NeuralProphet`对象上的`add_country_holidays`函数并指定国家。与用户指定的事件类似,特定国家的假期可以是 `additive` 或`multiplicative` ,并包括窗口。然而,与用户指定事件不同的是,所有国家特定事件的窗口都是一样的。 145 | 146 | ```python 147 | m = m.add_country_holidays("US", mode="additive", lower_window=-1, upper_window=1) 148 | ``` 149 | 150 | 这个例子将以`additive` 形式把所有的`US` 假期添加到模型中。各个事件的系数如下所示: 151 | 152 | ![plot-param-3](http://neuralprophet.com/images/plot_param_events_4.png) 153 | 154 | ## 正则化事件 155 | 156 | 事件也可以支持系数的正则化。你可以在将事件配置添加到`NeuralProphet`对象中时指定正则化,如下图所示。 157 | 158 | ```python 159 | m = m.add_events(["superbowl", "playoff"], regularization=0.05) 160 | ``` 161 | 162 | 各个事件的正则化也可以是不同的,比如下面。 163 | 164 | ```python 165 | m = m.add_events("superbowl", regularization=0.05) 166 | m = m.add_events("playoff", regularization=0.03) 167 | ``` 168 | 169 | 对于特定国家的节日,也可以像下面这样规定。 170 | 171 | ```python 172 | m = m.add_country_holidays("US", mode="additive", regularization=0.05) 173 | ``` -------------------------------------------------------------------------------- /docs/zh/季节性.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/model/seasonality/ 2 | 3 | # 为季节性建模 4 | 5 | NeuralProphet中的季节性使用傅里叶项建模。它可以指定加法和乘法模式。 6 | 7 | ## 加法季节性 8 | 9 | 季节性的默认模式是加法。请看下面NeuralProphet中加法季节性的简单例子。 10 | 11 | ```python 12 | m 
= NeuralProphet() 13 | metrics = m.fit(df, freq="D") 14 | ``` 15 | 16 | ![plot-comp-1](http://neuralprophet.com/images/plot_comp_seasonality_1.png) 17 | 18 | 你可以看到每周和每年的季节性形状。由于在模型开发中没有明确说明所需的季节性,NeuralProphet会拟合数据中可能存在的任何季节性。模型还为每个季节性所需的Fourier项数分配了默认值。您也可以像下面的例子一样指定这些数字。 19 | 20 | ```python 21 | m = NeuralProphet( 22 | yearly_seasonality=8, 23 | weekly_seasonality=3 24 | ) 25 | ``` 26 | 27 | 根据这个例子,年季节性模式将使用8个傅立叶项,周季节性模式将使用3个傅立叶项。通过调整Fourier项的数量,你可以对季节性进行低拟合或过拟合。下面是一个例子,对于同样的数据,每个季节性的Fourier项数都很高,季节性被过度拟合。 28 | 29 | ```python 30 | m = NeuralProphet( 31 | yearly_seasonality=16, 32 | weekly_seasonality=8 33 | ) 34 | ``` 35 | 36 | ![plot-comp-1](http://neuralprophet.com/images/plot_comp_seasonality_2.png) 37 | 38 | ## 乘法季节性 39 | 40 | 季节性也可以通过设置明确的模式来进行多重建模,如下图所示。这样做,季节性相对于趋势将是乘法的。 41 | 42 | ```python 43 | m = NeuralProphet( 44 | seasonality_mode='multiplicative' 45 | ) 46 | ``` 47 | 48 | ## 正则化季节性 49 | 50 | 就像NeuralProphet中的所有其他组件一样,季节性也可以被正则化。这是通过正则化傅里叶系数来实现的,如下图。关于如何设置 `seasonality_reg` 参数的细节,请参考[超参数选取](http://neuralprophet.com/hyperparameter-selection/#regularization-related-parameters)一节。 51 | 52 | ```python 53 | m = NeuralProphet( 54 | yearly_seasonality=16, 55 | weekly_seasonality=8, 56 | daily_seasonality=False, 57 | seasonality_reg=1, 58 | ) 59 | ``` 60 | 61 | -------------------------------------------------------------------------------- /docs/zh/快速开始.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/ 2 | 3 | # 快速入门指南 4 | 5 | 本页详细介绍了如何使用NeuralProphet用简单的方法建立一个简单的模型。 6 | 7 | ## 安装 8 | 9 | 下载代码仓库后(通过`git clone`),切换到仓库目录下(`cd neural_prophet`),用`pip install .`将neuralprophet作为python包安装。 10 | 11 | 注意:如果你打算在Jupyter notebook上使用这个包,建议使用`pip install .[live]`安装 "live "包版本。这将允许你在 `train`函数中启用 `plot_live_loss` ,以获得train (和validation))损失的实时图。 12 | 13 | ### 导入包 14 | 15 | 现在你可以通过以下代码中使用NeuralProphet。 16 | 17 | ```python 18 | from neuralprophet import NeuralProphet 19 | ``` 20 | 21 | ## 导入数据 22 | 23 | 
`neural_prophet`包所期望的输入数据格式与原始`prophet`相同。它应该有两列,`ds`有时间戳,`y`列包含时间序列的观测值。在整个文档中,我们将使用[佩顿-曼宁](https://en.wikipedia.org/wiki/Peyton_Manning)维基百科页面的每日页面浏览日志的时间序列数据。这些数据可以通过以下方式导入。 24 | 25 | ```python 26 | import pandas as pd 27 | 28 | data_location = "https://raw.githubusercontent.com/ourownstory/neuralprophet-data/main/datasets/" 29 | 30 | df = pd.read_csv(data_location + 'wp_log_peyton_manning.csv') 31 | ``` 32 | 33 | 数据的格式如下。 34 | 35 | | ds | y | 36 | | ---------- | ---- | 37 | | 2007-12-10 | 9.59 | 38 | | 2007-12-11 | 8.52 | 39 | | 2007-12-12 | 8.18 | 40 | | 2007-12-13 | 8.07 | 41 | | 2007-12-14 | 7.89 | 42 | 43 | 44 | 45 | ## 简单模型 46 | 47 | 通过创建一个`NeuralProphet`类的对象,并调用fit函数,就可以为这个数据集拟合一个`neural_prophet`的简单模型,如下所示。这样就可以用模型中的默认设置来拟合模型。关于这些默认设置的更多细节,请参考[Hyperparameter Selction](http://neuralprophet.com/hyperparameter-selection/)。 48 | 49 | ```python 50 | m = NeuralProphet() 51 | metrics = m.fit(df, freq="D") 52 | ``` 53 | 54 | 模型被拟合后,我们就可以使用拟合的模型进行预测。为此,我们首先需要创建一个未来的dataframe ,包括我们需要预测的未来的时间步骤。`NeuralProphet`为此提供了辅助函数`make_future_dataframe`。注意,这里的数据频率是全局设置的。有效的时序频率设置是[pandas timeseries offset aliases](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases)。 55 | 56 | ```python 57 | future = m.make_future_dataframe(df, periods=365) 58 | forecast = m.predict(future) 59 | ``` 60 | 61 | ## 画图 62 | 63 | 可视化通过模型得到的预测。 64 | 65 | ```python 66 | forecasts_plot = m.plot(forecast) 67 | ``` 68 | 69 | ![plot-forecasts-1](http://neuralprophet.com/images/plot_forecasts_simple_1.png) 70 | 71 | 这是一个简单的模型,默认估计了趋势、周季节性和年季节性。你也可以分别看下面的各个组成部分。 72 | 73 | ```python 74 | fig_comp = m.plot_components(forecast) 75 | ``` 76 | 77 | ![plot-comp-1](http://neuralprophet.com/images/plot_comp_simple_1.png) 78 | 79 | 各个系数值也可以绘制如下图,以获得进一步的了解。 80 | 81 | ```python 82 | fig_param = m.plot_parameters() 83 | ``` 84 | 85 | ![plot-param-1](http://neuralprophet.com/images/plot_param_simple_1.png) 86 | 87 | ## 验证 88 | 89 | 
NeuralProphet的模型验证可以通过两种方式进行。用户可以在参数`valida_p`中指定用于验证的数据分数,像下面这样在模型拟合后手动分割数据集进行验证。这个验证集从系列末尾开始保留。 90 | 91 | ```python 92 | m = NeuralProphet() 93 | df_train, df_val = m.split_df(df, valid_p=0.2) 94 | ``` 95 | 96 | 现在你可以分别查看训练和验证指标,如下图所示。 97 | 98 | ```python 99 | train_metrics = m.fit(df_train) 100 | val_metrics = m.test(df_val) 101 | ``` 102 | 103 | 在模型拟合过程中,你也可以对每个epoch进行验证。通过在`fit`函数调用中设置`validate_each_epoch`参数来完成。这可以让你在模型训练时查看验证指标。 104 | 105 | ```python 106 | # or evaluate while training 107 | m = NeuralProphet() 108 | metrics = m.fit(df, validate_each_epoch=True, valid_p=0.2) 109 | ``` 110 | 111 | ## 可重复性 112 | 113 | 结果的差异性来自SGD在不同的运行中找到不同的optima。大部分的随机性来自于权重的随机初始化、不同的学习率和dataloader的不同shuffling 。我们可以通过设置随机数发生器的种子(seed)来控制它。 114 | 115 | ```python 116 | from neuralprophet import set_random_seed 117 | set_random_seed(0) 118 | ``` 119 | 120 | 这应该会导致每次运行模型时都有相同的结果。请注意,在拟合模型之前,每次都必须明确地将随机种子设置为相同的随机数。 121 | 122 | -------------------------------------------------------------------------------- /docs/zh/未来回归项.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/model/future-regressors/ 2 | 3 | # 为未来回归项建模 4 | 5 | 未来回归器是指具有已知未来值的外部变量。从这个意义上说,未来回归者的功能如果和特殊事件非常相似。 6 | 7 | 这些回归器的过去值对应于训练time stamps,必须与训练数据本身一起提供。请看下面的例子,我们通过对原始数据的滚动平均值(rolling means)来创建两个虚拟回归项`A`和`B`。 8 | 9 | ```python 10 | df['A'] = df['y'].rolling(7, min_periods=1).mean() 11 | df['B'] = df['y'].rolling(30, min_periods=1).mean() 12 | ``` 13 | 14 | dataframe 如下。 15 | 16 | | | ds | y | A | B | 17 | | ---- | ---------- | ------- | ------- | ------- | 18 | | 0 | 2007-12-10 | 9.59076 | 9.59076 | 9.59076 | 19 | | 1 | 2007-12-11 | 8.51959 | 9.05518 | 9.05518 | 20 | | 2 | 2007-12-12 | 8.18368 | 8.76468 | 8.76468 | 21 | | 3 | 2007-12-13 | 8.07247 | 8.59162 | 8.59162 | 22 | | 4 | 2007-12-14 | 7.89357 | 8.45201 | 8.45201 | 23 | 24 | 25 | 为了进行预测,我们还需要提供回归因子的未来值。 26 | 27 | ```python 28 | future_regressors_df = pd.DataFrame(data={'A': df['A'][:50], 'B': 
df['B'][:50]}) 29 | ``` 30 | 31 | dataframe 如下。 32 | 33 | | | A | B | 34 | | ---- | ------- | ------- | 35 | | 0 | 9.59076 | 9.59076 | 36 | | 1 | 9.05518 | 9.05518 | 37 | | 2 | 8.76468 | 8.76468 | 38 | | 3 | 8.59162 | 8.59162 | 39 | | 4 | 8.45201 | 8.45201 | 40 | 41 | 42 | 43 | 它是一个只有回归项未来值列的dataframe 。 44 | 45 | 与事件类似,未来的回归器也可以以加法和乘法两种格式添加。 46 | 47 | ## 加法未来回归项 48 | 49 | `neural_prophet`中未来回归器的默认模式是加法。必须通过调用`add_future_regressor`函数将回归器添加到`NeuralProphet`对象中。一旦完成了这些工作,就可以通过向`fit`函数提供训练数据的dataframe 以及回归项的值来拟合模型。 50 | 51 | ```python 52 | m = NeuralProphet( 53 | n_forecasts=10, 54 | yearly_seasonality=False, 55 | weekly_seasonality=False, 56 | daily_seasonality=False, 57 | ) 58 | 59 | m = m.add_future_regressor(name='A') 60 | m = m.add_future_regressor(name='B') 61 | 62 | metrics = m.fit(df, freq="D") 63 | ``` 64 | 65 | 当进行预测时,必须通过提供回归器的未来值来创建未来dataframe 。要做到这一点,现在你需要调用 `make_future_dataframe` 函数,提供之前创建的`future_regressors_df` 作为参数。 66 | 67 | ```python 68 | future = m.make_future_dataframe(df=df, regressors_df=future_regressors_df, periods=3) 69 | forecast = m.predict(df=future) 70 | ``` 71 | 72 | 分解图如下。 73 | 74 | ```python 75 | fig_comp = m.plot_components(forecast) 76 | ``` 77 | 78 | ![plot-comp-1](http://neuralprophet.com/images/plot_comp_future_reg_1.png) 79 | 80 | n 除了趋势外,它还显示了加法未来回归项的图。未来回归项的系数也可以绘制出来。 81 | 82 | ```python 83 | fig_param = m.plot_parameters() 84 | ``` 85 | 86 | ![plot-param-1](http://neuralprophet.com/images/plot_param_future_reg_1.png) 87 | 88 | ## 乘法未来回归项 89 | 90 | 未来的回归项也可以以乘法模式添加。你只需要在向`NeuralProphet`对象添加回归项时,将模式设置为`multiplicative`即可。 91 | 92 | ```python 93 | m = m.add_future_regressor(name='A', mode="multiplicative") 94 | m = m.add_future_regressor(name='B') 95 | ``` 96 | 97 | 在上面的例子中,我们有加法和乘法回归器,其中`A`是乘法,`B`是加法。拟合和预测过程中的所有其他步骤都是一样的。 98 | 99 | 100 | 101 | ## 正则化未来回归项 102 | 103 | 我们可以在未来的回归项中加入正则化,如下图。 104 | 105 | ```python 106 | m = m.add_future_regressor(name='A', regularization=0.05) 107 | m = m.add_future_regressor(name='B', 
regularization=0.02) 108 | ``` 109 | 110 | 这将在各个回归者系数中增加稀疏性(sparsity)。 111 | 112 | -------------------------------------------------------------------------------- /docs/zh/模型概述.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/model-overview/ 2 | 3 | 此文档正在完善中。 4 | 5 | # NeuralProphet模型概述 6 | 7 | NeuralProphet是一个基于神经网络的PyTorch实现的面向从业者的用户友好型时间序列预测工具。这在很大程度上受到了Facebook开发的流行预测工具[Prophet](https://facebook.github.io/prophet/)的启发。NeuralProphet是在一个完全模块化的架构中开发的,这使得它可以在未来添加任何额外的组件。我们的愿景是为用户开发一个简单易用的预测工具,同时保留Prophet的原始目标,如,可解释性、可配置性,并通过使用PyTorch作为后端提供更多的功能,如自动差分功能。 8 | 9 | ## 时间序列部分 10 | 11 | NeuralProphet是一个可分解的时间序列模型,其组成部分有趋势(trend)、季节性(seasonality)、自回归(auto-regression)、特殊事件(special events)、未来回归项(future regressors)和滞后回归项(lagged regressors)。未来回归因子是指在预测期有已知未来值的外部变量,而滞后回归因子是指那些只有观察期值的外部变量。趋势可以通过使用变化点来建立线性或逐个线性趋势的模型。季节性使用傅里叶项建模,因此可以处理高频率数据的多种季节性。自动回归使用[AR-Net](https://github.com/ourownstory/AR-Net)的实现来处理,这是一个用于时间序列的自回归前馈神经网络(Auto-Regressive Feed-Forward Neural Network)。滞后回归因子也使用单独的前馈神经网络进行建模。未来回归项和特殊事件都是作为模型的协变量,用专用系数进行建模。更多细节,请参考各个部分的文档。 12 | 13 | ## 数据预处理 14 | 15 | 我们在模型中进行了一些数据预处理步骤。对于观察到的时间序列值,用户可以指定是否希望将这些值标准化。默认情况下,`y` 值将被最小-最大归一化。如果用户特别将 `normalize_y` 参数设置为 `true`,则数据将进行z-score归一化。对协变量也可以进行归一化处理。协变量归一化的默认模式是`auto`。在这种模式下,除了事件等binary features外,所有其他特征都进行z-score归一化。 16 | 17 | 如果数据中存在缺失值,我们也会进行估算。然而,只有在模型中启用自回归时,才会进行估算。否则,缺失值对于回归模型来说并不重要。对于binary data没有进行特殊的归因。对于缺失的日期,它们被简单地当作 "0"。对于数字数据,包括 `y` 值,标准化是一个两步走的过程。首先,用线性归入法填补小的差距,然后用滚动平均法填补较大的差距。当启用自动回归时,将观察到的`y`值以移动窗口的形式进行预处理,以从滞后值中学习。对于滞后的回归者也是如此。 18 | 19 | ## 何时使用NeuralProphet 20 | 21 | NeuralProphet既可以进行单步预测,也可以进行多步预测。目前,NeuralProphet是单变量建立模型。这意味着,如果你有很多series ,你希望进行预测,你需要一次只做一个。然而,未来我们希望将整体预测模型的能力整合到NeuralProphet中。 22 | 23 | NeuralProphet可以帮助建立预测模型,用于存在其他外部因素的情况下,这些外部因素可以驱动目标序列随时间的行为。使用这些外部信息可以极大地改善预测模型,而不是仅仅依靠序列的自相关。NeuralProphet工具适用于那些希望通过可视化预测、单个组件以及模型的基本系数来深入了解整体建模过程的预测从业者。通过我们的描述性图表,用户可以直观地看到各个组件的相互作用。他们还可以根据需要通过正则化引入稀疏性(sparsity 
)来控制这些系数。他们可以根据自己的领域知识,将这些组件加法或乘法地组合起来。 24 | 25 | 这是一项持续的工作。因此,NeuralProphet将在接下来的版本中搭配更多的功能。 -------------------------------------------------------------------------------- /docs/zh/滞后回归项.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/model/lagged-regressors/ 2 | 3 | # 为滞后回归项建模 4 | 5 | 在NeuralProphet目前的版本下,只有在启用AR-Net的情况下,才会支持Lagged Regressor。这是因为它们都是使用前馈神经网络的内部处理方式,需要指定`n_lags`值。为了简单起见,目前我们对AR-Net和滞后回归器使用相同的`n_lags`值。因此,对于滞后回归器,NeuralProphet对象的实例化与AR-Net类似,如下图。 6 | 7 | ```python 8 | m = NeuralProphet( 9 | n_forecasts=3, 10 | n_lags=5, 11 | yearly_seasonality=False, 12 | weekly_seasonality=False, 13 | daily_seasonality=False, 14 | ) 15 | ``` 16 | 17 | 当拟合模型时,提供给`fit` 函数的dataframe 应该有额外的滞后回归因子列,如下所示。 18 | 19 | | | ds | y | A | 20 | | ---- | ------------------- | ------- | ------- | 21 | | 0 | 2007-12-10 00:00:00 | 9.59076 | 9.59076 | 22 | | 1 | 2007-12-11 00:00:00 | 8.51959 | 9.05518 | 23 | | 2 | 2007-12-12 00:00:00 | 8.18368 | 8.76468 | 24 | | 3 | 2007-12-13 00:00:00 | 8.07247 | 8.59162 | 25 | | 4 | 2007-12-14 00:00:00 | 7.89357 | 8.45201 | 26 | 27 | 28 | 29 | 在这个例子中,我们有一个名为`A`的滞后回归项。你还需要通过调用`add_lagged_regressor`函数并给出必要的设置,将这些Lagged Regressor用于`NeuralProphet`对象中。 30 | 31 | ```python 32 | m = m.add_lagged_regressor(names='A') 33 | ``` 34 | 35 | 通过设置 `add_lagged_regressor` 函数的 `only_last_value` 参数,用户可以指定在输入窗口内只使用回归项的最后已知值,或者使用与自动回归相同的滞后数。现在你可以像往常一样执行模型拟合和预测。绘制的分量应该像下面的样子。 36 | 37 | ![plot-comp-1](http://neuralprophet.com/images/plot_comp_lag_reg_1.png) 38 | 39 | 可以看到自动回归和滞后回归项`A`所对应的成分。系数图如下。 40 | 41 | ![plot-param-1](http://neuralprophet.com/images/plot_param_lag_reg_1.png) 42 | 43 | 它显示了与输入窗口对应的5个滞后的AR和滞后回归项的相关性。 -------------------------------------------------------------------------------- /docs/zh/相较于prophet的改进.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/changes-from-prophet/ 2 | 3 | # 相较于Prophet的改进 4 | 5 | 
NeuralProphet与原来的Prophet相比,增加了一些功能。它们如下。 6 | 7 | - 使用PyTorch作为后端进行优化的梯度下降法。 8 | - 使用AR-Net对时间序列的自相关进行建模。 9 | - 使用seepearate前馈神经网络对滞后回归者进行建模。 10 | - 可配置的FFNNs非线性深层。 11 | - 可调整到特定的预测范围(大于1)。 12 | - 自定义损失和指标。 13 | 14 | 由于代码的模块化和PyTorch支持的可扩展性,任何可通过梯度下降训练的组件都可以作为一个模块添加到NeuralProphet中。使用PyTorch作为后端,与原来使用Stan作为后端的Prophet相比,使得建模过程更快。 -------------------------------------------------------------------------------- /docs/zh/自回归.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/model/auto-regression/ 2 | 3 | # 为自回归建模 4 | 5 | 通过简单地对`NeuralProphet`对象的`n_lags`参数设置一个适当的值,就可以在NeuralProphet中启用AR-Net。 6 | 7 | ```python 8 | m = NeuralProphet( 9 | n_forecasts=3, 10 | n_lags=5, 11 | yearly_seasonality=False, 12 | weekly_seasonality=False, 13 | daily_seasonality=False, 14 | ) 15 | ``` 16 | 17 | 在上面的例子中,我们创建了一个预测方案,将5个滞后期输入AR-Net,并接收3个步骤作为预测。一旦您启用了AR-Net,在预测过程中,您的`future_periods`值应该等于创建`NeuralProphet`对象时指定的`n_forecasts`值。无论你为`future_periods`指定的是哪个值,它都会被转换为`n_forecasts`的值,并向用户发出通知。这是因为,由于AR-Net是在训练过程中建立的,因此它的输出大小为`n_forecasts`,在测试过程中不能支持任何其他值。 18 | 19 | 分解图如下所示。 20 | 21 | ![plot-comp-1](http://neuralprophet.com/images/plot_comp_ar_1.png) 22 | 23 | 现在你可以看到自回归作为一个独立的部分。相应的系数如下图所示。 24 | 25 | ![plot-param-1](http://neuralprophet.com/images/plot_param_ar_1.png) 26 | 27 | 在建立自相关模型时,您可以看到每个滞后的相关性。您也可以为AR-Net指定`ar_layers`,以增加AR-Net的复杂性。 28 | 29 | ```python 30 | m = NeuralProphet( 31 | n_forecasts=3, 32 | n_lags=5, 33 | ar_layers=[32,32], 34 | yearly_seasonality=False, 35 | weekly_seasonality=False, 36 | daily_seasonality=False 37 | ) 38 | ``` 39 | 40 | ## 正则化AR-Net 41 | 42 | 在AR-Net中,正则化是通过在`NeuralProphet`对象中设置`ar_sparsity`参数来完成的,如下图所示。更多关于`ar_sparsity`参数设置的细节,请参考[Hyperparameter Selection](http://neuralprophet.com/hyperparameter-selection/#regularization-related-parameters)一节。 43 | 44 | ```python 45 | m = NeuralProphet( 46 | n_forecasts=3, 47 | n_lags=5, 48 | ar_layers=[32,32], 49 | ar_sparsity=0.01, 50 | yearly_seasonality=False, 
51 | weekly_seasonality=False, 52 | daily_seasonality=False 53 | ) 54 | ``` 55 | 56 | ## 突出特定的预测步骤 57 | 58 | 在建立自相关模型时,多输入多输出模式下的模型。在这种模式下,可以突出显示第n步前的预测。这意味着,在模型训练过程中计算误差以及预测绘图时,可以专门看第n步的预测。可以像下面这样做。 59 | 60 | ```python 61 | m = NeuralProphet( 62 | n_forecasts=30, 63 | n_lags=60, 64 | yearly_seasonality=False, 65 | weekly_seasonality=False, 66 | daily_seasonality=False 67 | ) 68 | m.highlight_nth_step_ahead_of_each_forecast(step_number=m.config_model.n_forecasts) 69 | ``` 70 | 71 | 您可以指定任何小于或等于`n_forecasts`的值到`step_number`参数。一旦你这样做,指标将看起来像下面。 72 | 73 | | SmoothL1Loss | MAE | SmoothL1Loss-3 | MAE-3 | RegLoss | 74 | | ------------ | -------- | -------------- | -------- | ------- | 75 | | 0.272427 | 3.063127 | 0.164296 | 2.407697 | 0.0 | 76 | | 0.151259 | 2.303768 | 0.144811 | 2.261525 | 0.0 | 77 | | 0.129990 | 2.140769 | 0.127703 | 2.126293 | 0.0 | 78 | | 0.116178 | 2.020397 | 0.113719 | 2.005068 | 0.0 | 79 | | 0.104502 | 1.915078 | 0.101155 | 1.887193 | 0.0 | 80 | 81 | 82 | 83 | 在预测图中,它将只关注提前第n步的预测。如下图所示为该模型的fir。 84 | 85 | ![plot-forecast-1](http://neuralprophet.com/images/plot_forecast_ar_1.png) -------------------------------------------------------------------------------- /docs/zh/贡献.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/contribute/ 2 | 3 | # 贡献 4 | 5 | ## Dev Install 6 | 7 | 下载代码仓库后(通过`git clone`),更改到仓库目录(`cd neural_prophet`),激活你的虚拟环境,用`pip install -e .[dev]`将neuralprophet作为python包安装。 8 | 9 | (包括可选的`-e`标志将在 "可编辑 "模式下安装neuralprophet,这意味着不是将文件复制到你的虚拟环境中,而是在文件所在的地方创建一个符号链接。) 10 | 11 | 此外,你必须在控制台中运行`$ neuralprophet_dev_setup`来运行dev-setup脚本,它将安装适当的git hooks用于测试等。 12 | 13 | ## Notes 14 | 15 | A我们尽可能地遵循 [Google Python Style Guide](http://google.github.io/styleguide/pyguide.html) 16 | 17 | 至于Git的做法,请按照[Swiss Cheese](https://github.com/ourownstory/swiss-cheese/blob/master/git_best_practices.md)中描述的步骤,了解如何在forked repo上进行git-rebase-squash。 
-------------------------------------------------------------------------------- /docs/zh/超参数选取.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/hyperparameter-selection/ 2 | 3 | NeuralProphet有一些超参数需要用户指定。如果没有指定,将使用这些超参数的默认值。如下。 4 | 5 | | Parameter | Default Value | 6 | | --------------------- | ------------- | 7 | | `growth` | linear | 8 | | `changepoints` | None | 9 | | `n_changepoints` | 5 | 10 | | `changepoints_range` | 0.8 | 11 | | `trend_reg` | 0 | 12 | | `trend_reg_threshold` | False | 13 | | `yearly_seasonality` | auto | 14 | | `weekly_seasonality` | auto | 15 | | `daily_seasonality` | auto | 16 | | `seasonality_mode` | additive | 17 | | `seasonality_reg` | None | 18 | | `n_forecasts` | 1 | 19 | | `n_lags` | 0 | 20 | | `ar_layers` | [] | 21 | | `ar_sparsity` | None | 22 | | `learning_rate` | None | 23 | | `epochs` | None | 24 | | `batch_size` | None | 25 | | `loss_func` | SmoothL1Loss | 26 | | `train_speed` | None | 27 | | `normalize_y` | auto | 28 | | `impute_missing` | True | 29 | 30 | 31 | 32 | ## 预测范围 33 | 34 | `n_forecasts`是预测范围的大小。默认值为1,表示模型预测未来一步。 35 | 36 | ## 自回归 37 | 38 | `n_lags`定义AR-Net是否启用(如果`n_lags`>0)。如果可能的话,通常建议`n_lags'的值大于``n_forecasts`。FFNNs最好至少遇到过去的`n_forecast`长度,以便预测未来的`n_forecast`。因此,`n_lags`决定应该考虑到过去多远的自动递减依赖性。这可以是根据专业知识或经验分析选择的一个值。 39 | 40 | ## 模型训练相关参数 41 | 42 | NeuralProphet采用随机梯度下降法进行拟合--更准确地说,是采用AdamW优化器和One-Cycle策略。如果没有指定参数`learning_rate`,则进行学习率范围测试以确定最佳学习率。`epochs`和`loss_func`是另外两个直接影响模型训练过程的参数。如果没有定义,这两个参数会根据数据集大小自动设置。它们的设置方式是将训练步骤总数控制在1000到4000左右。 43 | 44 | 如果看起来模型对训练数据过度拟合(实时损失图在此很有用),可以减少 `epochs` 和 `learning_rate`,并有可能增加 `batch_size`。如果是低拟合,可以增加`epochs` 和`learning_rate` 的数量,并有可能减少`batch_size` 。 45 | 46 | 默认的损失函数是 "SmoothL1Loss "损失,该函数被认为对离群值具有鲁棒性。但是,您可以自由选择标准的 "MSE "或任何其他PyTorch `torch.nn.modules.loss`损失函数。 47 | 48 | ## 增加模型的深度 49 | 50 | 
`ar_layers`定义了整个模型中AR-Net的隐藏层数量及其大小。它是一个数组,其中每个元素都是相应隐藏层的大小。默认为空数组,这意味着AR-Net将只有一个大小为`n_forecasts`的最终层。添加更多层将增加复杂性和计算时间。然而,增加隐藏层的数量有助于构建更复杂的关系。为了在计算复杂性和改进精度之间取得平衡,建议将`ar_layers`设置为具有1-2个元素的数组。然而,在大多数情况下,通过完全没有隐藏层也可以实现足够好的性能。 51 | 52 | `lagged_reg_layers`定义了整个模型中滞后回归器FFNN的隐藏层数量及其大小。它是一个数组,其中每个元素都是相应隐藏层的大小。默认为空数组,这意味着滞后回归器的FFNN将只有一个大小为`n_forecasts`的最终层。添加更多层将增加复杂性和计算时间。然而,增加隐藏层的数量有助于构建更复杂的关系,尤其是对于滞后回归器。为了在计算复杂性和改进精度之间取得平衡,建议将`lagged_reg_layers`设置为具有1-2个元素的数组。然而,在大多数情况下,通过完全没有隐藏层也可以实现足够好的性能。 53 | 54 | 请注意,以前的`num_hidden_layers`和`d_hidden`参数现在已被弃用。现在通过`ar_layers`和`lagged_reg_layers`输入ar_net和covar_net架构配置。如果手动调整,建议将隐藏层大小的值设置在`n_lags`和`n_forecasts`之间。同样重要的是要注意,当前的NeuralProphet实现允许您为ar_net和covar_net中的隐藏层指定不同的大小。 55 | 56 | 57 | ## 数据预处理相关参数 58 | 59 | `normalize_y` 是关于在建模前对时间序列进行缩放。默认情况下,NeuralProphet会对时间序列进行(soft)最小-最大的归一化。如果序列值波动很大,归一化可以帮助模型训练过程。然而,如果序列没有这样的缩放,用户可以关闭这个功能或选择其他归一化。 60 | 61 | `impute_missing`是关于在一个给定的序列中推算缺失值。与Prophet类似,NeuralProphet也可以在没有AR-Net的回归模式下处理缺失值。然而,当需要捕获自相关时,有必要对缺失值进行估算,因为这时建模就变成了一个有序的问题。在大多数情况下,让这个参数处于默认状态可以完美地完成工作。 62 | 63 | ## 趋势相关参数 64 | 65 | 你可以在[`example_notebooks/trend_peyton_manning.ipynb`](https://github.com/ourownstory/neural_prophet/blob/master/example_notebooks/trend_peyton_manning.ipynb)找到一个实践的例子。 66 | 67 | 如果趋势的灵活性主要由`n_changepoints`控制,它设定了趋势率可能变化的点数。此外,可以通过将`trend_reg`设置为大于零的值来规范趋势率的变化。 68 | 69 | 这是一个有用的功能,可以用来自动检测相关的变化点。 70 | 71 | `changepoints_range`控制用于拟合趋势的训练数据的范围。默认值为0.8,意味着在最后20%的训练数据中不设置变化点。 72 | 73 | 如果提供了一个`changepoints` 列表,`n_changepoints` 和 `changepoints_range` 将被忽略。这个列表用于设置允许趋势率变化的日期。 74 | 75 | `n_changepoints`是沿系列选择的趋势变化点的数量。默认值为5。 76 | 77 | ## 季节性相关参数 78 | 79 | `yearly_seasonality`、`weekly_seasonality` 和 `daily_seasonality` 是关于要模拟的季节成分。例如,如果你使用温度数据,你可能可以选择每天和每年。例如,使用使用地铁的乘客数量更可能有一个每周的季节性。将这些季节性设置在默认的`auto`模式下,可以让NeuralProphet根据可用数据的多少来决定包括哪些季节性。例如,如果可用数据少于两年,则不会考虑年季节性。同样,如果可用数据少于两周,每周的季节性将不被考虑等等。然而,如果用户确定系列不包括年、周或日季节性,因此模型不应该被这些成分扭曲,他们可以通过设置相应的成分为`False`来明确关闭它们。除此之外,参数 `yearly_seasonality`、`weekly_seasonality` 和 
`daily_seasonality` 也可以设置为各自季节性的傅里叶项数。默认值为年6,周4和日6。用户可以将其设置为任何他们想要的数字。如果每年的项数为6,那么实际上每年季节性的傅里叶项总数为12(6*2),以适应正弦和余弦项。增加Fourier项的数量可以使模型能够捕捉相当复杂的季节性模式。然而,与 `ar_layers`类似,这也会增加模型的复杂性。用户可以通过观察最终的分量图来了解最佳的Fourier项数。默认的`seasonality_mode`是加法。这意味着在季节性方面,序列中没有异方差。然而,如果序列包含明显的方差,季节性波动与趋势成正比,则可以将`seasonality_mode` 设置为乘法。 80 | 81 | ## 正则化相关参数 82 | 83 | NeuralProphet还包含一些正则化参数,用于控制模型系数,并将稀疏性引入模型。这也有助于避免模型对训练数据的过度拟合。对于`seasonality_reg`,在0.1-1范围内的小值允许拟合大的季节性波动,而在1-100范围内的大值则会对傅里叶系数施加较重的惩罚,从而抑制季节性。对于 "ar_sparsity",预计值在0-1的范围内,0会引起完全的稀疏性,而1则完全不需要正则化。`ar_sparsity`和`n_lags`可以用于数据探索和特征选择。由于AR-Net的可扩展性,你可以使用更多的滞后数,并利用稀疏性来识别过去时间步长对预测精度的重要影响。对于 `future_regressor_regularization`、`event_regularization` 和 country_holiday_regularization,其值可以设置在0-1之间,与 `ar_sparsity`的概念相同。可以根据各个回归因子和事件需要更多的抑制,设置不同的正则化参数。 84 | -------------------------------------------------------------------------------- /docs/zh/趋势项.md: -------------------------------------------------------------------------------- 1 | http://neuralprophet.com/model/trend/ 2 | 3 | # 为趋势项建模 4 | 5 | 这是在Neuralprophet中通过定义变化点进行趋势建模的一个简单例子。 6 | 7 | ```python 8 | m = NeuralProphet( 9 | n_changepoints=100, 10 | trend_smoothness=2, 11 | yearly_seasonality=False, 12 | weekly_seasonality=False, 13 | daily_seasonality=False, 14 | ) 15 | metrics = m.fit(df, freq="D") 16 | future = m.make_future_dataframe(df, periods=365, n_historic_predictions=len(df)) 17 | forecast = m.predict(future) 18 | ``` 19 | 20 | 分解图看起来像下面这样,只有趋势和残差作为一个组成部分。 21 | 22 | ![plot-comp-1](http://neuralprophet.com/images/plot_comp_trend_1.png) 23 | 24 | 系数图应显示100个变化点对应的系数。 25 | 26 | ![plot-param-1](http://neuralprophet.com/images/plot_param_trend_1.png) -------------------------------------------------------------------------------- /neuralprophet/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import warnings 3 | 4 | import pytorch_lightning as pl 5 | 6 | # make core features and version number accessible 7 | from 
def parse_args(args=None):
    """Parse command-line arguments for the ``neuralprophet`` module entry point.

    Parameters
    ----------
    args : list of str, optional
        Argument strings to parse. When ``None``, ``argparse`` falls back to
        ``sys.argv[1:]``.

    Returns
    -------
    argparse.Namespace
        The parsed arguments. Note that ``-V``/``--version`` prints the
        version and exits before this function returns.
    """
    cli_parser = argparse.ArgumentParser(description="NeuralProphet")
    version_string = "%(prog)s " + __version__
    cli_parser.add_argument("-V", "--version", action="version", version=version_string)
    return cli_parser.parse_args(args)
class BaseComponent(nn.Module):
    """Abstract base class shared by all modular forecast components.

    Concrete components (trend, seasonality, future regressors, ...) subclass
    this and implement :meth:`forward` to produce their contribution to the
    forecast. The constructor only stores shared configuration; it registers
    no parameters itself.
    """

    def __init__(self, n_forecasts, quantiles, id_list, device):
        """
        Parameters
        ----------
        n_forecasts : int
            Number of steps ahead the model forecasts.
        quantiles : list
            Quantiles estimated by the model.
        id_list : list
            Time-series IDs known to the model (presumably used by "local"
            component variants for per-series parameters — confirm in subclasses).
        device : torch.device
            Device the component's parameters should live on.
        """
        super().__init__()
        self.n_forecasts = n_forecasts
        self.quantiles = quantiles
        self.id_list = id_list
        self.device = device

    @abstractmethod
    def forward(self, x):
        """
        Needs to be implemented by subclass.

        Parameters
        ----------
        x : torch.Tensor, float
            Component-specific model input (e.g. normalized time for trend
            components), typically of dims (batch, n_forecasts). Subclasses
            may accept additional arguments such as per-sample metadata.

        Returns
        -------
        torch.Tensor
            Component forecast, same leading dimensions as the input.
        """
        pass
class LinearFutureRegressors(FutureRegressors):
    """Future regressors modelled with a single linear term per regressor.

    Each configured regressor gets one scalar weight per quantile, stored in a
    shared parameter matrix per mode ("additive" / "multiplicative"). The
    component output is the weighted sum of the regressor features.
    """

    def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend_none_bool):
        """
        Parameters
        ----------
        config :
            Future-regressors configuration (passed through to the base class,
            which derives ``self.regressors_dims`` from it).
        id_list : list
            Time-series IDs known to the model.
        quantiles : list
            Quantiles estimated by the model.
        n_forecasts : int
            Number of steps ahead the model forecasts.
        device : torch.device
            Device the parameters should live on.
        config_trend_none_bool : bool
            True if the model has no trend; the base class rejects
            multiplicative regressors in that case.
        """
        super().__init__(
            config=config,
            n_forecasts=n_forecasts,
            quantiles=quantiles,
            id_list=id_list,
            device=device,
            config_trend_none_bool=config_trend_none_bool,
        )

        if self.regressors_dims is not None:
            # Regressor params: one weight per regressor and quantile, grouped by mode.
            self.regressor_params = nn.ParameterDict(
                {
                    # dimensions - [no. of quantiles, no. of additive regressors]
                    "additive": init_parameter(dims=[len(self.quantiles), self.n_additive_regressor_params]),
                    # dimensions - [no. of quantiles, no. of multiplicative regressors]
                    "multiplicative": init_parameter(
                        dims=[len(self.quantiles), self.n_multiplicative_regressor_params]
                    ),
                }
            )

    def scalar_features_effects(self, features, params, indices=None):
        """
        Compute the weighted-sum effect of scalar features.

        Parameters
        ----------
        features : torch.Tensor, float
            Features (either additive or multiplicative), dims (batch,
            n_forecasts, n_features)
        params : nn.Parameter
            Params (either additive or multiplicative), dims (n_quantiles,
            n_features)
        indices : list of int
            Indices in the feature/param tensors to restrict the computation to

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, n_quantiles)
        """
        if indices is not None:
            features = features[:, :, indices]
            params = params[:, indices]
        # Broadcasted multiply, then sum over the feature axis:
        # features: (batch, n_forecasts, n_features) -> (batch, n_forecasts, 1, n_features)
        # params:   (n_quantiles, n_features)        -> (1, 1, n_quantiles, n_features)
        out = torch.sum(features.unsqueeze(dim=2) * params.unsqueeze(dim=0).unsqueeze(dim=0), dim=-1)
        return out  # dims (batch, n_forecasts, n_quantiles)

    def get_reg_weights(self, name):
        """
        Retrieve the weights of regressor features given the name

        Parameters
        ----------
        name : string
            Regressor name

        Returns
        -------
        torch.tensor
            Weight corresponding to the given regressor, dims (n_quantiles, 1)
        """
        regressor_dims = self.regressors_dims[name]
        mode = regressor_dims["mode"]
        index = regressor_dims["regressor_index"]

        if mode == "additive":
            regressor_params = self.regressor_params["additive"]
        else:
            assert mode == "multiplicative"
            regressor_params = self.regressor_params["multiplicative"]

        # Slice with a range so the column dimension is preserved.
        return regressor_params[:, index : (index + 1)]

    def forward(self, inputs, mode, indeces=None):
        """Compute the future-regressors component for one mode.

        Parameters
        ----------
        inputs : torch.Tensor, float
            Future regressor features, dims (batch, n_forecasts, n_features)
        mode : string, either "additive" or "multiplicative"
            Mode of the regressors to compute
        indeces : list of int, optional
            Feature indices to restrict the computation to

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, n_quantiles)

        Raises
        ------
        ValueError
            If ``mode`` is neither "additive" nor "multiplicative".
        """
        # Previously an unknown mode fell through both branches and crashed
        # with an opaque UnboundLocalError; fail fast with a clear message instead.
        if mode == "additive":
            return self.scalar_features_effects(inputs, self.regressor_params["additive"], indeces)
        if mode == "multiplicative":
            return self.scalar_features_effects(inputs, self.regressor_params["multiplicative"], indeces)
        raise ValueError(f"Unknown future regressor mode {mode!r}; expected 'additive' or 'multiplicative'.")
class NeuralNetsFutureRegressors(FutureRegressors):
    """Future regressors modelled with one small feed-forward net per regressor.

    For every configured regressor a separate MLP (hidden sizes from
    ``config.layers``, ReLU activations, final bias-free layer of width
    ``len(quantiles)``) maps the scalar regressor value to a per-quantile
    effect. Effects of regressors sharing a mode are summed in
    :meth:`all_regressors`.
    """

    def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend_none_bool):
        """
        Parameters
        ----------
        config :
            Future-regressors configuration; ``config.layers`` lists the hidden
            layer sizes shared by all per-regressor nets.
        id_list : list
            Time-series IDs known to the model.
        quantiles : list
            Quantiles estimated by the model (sets the output width of each net).
        n_forecasts : int
            Number of steps ahead the model forecasts.
        device : torch.device
            Device the parameters should live on.
        config_trend_none_bool : bool
            True if the model has no trend; the base class rejects
            multiplicative regressors in that case.
        """
        super().__init__(
            config=config,
            n_forecasts=n_forecasts,
            quantiles=quantiles,
            id_list=id_list,
            device=device,
            config_trend_none_bool=config_trend_none_bool,
        )
        if self.regressors_dims is not None:
            # Regressor params: one independent net per regressor name.
            self.regressor_nets = nn.ModuleDict({})
            self.layers = config.layers
            # One net per regressor; to be adapted to a combined network later.
            for regressor in self.regressors_dims.keys():
                # Nets are built the same way for additive and multiplicative regressors.
                regressor_net = nn.ModuleList()
                # Input width is 1 (the scalar regressor value); will later become
                # 1 + static covariates.
                d_inputs = 1
                for d_hidden_i in self.layers:
                    regressor_net.append(nn.Linear(d_inputs, d_hidden_i, bias=True))
                    d_inputs = d_hidden_i
                # Final layer has input size d_inputs and output size equal to the
                # number of quantiles.
                regressor_net.append(nn.Linear(d_inputs, len(self.quantiles), bias=False))
                for lay in regressor_net:
                    nn.init.kaiming_normal_(lay.weight, mode="fan_in")
                self.regressor_nets[regressor] = regressor_net

    def get_reg_weights(self, name):
        """
        Get attributions of the regressor's component network w.r.t. the model input.

        Parameters
        ----------
        name : string
            Regressor name

        Returns
        -------
        torch.tensor
            Attribution weights corresponding to the given regressor
        """
        reg_attributions = interprete_model(
            self,
            net="regressor_nets",
            forward_func="regressor",
            _num_in_features=self.regressor_nets[name][0].in_features,
            _num_out_features=self.regressor_nets[name][-1].out_features,
            additional_forward_args=name,
        )

        return reg_attributions

    def regressor(self, regressor_input, name):
        """Compute a single regressor component through its dedicated net.

        Parameters
        ----------
        regressor_input : torch.Tensor, float
            Regressor values, dims: (batch, n_forecasts, 1)
        name : str
            Name of the regressor, selecting the corresponding net

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        x = regressor_input
        # ReLU is applied before every layer except the first (so the final
        # layer's output is not activated).
        for i in range(len(self.layers) + 1):
            if i > 0:
                x = nn.functional.relu(x)
            x = self.regressor_nets[name][i](x)

        return x

    def all_regressors(self, regressor_inputs, mode):
        """Sum the components of all regressors configured with the given mode.

        Parameters
        ----------
        regressor_inputs : torch.Tensor, float
            Regressor values, dims: (batch, n_forecasts, num_regressors)
        mode : str
            Either "additive" or "multiplicative"; only regressors configured
            with this mode are included.

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        # Keep only regressors whose configured mode matches the requested one.
        regressors_dims_filtered = OrderedDict((k, v) for k, v in self.regressors_dims.items() if v["mode"] == mode)
        # NOTE(review): if no regressor has this mode, `x` is never bound and the
        # final return raises UnboundLocalError — presumably callers only pass
        # modes that are actually configured; confirm at call sites.
        for i, name in enumerate(regressors_dims_filtered.keys()):
            regressor_index = regressors_dims_filtered[name]["regressor_index"]
            # Slice out this regressor's column and keep a trailing singleton dim.
            regressor_input = regressor_inputs[:, :, regressor_index].unsqueeze(dim=2)
            if i == 0:
                x = self.regressor(regressor_input, name=name)
            if i > 0:
                x = x + self.regressor(regressor_input, name=name)
        return x

    def forward(self, inputs, mode, indeces=None):
        """Compute the future-regressors component for one mode.

        Parameters
        ----------
        inputs : torch.Tensor, float
            Future regressor values, dims: (batch, n_forecasts, num_regressors)
        mode : string, either "additive" or "multiplicative"
            Mode of the regressors to compute
        indeces : list of int, optional
            Unused in this implementation (kept for interface compatibility
            with the linear variant).

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        return self.all_regressors(inputs, mode)
    def get_reg_weights(self, name):
        """
        Get attributions of regressors component network w.r.t. the model input.

        Parameters
        ----------
        name : string
            Regressor name

        Returns
        -------
        torch.tensor
            Weight corresponding to the given regressor
        """
        # Nets are shared per mode ("additive"/"multiplicative"), not per regressor.
        mode = self.config_regressors.regressors[name].mode
        # NOTE(review): forward_func="regressors_net" does not match the method name
        # ``regressors`` defined below — confirm how interprete_model resolves it.
        reg_attributions = interprete_model(
            self,
            net="regressor_nets",
            forward_func="regressors_net",
            _num_in_features=self.regressor_nets[mode][0].in_features,
            _num_out_features=self.regressor_nets[mode][-1].out_features,
            additional_forward_args=mode,
        )

        # Keep only the attribution column belonging to the requested regressor.
        regressor_index = self.regressors_dims[name]["regressor_index"]
        return reg_attributions[:, regressor_index].unsqueeze(-1)

    def regressors(self, regressor_inputs, mode):
        """Compute the combined component of all regressors of one mode with the shared net.

        Parameters
        ----------
        regressor_inputs : torch.Tensor, float
            regressor values, dims: (batch, n_forecasts, num_regressors_of_mode)
        mode : str
            either "additive" or "multiplicative"

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        x = regressor_inputs
        # Hidden layers with ReLU in between; no activation after the final (quantile) layer.
        for i in range(len(self.layers) + 1):
            if i > 0:
                x = nn.functional.relu(x)
            x = self.regressor_nets[mode][i](x)

        # segment the last dimension to match the quantiles
        # x = x.reshape(x.shape[0], self.n_forecasts, len(self.quantiles)) # causes error in case of multiple forecast targes, possibly unneeded/wrong
        return x

    def forward(self, inputs, mode, indeces=None):
        """Compute the future-regressors component for one mode.

        Parameters
        ----------
        inputs : torch.Tensor, float
            future regressors inputs
        mode : string, either "additive" or "multiplicative"
            mode of the regressors
        indeces : unused
            kept for interface compatibility with the other regressor components

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, no_quantiles)
        """
        return self.regressors(inputs, mode)
of quantiles 35 | regressor_net.append(nn.Linear(d_inputs, size_i * len(self.quantiles), bias=False)) 36 | for lay in regressor_net: 37 | nn.init.kaiming_normal_(lay.weight, mode="fan_in") 38 | self.regressor_nets[net_i] = regressor_net 39 | 40 | def get_reg_weights(self, name): 41 | """ 42 | Get attributions of regressors component network w.r.t. the model input. 43 | 44 | Parameters 45 | ---------- 46 | name : string 47 | Regressor name 48 | 49 | Returns 50 | ------- 51 | torch.tensor 52 | Weight corresponding to the given regressor 53 | """ 54 | 55 | mode = self.config_regressors.regressors[name].mode 56 | reg_attributions = interprete_model( 57 | self, 58 | net="regressor_nets", 59 | forward_func="regressors_net", 60 | _num_in_features=self.regressor_nets[mode][0].in_features, 61 | _num_out_features=self.regressor_nets[mode][-1].out_features 62 | / Counter([x["mode"] for x in self.regressors_dims.values()])[mode], 63 | additional_forward_args=mode, 64 | ) 65 | 66 | regressor_index = self.regressors_dims[name]["regressor_index"] 67 | return reg_attributions[:, regressor_index].unsqueeze(-1) 68 | 69 | def regressors(self, regressor_inputs, mode): 70 | """Compute single regressor component. 
71 | Parameters 72 | ---------- 73 | regressor_input : torch.Tensor, float 74 | regressor values at corresponding, dims: (batch, n_forecasts, num_regressors) 75 | nam : str 76 | Name of regressor, for attribution to corresponding model weights 77 | Returns 78 | ------- 79 | torch.Tensor 80 | Forecast component of dims (batch, n_forecasts, num_quantiles) 81 | """ 82 | x = regressor_inputs 83 | for i in range(len(self.layers) + 1): 84 | if i > 0: 85 | x = nn.functional.relu(x) 86 | x = self.regressor_nets[mode][i](x) 87 | 88 | # segment the last dimension to match the quantiles 89 | # causes errorin case of multiple forecast targes and lags, likely wrong, but needed with no lags 90 | x = x.reshape(x.shape[0], self.n_forecasts, regressor_inputs.shape[-1], len(self.quantiles)) 91 | x = (regressor_inputs.unsqueeze(-1) * x).sum(-2) 92 | return x 93 | 94 | def forward(self, inputs, mode, indeces=None): 95 | """Compute all seasonality components. 96 | Parameters 97 | ---------- 98 | inputs : torch.Tensor, float 99 | future regressors inputs 100 | mode: string, either "additive" or "multiplicative" 101 | mode of the regressors 102 | Returns 103 | ------- 104 | torch.Tensor 105 | Forecast component of dims (batch, n_forecasts, no_quantiles) 106 | """ 107 | return self.regressors(inputs, mode) 108 | -------------------------------------------------------------------------------- /neuralprophet/components/router.py: -------------------------------------------------------------------------------- 1 | from neuralprophet.components.future_regressors.linear import LinearFutureRegressors 2 | from neuralprophet.components.future_regressors.neural_nets import NeuralNetsFutureRegressors 3 | from neuralprophet.components.future_regressors.shared_neural_nets import SharedNeuralNetsFutureRegressors 4 | from neuralprophet.components.future_regressors.shared_neural_nets_coef import SharedNeuralNetsCoefFutureRegressors 5 | from neuralprophet.components.seasonality.fourier import ( 6 | 
def get_trend(config, n_forecasts, quantiles, id_list, num_trends_modelled, device):
    """
    Router for all trend classes.

    Based on the conditions provided, the correct trend class is returned and
    initialized using the provided args.

    Parameters
    ----------
    config : configure_components.Trend
    n_forecasts : int
        number of steps to forecast. Aka number of model outputs
    quantiles : list
        the set of quantiles estimated
    id_list : list
        List of different time series IDs, used for global-local modelling (if enabled)

        Note
        ----
        This parameter is set to ``['__df__']`` if only one time series is input.
    num_trends_modelled : int
        Number of different trends modelled.

        Note
        ----
        If only 1 time series is modelled, it will be always 1.

        Note
        ----
        For multiple time series. If trend is modelled globally the value is set
        to 1, otherwise it is set to the number of time series modelled.
    device : torch.device
        Device that tensors are stored on.

        Note
        ----
        This is set to ``torch.device("cpu")`` if no GPU is available.
    """
    args = {
        "config": config,
        "id_list": id_list,
        "quantiles": quantiles,
        "num_trends_modelled": num_trends_modelled,
        "n_forecasts": n_forecasts,
        "device": device,
    }

    # Guard clauses first: no trend at all, then unsupported growth modes.
    if config.growth == "off":
        return StaticTrend(**args)
    if config.growth not in ("linear", "discontinuous"):
        raise ValueError(f"Growth type {config.growth} is not supported.")

    # Linear growth: choose global vs local, and plain vs piecewise (changepoints).
    is_global = num_trends_modelled == 1
    has_changepoints = int(config.n_changepoints) != 0
    if is_global:
        return GlobalPiecewiseLinearTrend(**args) if has_changepoints else GlobalLinearTrend(**args)
    return LocalPiecewiseLinearTrend(**args) if has_changepoints else LocalLinearTrend(**args)


def get_future_regressors(config, id_list, quantiles, n_forecasts, device, config_trend_none_bool):
    """
    Router for all future regressor classes.
    """
    # Dispatch table keyed by the configured model type.
    model_classes = {
        "linear": LinearFutureRegressors,
        "neural_nets": NeuralNetsFutureRegressors,
        "shared_neural_nets": SharedNeuralNetsFutureRegressors,
        "shared_neural_nets_coef": SharedNeuralNetsCoefFutureRegressors,
    }
    if config.model not in model_classes:
        raise ValueError(f"Model type {config.model} is not supported.")
    return model_classes[config.model](
        config=config,
        id_list=id_list,
        quantiles=quantiles,
        n_forecasts=n_forecasts,
        device=device,
        config_trend_none_bool=config_trend_none_bool,
    )


def get_seasonality(
    config, id_list, quantiles, num_seasonalities_modelled, num_seasonalities_modelled_dict, n_forecasts, device
):
    """
    Router for all seasonality classes.
    """
    args = {
        "config": config,
        "id_list": id_list,
        "quantiles": quantiles,
        "n_forecasts": n_forecasts,
        "device": device,
        "num_seasonalities_modelled": num_seasonalities_modelled,
        "num_seasonalities_modelled_dict": num_seasonalities_modelled_dict,
    }

    # A single time series is always modelled with the global strategy.
    if len(id_list) == 1:
        config.global_local = "global"
        for period_name in config.periods:
            config.periods[period_name].global_local = "global"

    # Collapse per-period modes: one shared mode if all periods agree,
    # "glocal" when the periods mix modes.
    period_modes = {config.periods[period_name].global_local for period_name in config.periods}
    if len(period_modes) == 1:
        config.global_local = period_modes.pop()
    elif len(period_modes) > 1:
        config.global_local = "glocal"

    seasonality_classes = {
        "global": GlobalFourierSeasonality,
        "local": LocalFourierSeasonality,
        "glocal": GlocalFourierSeasonality,
    }
    if config.global_local not in seasonality_classes:
        raise ValueError(f"Seasonality mode {config.global_local} is not supported.")
    return seasonality_classes[config.global_local](**args)
class Seasonality(BaseComponent):
    """Base class for all seasonality components; the Fourier variants subclass it."""

    def __init__(
        self,
        config,
        id_list,
        quantiles,
        num_seasonalities_modelled,
        num_seasonalities_modelled_dict,
        n_forecasts,
        device,
    ):
        super().__init__(n_forecasts=n_forecasts, quantiles=quantiles, id_list=id_list, device=device)
        # Seasonality-specific configuration (periods and their modes).
        self.config_seasonality = config
        # NOTE(review): presumably 1 when seasonality is modelled globally and larger
        # for local modelling — confirm against router.get_seasonality callers.
        self.num_seasonalities_modelled = num_seasonalities_modelled
        self.num_seasonalities_modelled_dict = num_seasonalities_modelled_dict


class Trend(BaseComponent):
    """Abstract base class for all trend components."""

    def __init__(self, config, id_list, quantiles, num_trends_modelled, n_forecasts, device):
        super().__init__(n_forecasts=n_forecasts, quantiles=quantiles, id_list=id_list, device=device)
        self.config_trend = config
        self.num_trends_modelled = num_trends_modelled

        # if only 1 time series, global strategy
        if len(self.id_list) == 1:
            self.config_trend.trend_global_local = "global"

        # Trend offset shared by all subclasses.
        # dimensions - [no. of quantiles, 1 bias shape]
        self.bias = init_parameter(
            dims=[
                len(self.quantiles),
            ]
        )

    @property
    @abstractmethod
    def get_trend_deltas(self):
        """trend deltas for regularization.

        update if trend is modelled differently"""
        pass

    @abstractmethod
    def add_regularization(self):
        """add regularization to loss"""
        pass


class LinearTrend(Trend):
    """Linear trend (no changepoints); shared logic for the global and local variants."""

    def __init__(self, config, id_list, quantiles, num_trends_modelled, n_forecasts, device):
        super().__init__(
            config=config,
            n_forecasts=n_forecasts,
            num_trends_modelled=num_trends_modelled,
            quantiles=quantiles,
            id_list=id_list,
            device=device,
        )
        # Trend_k0 parameter.
        # dimensions - [no. of quantiles, num_trends_modelled, trend coeff shape]
        self.trend_k0 = init_parameter(dims=([len(self.quantiles)] + [self.num_trends_modelled] + [1]))

    @property
    def get_trend_deltas(self):
        """trend deltas for regularization.

        update if trend is modelled differently"""
        # NOTE(review): returns self.trend_deltas, which is never assigned on this
        # class — presumably defined by piecewise subclasses; confirm before use here.
        if self.config_trend is None:
            trend_delta = None
        else:
            trend_delta = self.trend_deltas

        return trend_delta

    def add_regularization(self):
        """No extra regularization term for the plain linear trend."""
        pass
class LocalLinearTrend(LinearTrend):
    """Linear trend with a separate slope per time series ID (local modelling)."""

    def __init__(self, config, id_list, quantiles, num_trends_modelled, n_forecasts, device):
        super().__init__(
            config=config,
            n_forecasts=n_forecasts,
            num_trends_modelled=num_trends_modelled,
            quantiles=quantiles,
            id_list=id_list,
            device=device,
        )

    def forward(self, t, meta):
        """
        Computes trend based on model configuration.

        Parameters
        ----------
        t : torch.Tensor float
            normalized time, dim: (batch, n_forecasts)
        meta: dict
            Metadata about the all the samples of the model input batch. Contains the following:
            * ``df_name`` (list, str), time series ID corresponding to each sample of the input batch.

            NOTE(review): despite the dict description above, ``one_hot(meta)`` below
            requires ``meta`` to be an integer tensor of time-series indices — confirm
            what the callers actually pass.
        Returns
        -------
        torch.Tensor
            Trend component, same dimensions as input t
        """
        # From the dataloader meta data, we get the one-hot encoding of the df_name.
        meta_name_tensor_one_hot = nn.functional.one_hot(meta, num_classes=len(self.id_list))
        # Select each sample's slope via the one-hot mask:
        # trend_k_0 = trend_k_0(sample metadata)
        # dimensions - batch_size, segments(1), quantiles
        trend_k_0 = torch.sum(
            meta_name_tensor_one_hot.unsqueeze(dim=0).unsqueeze(dim=-1) * self.trend_k0.unsqueeze(dim=1), dim=2
        ).permute(1, 2, 0)
        # dimensions - batch_size, n_forecasts, quantiles
        trend = trend_k_0 * t.unsqueeze(2)
        return self.bias.unsqueeze(dim=0).unsqueeze(dim=0) + trend
class PinballLoss(_Loss):
    """Pinball (quantile) loss for quantile regression.

    Wraps an elementwise base loss and weights it asymmetrically by quantile
    level, so under- and over-predictions are penalized differently per quantile.
    """

    def __init__(self, loss_func, quantiles):
        """
        Args:
            loss_func : torch.nn._Loss
                Elementwise loss (must use ``reduction="none"``) to be used as the
                base loss for the pinball loss
            quantiles : list
                list of quantiles estimated from the model; the median is expected
                at index 0 (its loss is doubled in ``forward``)
        """
        super().__init__()
        self.loss_func = loss_func
        self.quantiles = quantiles

    def forward(self, outputs, target):
        """
        Computes the pinball loss from forecasts.

        Args:
            outputs : torch.tensor
                outputs from the model of dims (batch, n_forecasts, no_quantiles)
            target : torch.tensor
                actual targets of dims (batch, n_forecasts, 1)

        Returns:
            torch.tensor
                elementwise pinball losses of dims (batch, n_forecasts, no_quantiles)
        """
        # Broadcast targets across the quantile dimension. expand returns a view,
        # avoiding the copy the previous repeat() materialized.
        target = target.expand(-1, -1, len(self.quantiles))
        differences = target - outputs
        base_losses = self.loss_func(outputs, target).float()  # dims: (batch, n_forecasts, no. of quantiles)
        quantiles_tensor = (
            torch.tensor(self.quantiles, device=target.device, dtype=torch.float32).unsqueeze(dim=0).unsqueeze(dim=0)
        )
        # Under-prediction (target above output) weighted by q, over-prediction by (1 - q).
        positive_losses = quantiles_tensor * base_losses
        negative_losses = (1 - quantiles_tensor) * base_losses
        differences = differences.float()
        pinball_losses = torch.where(differences >= 0, positive_losses, negative_losses)
        # Double the loss for the quantile at index 0, assumed to be the median.
        multiplier = torch.ones(size=(1, 1, len(self.quantiles)), device=target.device, dtype=torch.float32)
        multiplier[:, :, 0] = 2
        pinball_losses = multiplier * pinball_losses
        return pinball_losses
def get_holiday_names(country: Union[str, Iterable[str]], df=None):
    """
    Return all possible holiday names for a list of countries over time period in df.

    Parameters
    ----------
    country : str, list
        List of country names to retrieve country specific holidays
    df : pd.Dataframe
        Dataframe from which datestamps will be retrieved from; when None,
        the fixed year range 1995-2044 is used instead

    Returns
    -------
    set
        All possible holiday names of given country
    """
    if df is None:
        # No data supplied: fall back to a wide fixed range of years.
        years = np.arange(1995, 2045)
    else:
        dates = df["ds"].copy(deep=True)
        years = pd.unique(dates.apply(lambda x: x.year))
    # Support multiple countries; normalize a single name to a list.
    if isinstance(country, str):
        country = [country]

    all_holidays = get_all_holidays(years=years, country=country)
    return set(all_holidays.keys())


def get_all_holidays(years, country):
    """
    Collect country specific holiday dates for given years and countries.

    Parameters
    ----------
    years : list
        List of years
    country : str, list, dict
        List of country names and optional subdivisions

    Returns
    -------
    defaultdict
        Mapping of holiday name to the list of dates (pd.Timestamp) it falls on
    """
    # Normalize input to a {country_name: subdivision_or_None} mapping.
    if isinstance(country, str):
        country = {country: None}
    elif isinstance(country, list):
        country = dict.fromkeys(country)

    all_holidays = defaultdict(list)
    # iterate over countries and get holidays for each country
    for single_country, subdivision in country.items():
        # For compatibility with Turkey as "TU" cases.
        single_country = "TUR" if single_country == "TU" else single_country
        # get dict of dates and their holiday name
        single_country_specific_holidays = country_holidays(
            country=single_country, subdiv=subdivision, years=years, expand=True, observed=False, language="en"
        )
        # invert order - for given holiday, store list of dates
        for date, name in single_country_specific_holidays.items():
            all_holidays[name].append(pd.to_datetime(date))
    return all_holidays
    def after_save_checkpoint(self, checkpoint_callback) -> None:
        """Called after model checkpoint callback saves a new checkpoint.

        Args:
            checkpoint_callback: the model checkpoint callback instance
        """
        # Remember where the best model was written so it can be reloaded later.
        self.checkpoint_path = checkpoint_callback.best_model_path

    @rank_zero_only
    def log_metrics(self, metrics: Mapping[str, float], step: Optional[int] = None) -> None:
        """Forward metrics to TensorBoard and record them in the in-memory history dict."""
        super(MetricsLogger, self).log_metrics(metrics, step)
        # metrics is a dictionary of metric names and values
        for metric_name, metric_value in metrics.items():
            if metric_name == "hp_metric":
                # TensorBoard's default hparams placeholder; not useful in the history.
                pass
            elif metric_name != "epoch":
                self.history[metric_name].append(metric_value)
            else:  # case epoch. We want to avoid adding multiple times the same. It happens for multiple losses.
                if (
                    not len(self.history["epoch"]) or not self.history["epoch"][-1] == metric_value  # len == 0:
                ):  # the last values of epochs is not the one we are currently trying to add.
                    self.history["epoch"].append(metric_value)
                else:
                    pass
        return
METRICS = {
    # "short_name": [torchmetrics.Metric class name, {optional init args}]
    "MAE": ["MeanAbsoluteError", {}],
    "MSE": ["MeanSquaredError", {"squared": True}],
    "RMSE": ["MeanSquaredError", {"squared": False}],
}


def get_metrics(metric_input):
    """
    Translate the user-facing ``collect_metrics`` argument into a dict of metrics.

    Parameters
    ----------
    metric_input : None, bool, str, list or dict
        Metrics specification received from the user:
        ``None`` or ``False`` disables metric collection, ``True`` selects the
        defaults (MAE and RMSE), a str/list selects metrics by short name
        (case-insensitive), and a dict maps custom names to
        ``torchmetrics.Metric`` class names.

    Returns
    -------
    dict or bool
        Dict mapping metric name to ``[torchmetrics class name, init kwargs]``,
        or ``False`` when metric collection is disabled.

    Raises
    ------
    ValueError
        If the input is of an unsupported type or names an unknown metric.
    """
    if metric_input is None or metric_input is False:
        # Metric collection disabled
        return False
    elif metric_input is True:
        # Default metric selection
        return {"MAE": METRICS["MAE"], "RMSE": METRICS["RMSE"]}
    elif isinstance(metric_input, str):
        # Single metric by short name; keep the user's spelling as the key
        if metric_input.upper() in METRICS:
            return {metric_input: METRICS[metric_input.upper()]}
        raise ValueError("Received unsupported argument for collect_metrics.")
    elif isinstance(metric_input, list):
        if all(m.upper() in METRICS for m in metric_input):
            return {m: METRICS[m.upper()] for m in metric_input}
        raise ValueError("Received unsupported argument for collect_metrics.")
    elif isinstance(metric_input, dict):
        # check if all values are names belonging to torchmetrics.Metric
        try:
            for _metric in metric_input.values():
                torchmetrics.__dict__[_metric]()
        except KeyError as err:
            # Fixed: the two sentences were concatenated without a separating
            # space; also chain the original KeyError for debuggability.
            raise ValueError(
                "Received unsupported argument for collect_metrics. "
                "All metrics must be valid names of torchmetrics.Metric objects."
            ) from err
        return {k: [v, {}] for k, v in metric_input.items()}
    else:
        raise ValueError("Received unsupported argument for collect_metrics.")
def init_parameter(dims):
    """
    Create and initialize a new torch Parameter.

    Parameters
    ----------
    dims : list or tuple
        Desired dimensions of parameter

    Returns
    -------
    nn.Parameter
        initialized Parameter
    """
    # Accept any sequence: the docstring promises list OR tuple, but the
    # one-dimensional branch concatenates `[1] + dims`, which raises
    # TypeError for tuples. Normalize to a list first.
    dims = list(dims)
    if len(dims) > 1:
        return nn.Parameter(nn.init.xavier_normal_(torch.randn(dims)), requires_grad=True)
    else:
        # xavier_normal_ requires at least 2 dims: prepend a singleton
        # dimension, initialize, then squeeze it away again.
        return nn.Parameter(torch.nn.init.xavier_normal_(torch.randn([1] + dims)).squeeze(0), requires_grad=True)
def create_optimizer_from_config(optimizer_name, optimizer_args):
    """
    Translate an optimizer name and argument dict into a torch optimizer class.

    If a ``torch.optim.Optimizer`` subclass is provided instead of a name, it
    is returned as is. The optimizer is not instantiated here, since that is
    done later by the trainer.

    Parameters
    ----------
    optimizer_name : str or type
        Name of a supported optimizer ("adamw" or "sgd"), or a
        ``torch.optim.Optimizer`` subclass.
    optimizer_args : dict
        Arguments for the optimizer; default regularization/momentum values
        are filled in for the named optimizers.

    Returns
    -------
    optimizer : torch.optim.Optimizer
        The optimizer class.
    optimizer_args : dict
        The (possibly augmented) optimizer arguments.
    """
    # Pass a user-supplied optimizer class straight through.
    if inspect.isclass(optimizer_name) and issubclass(optimizer_name, torch.optim.Optimizer):
        return optimizer_name, optimizer_args

    if not isinstance(optimizer_name, str):
        raise ValueError("The provided optimizer is not supported.")

    normalized = optimizer_name.lower()
    if normalized == "adamw":
        # Tends to overfit, but reliable
        optimizer_args["weight_decay"] = 1e-3
        return torch.optim.AdamW, optimizer_args
    if normalized == "sgd":
        # better validation performance, but diverges sometimes
        optimizer_args["momentum"] = 0.9
        optimizer_args["weight_decay"] = 1e-4
        return torch.optim.SGD, optimizer_args
    raise ValueError(f"The optimizer name {optimizer_name} is not supported.")
def interprete_model(
    target_model: pl.LightningModule,
    net: str,
    forward_func: str,
    _num_in_features: int = None,
    _num_out_features: int = None,
    _input: torch.Tensor = None,
    additional_forward_args: Any = None,
):
    """
    Returns model input attributions for a given network and forward function.

    Computes one saliency (gradient) attribution row per
    (output feature, quantile) pair, stacked into a single tensor.

    Parameters
    ----------
    target_model : pl.LightningModule
        The model for which input attributions are to be computed.
    net : str
        Name of the network (an attribute of ``target_model``) for which input
        attributions are to be computed.
    forward_func : str
        Name of the forward function for which input attributions are to be computed.
    _num_in_features : int, optional
        Override for the number of input features; by default read from the
        ``in_features`` of the net's first layer.
    _num_out_features : int, optional
        Override for the number of output features; by default read from the
        ``out_features`` of the net's last layer.
    _input : torch.Tensor, optional
        Input for which the attributions are to be computed; defaults to a
        ``(1, num_in_features)`` tensor of ones.
    additional_forward_args : Any, optional
        Passed through unchanged to ``Saliency.attribute``.

    Returns
    -------
    torch.Tensor
        Input attributions of shape
        ``(num_out_features_without_quantiles * num_quantiles, num_in_features)``.
    """
    # Load the respective forward function from the model and init model interpreter
    forward = getattr(target_model, forward_func)
    saliency = Saliency(forward_func=forward)

    # Number of quantiles
    num_quantiles = len(target_model.quantiles)

    # Number of input features to the net (aka n_lags)
    # NOTE(review): assumes the net is an indexable stack of linear-like layers
    # exposing in_features/out_features — confirm for all supported nets.
    num_in_features = getattr(target_model, net)[0].in_features if _num_in_features is None else _num_in_features
    # Number of output features from the net (aka n_forecasts)
    num_out_features = getattr(target_model, net)[-1].out_features if _num_out_features is None else _num_out_features

    # The net emits num_quantiles values per forecast step.
    num_out_features_without_quantiles = int(num_out_features / num_quantiles)

    # Create a tensor of ones as model input
    model_input = torch.ones(1, num_in_features, requires_grad=True) if _input is None else _input

    # Iterate through each output feature and compute the model attribution wrt. the input
    attributions = torch.empty((0, num_in_features))
    for output_feature in range(num_out_features_without_quantiles):
        for quantile in range(num_quantiles):
            target_attribution = saliency.attribute(
                model_input,
                target=[(output_feature, quantile)],
                abs=False,  # keep signed attributions, not magnitudes
                additional_forward_args=additional_forward_args,
            )
            attributions = torch.cat((attributions, target_attribution), 0)

    # Average the attributions over the input features
    # Idea: Average attribution of each lag on all forecasts (eg the n'th lag has an attribution of xyz on the forecast)
    # TODO: support the visualization of 2d tensors in plot_parameters
    # (aka the attribution of the n'th lag on the m'th forecast)

    return attributions
https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/notes/NeuralProphet_Facebook_Forecasting_Summit.pdf -------------------------------------------------------------------------------- /notes/NeuralProphet_Introduction.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/notes/NeuralProphet_Introduction.pdf -------------------------------------------------------------------------------- /notes/development_timeline.md: -------------------------------------------------------------------------------- 1 | ## Development Timeline 2 | ### v0.1 Alpha [released in 2020] 3 | Working version of NeuralProphet with missing features and potentially containing bugs. 4 | 5 | ### v0.2 to v0.5 Beta NeuralProphet [current] 6 | Modelling capabilities: 7 | * [done] Trend, piecewise linear changepoints 8 | * [done] Auto-regression, univariate, multi-step ahead forecasts 9 | * [done] Seasonalities, based on fourier-terms 10 | * [done] Optional hidden layers for AR 11 | * [done] Manage missing data - basic automatic imputation 12 | * [done] Basic Automatic hyperparameter selection 13 | * [done] Custom Metrics 14 | * [done] Training with evaluation on holdout set 15 | * [done] Events and Holidays 16 | * [done] Exagenous variables (as covariate inputs) 17 | * Simple Uncertainty estimation 18 | 19 | User Interface: 20 | * simple package with limited capabilities 21 | * similar to Facebook Prophet's basic features 22 | 23 | Accompanying Products: 24 | * Quickstart documentation and examples 25 | * Benchmarks (Accuracy and Execution time) 26 | * Few datasets 27 | 28 | ### v1.0 NeuralProphet 29 | Added modelling capabilities: 30 | * More intelligent Automatic hyperparameter selection 31 | * different ways to manage trend/normalize data and compute seasonality (rolling, local seasonality, ...) 
32 | * Inclusion of traditional models (ets, sarimax, ...) 33 | * Component-wise uncertainty 34 | 35 | User Interface: 36 | * More user-control (set trend changepoint times, ...) 37 | * Rich analytics and plotting 38 | * Model gives user feedback on how to improve hyperparameters (if set) 39 | * Integration with Time-Series Preprocessing tools 40 | 41 | Accompanying Products: 42 | * Large collection of time-series datasets 43 | * Professional documentation and more tutorials 44 | 45 | ### v2.0 Redesigned - Modular Framework 46 | Here, we will re-write large portions of the code structure in order to make it a modular framework where model components can freely be interchanged and combined. 47 | 48 | Added modelling capabilities: 49 | * Inclusion of more potent models (Recurrence, Convolution, Attention, ...) 50 | 51 | User Interface: 52 | * Tools for Understanding of model and input-output mapping 53 | * Integration with relevant Interfaces (Pytorch metrics, Tensorboard, scikitlearn, ...) 54 | 55 | Accompanying Products: 56 | * Pre-trained models 57 | 58 | ### v3.0 Nice UI for non-programmers 59 | Alternative visual web-interface, potentially cloud-based execution 60 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "neuralprophet" 3 | version = "1.0.0rc10" 4 | description = "NeuralProphet is an easy to learn framework for interpretable time series forecasting." 
5 | authors = ["Oskar Triebe "] 6 | license = "MIT" 7 | readme = "README.md" 8 | classifiers = [ 9 | "Development Status :: 4 - Beta", 10 | "Natural Language :: English", 11 | "Operating System :: OS Independent", 12 | "Operating System :: POSIX :: Linux", 13 | "Operating System :: MacOS :: MacOS X", 14 | ] 15 | 16 | [tool.poetry.urls] 17 | Homepage = "https://github.com/ourownstory/neural_prophet" 18 | 19 | [tool.poetry.dependencies] 20 | python = ">=3.9,<3.13" 21 | numpy = ">=1.25.0,<2.0.0" 22 | pandas = ">=2.0.0" 23 | torch = ">=2.0.0,<2.4.0" 24 | # Note: torch defaults to already installed version or installs CUDA version 25 | # If you want CPU-only torch, install that before installing neuralprophet. 26 | pytorch-lightning = ">=2.0.0,<2.4.0" 27 | tensorboard = ">=2.11.2" 28 | torchmetrics = ">=1.0.0" 29 | typing-extensions = ">=4.5.0" 30 | holidays = ">=0.41,<1.0" 31 | captum = ">=0.6.0" 32 | matplotlib = ">=3.5.3" 33 | plotly = ">=5.13.1" 34 | kaleido = "0.2.1" # required for plotly static image export 35 | plotly-resampler = { version = ">=0.9.2", optional = true } 36 | livelossplot = { version = ">=0.5.5", optional = true } 37 | lightning-fabric = ">=2.0.0,<2.4.0" 38 | 39 | [tool.poetry.extras] 40 | plotly-resampler = ["plotly-resampler"] 41 | live = ["livelossplot"] 42 | 43 | [tool.poetry.group.dev.dependencies] # For dev involving notebooks 44 | ipykernel = ">=6.29.2" 45 | nbformat = ">=5.8.0" 46 | 47 | [tool.poetry.group.pytest] # pytest dev setup and CI 48 | optional = true 49 | 50 | [tool.poetry.group.pytest.dependencies] 51 | pytest = "^8.0.0" 52 | pytest-xdist = "^3.1.0" 53 | pytest-cov = "^4.1.0" 54 | 55 | [tool.poetry.group.docs] # for building docs; and testing docs CI 56 | optional = true 57 | 58 | [tool.poetry.group.docs.dependencies] 59 | myst-parser = "^2.0.0" 60 | nbsphinx = "^0.9.0" 61 | nbsphinx-link = "^1.3.0" 62 | sphinx = "^7.0.0" 63 | sphinx-fontawesome = "^0.0.6" 64 | furo = "^2024.1.29" 65 | 66 | [tool.poetry.group.metrics] # for 
metrics CI 67 | optional = true 68 | 69 | [tool.poetry.group.metrics.dependencies] 70 | tabulate = "^0.9.0" # Used in model metrics CI only; md export for github-actions bot 71 | 72 | [tool.poetry.group.linters] # for linters CI 73 | optional = true 74 | 75 | [tool.poetry.group.linters.dependencies] 76 | black = { extras = ["jupyter"], version = "^24.1.0" } 77 | isort = "^5.12.0" 78 | pandas-stubs = "^2.0" 79 | flake8 = "^7.0.0" 80 | 81 | [build-system] 82 | requires = ["poetry-core"] 83 | build-backend = "poetry.core.masonry.api" 84 | 85 | [tool.black] 86 | line-length = 120 87 | target-version = ['py311'] 88 | include = '\.pyi?$' 89 | exclude = ''' 90 | 91 | ( 92 | /( 93 | \.eggs # exclude a few common directories in the 94 | | \.git # root of the project 95 | | \.hg 96 | | \.mypy_cache 97 | | \.tox 98 | | \.venv 99 | | _build 100 | | buck-out 101 | | build 102 | | dist 103 | | example_data 104 | | example_notebooks 105 | | notes 106 | | site 107 | )/ 108 | | .gitignore 109 | ) 110 | ''' 111 | 112 | [tool.isort] 113 | profile = "black" 114 | line_length = 120 115 | 116 | [tool.pyright] 117 | include = [ 118 | "neuralprophet/**/*.py" 119 | ] 120 | exclude = [ 121 | "neuralprophet/**/*plot*.py" 122 | ] 123 | 124 | [tool.ruff] 125 | line-length = 120 126 | typing-modules = ["neuralprophet.np_types"] 127 | -------------------------------------------------------------------------------- /repro.np: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ourownstory/neural_prophet/5e6b2314547334bbd5dbfc9cc5e019efcb3d67c5/repro.np -------------------------------------------------------------------------------- /scripts/install_hooks.bash: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Installing hooks..." 
def install_hooks():
    """Make the hook scripts executable, then run the installer that symlinks
    them into .git/hooks."""
    scripts_dir = os.path.abspath(os.path.dirname(__file__))
    for script_name in ("install_hooks.bash", "pre_commit.bash", "pre_push.bash"):
        # Ensure each hook script is executable before installation.
        subprocess.check_call(["chmod", "a+rwx", os.path.join(scripts_dir, script_name)])
    # Delegate the actual symlinking to the bash installer.
    subprocess.call(os.path.join(scripts_dir, "install_hooks.bash"), shell=True)
def read_json(path, metrics_path, branch):
    """
    Load a metrics JSON file into a two-column DataFrame.

    Parameters
    ----------
    path : str
        File name of the metrics JSON file.
    metrics_path : str
        Directory containing the file.
    branch : str
        Column name to assign to the metric values (e.g. "current" or "main").

    Returns
    -------
    pd.DataFrame or None
        DataFrame with columns ``["Metric", branch]``, or ``None`` if the file
        is missing or unreadable (best-effort behavior for optional baselines).
    """
    try:
        df = pd.read_json(os.path.join(metrics_path, path), orient="index")
        df = df.reset_index()
        df.rename(columns={0: branch, "index": "Metric"}, inplace=True)
        return df
    except Exception:
        # Was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; keep the deliberate best-effort None, but
        # only for ordinary exceptions.
        return None
def read_json(path, metrics_path, branch):
    """
    Load a metrics JSON file into a two-column DataFrame.

    Parameters
    ----------
    path : str
        File name of the metrics JSON file.
    metrics_path : str
        Directory containing the file.
    branch : str
        Column name to assign to the metric values (e.g. "current" or "main").

    Returns
    -------
    pd.DataFrame or None
        DataFrame with columns ``["Metric", branch]``, or ``None`` if the file
        is missing or unreadable (best-effort behavior for optional baselines).
    """
    try:
        df = pd.read_json(os.path.join(metrics_path, path), orient="index")
        df = df.reset_index()
        df.rename(columns={0: branch, "index": "Metric"}, inplace=True)
        return df
    except Exception:
        # Was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; keep the deliberate best-effort None, but
        # only for ordinary exceptions.
        return None
-------------------------------------------------------------------------------- /tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = tests 3 | numprocesses = auto 4 | durations = 0 5 | log_cli = true 6 | log_cli_level = ERROR 7 | log_cli_format = %(asctime)s [%(levelname)s]: %(message)s (%(filename)s:%(lineno)s) 8 | log_cli_date_format = %Y-%m-%d %H:%M:%S 9 | filterwarnings = 10 | ignore::DeprecationWarning:tensorboard.* 11 | ignore::pytorch_lightning.utilities.warnings.PossibleUserWarning 12 | addopts = --ignore tests/test_model_performance.py 13 | -------------------------------------------------------------------------------- /tests/test-data/air_passengers.csv: -------------------------------------------------------------------------------- 1 | ds,y 1949-01-01,112 1949-02-01,118 1949-03-01,132 1949-04-01,129 1949-05-01,121 1949-06-01,135 1949-07-01,148 1949-08-01,148 1949-09-01,136 1949-10-01,119 1949-11-01,104 1949-12-01,118 1950-01-01,115 1950-02-01,126 1950-03-01,141 1950-04-01,135 1950-05-01,125 1950-06-01,149 1950-07-01,170 1950-08-01,170 1950-09-01,158 1950-10-01,133 1950-11-01,114 1950-12-01,140 1951-01-01,145 1951-02-01,150 1951-03-01,178 1951-04-01,163 1951-05-01,172 1951-06-01,178 1951-07-01,199 1951-08-01,199 1951-09-01,184 1951-10-01,162 1951-11-01,146 1951-12-01,166 1952-01-01,171 1952-02-01,180 1952-03-01,193 1952-04-01,181 1952-05-01,183 1952-06-01,218 1952-07-01,230 1952-08-01,242 1952-09-01,209 1952-10-01,191 1952-11-01,172 1952-12-01,194 1953-01-01,196 1953-02-01,196 1953-03-01,236 1953-04-01,235 1953-05-01,229 1953-06-01,243 1953-07-01,264 1953-08-01,272 1953-09-01,237 1953-10-01,211 1953-11-01,180 1953-12-01,201 1954-01-01,204 1954-02-01,188 1954-03-01,235 1954-04-01,227 1954-05-01,234 1954-06-01,264 1954-07-01,302 1954-08-01,293 1954-09-01,259 1954-10-01,229 1954-11-01,203 1954-12-01,229 1955-01-01,242 1955-02-01,233 1955-03-01,267 1955-04-01,269 1955-05-01,270 
def test_main_file(capsys):
    """`neuralprophet --version` should exit with code 0 and print the package version."""
    with pytest.raises(SystemExit) as exit_info:
        parse_args(["--version"])

    captured_out, _ = capsys.readouterr()
    # argparse's --version action exits cleanly after printing the version.
    assert exit_info.value.code == 0
    assert __version__ in captured_out
def test_config_training_quantiles_error_invalid_scale():
    """Quantiles outside the open interval (0, 1) must be rejected."""
    for bad_quantiles in ([-1], [1.3]):
        with pytest.raises(Exception) as err:
            _ = NeuralProphet(quantiles=bad_quantiles)
        assert str(err.value) == "The quantiles specified need to be floats in-between (0, 1)."
def test_get_country_holidays_with_subdivisions():
    """Country holidays requested with a subdivision should still use the
    country's holidays class and contain at least one entry."""
    cases = [
        # (country, subdivision, expected holidays class)
        ("US", "CA", holidays.countries.united_states.UnitedStates),
        ("CA", "ON", holidays.countries.canada.CA),
    ]
    for country, subdiv, expected_cls in cases:
        subdiv_holidays = country_holidays(country, years=2019, subdiv=subdiv)
        assert issubclass(subdiv_holidays.__class__, expected_cls) is True
        # Assuming there are holidays specific to the subdivision
        assert len(subdiv_holidays) > 0
def test_events():
    """End-to-end smoke test: fit and predict with user-defined events
    (event windows, multiplicative mode) combined with multi-country holidays."""
    log.info("testing: Events")
    df = pd.read_csv(PEYTON_FILE)[-NROWS:]
    # Recurring user event: NFL playoff dates in the Peyton Manning data range
    playoffs = pd.DataFrame(
        {
            "event": "playoff",
            "ds": pd.to_datetime(
                [
                    "2008-01-13",
                    "2009-01-03",
                    "2010-01-16",
                    "2010-01-24",
                    "2010-02-07",
                    "2011-01-08",
                    "2013-01-12",
                    "2014-01-12",
                    "2014-01-19",
                    "2014-02-02",
                    "2015-01-11",
                    "2016-01-17",
                    "2016-01-24",
                    "2016-02-07",
                ]
            ),
        }
    )
    # Second user event: Super Bowl dates (a subset of the playoff dates)
    superbowls = pd.DataFrame(
        {
            "event": "superbowl",
            "ds": pd.to_datetime(["2010-02-07", "2014-02-02", "2016-02-07"]),
        }
    )
    events_df = pd.concat((playoffs, superbowls))
    m = NeuralProphet(
        n_lags=2,
        n_forecasts=30,
        daily_seasonality=False,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        learning_rate=LR,
    )
    # set event windows: include one day before and after each event date
    m = m.add_events(
        ["superbowl", "playoff"], lower_window=-1, upper_window=1, mode="multiplicative", regularization=0.5
    )
    # add the country specific holidays
    m = m.add_country_holidays(
        ["US", "Indonesia", "Philippines", "Pakistan", "Belarus"], mode="additive", regularization=0.5
    )
    # m.add_country_holidays("Thailand") # holidays package has issue with int input for timedelta. accepts np.float64()
    history_df = m.create_df_with_events(df, events_df)
    m.fit(history_df, freq="D")
    future = m.make_future_dataframe(df=history_df, events_df=events_df, periods=30, n_historic_predictions=90)
    forecast = m.predict(df=future)
    log.debug(f"Event Parameters:: {m.model.event_params}")
    if PLOT:
        m.plot_components(forecast)
        m.plot(forecast)
        m.plot_parameters()
        plt.show()
def test_future_reg_nn_shared():
    """Smoke-test future regressors modelled with a shared neural network."""
    log.info("testing: Future Regressors modelled with NNs shared")
    df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50)
    m = NeuralProphet(
        epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, future_regressors_model="shared_neural_nets"
    )
    # Synthetic regressors derived from y: rolling means over two window sizes.
    windows = {"A": 7, "B": 30, "C": 7, "D": 30}
    for name, window in windows.items():
        df[name] = df["y"].rolling(window, min_periods=1).mean()

    # Hold out the last 50 rows as known future regressor values.
    regressors_df_future = pd.DataFrame(data={name: df[name][-50:] for name in windows})
    df = df[:-50]
    m = m.add_future_regressor(name="A")
    m = m.add_future_regressor(name="B", mode="additive")
    m = m.add_future_regressor(name="C", mode="multiplicative")
    m = m.add_future_regressor(name="D", mode="multiplicative")
    m.fit(df, freq="D")
    future = m.make_future_dataframe(df=df, regressors_df=regressors_df_future, n_historic_predictions=10, periods=50)
    forecast = m.predict(df=future)
    if PLOT:
        m.plot(forecast)
        m.plot_components(forecast)
        m.plot_parameters()
        plt.show()
def test_future_regressor_nn_shared_2():
    """Fit a model whose future regressor is fed through a shared NN, with AR lags enabled."""
    log.info("future regressor with NN shared 2")

    df = pd.read_csv(ENERGY_TEMP_DAILY_FILE, nrows=NROWS)

    model_kwargs = dict(
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        learning_rate=LR,
        yearly_seasonality=False,
        weekly_seasonality=False,
        daily_seasonality=True,
        future_regressors_model="shared_neural_nets",
        future_regressors_layers=[4, 4],
        n_forecasts=3,
        n_lags=5,
        drop_missing=True,
    )
    m = NeuralProphet(**model_kwargs)
    df_train, df_val = m.split_df(df, freq="D", valid_p=0.2)

    # Temperature is known ahead of time, so it qualifies as a future regressor.
    m.add_future_regressor("temperature")

    metrics = m.fit(
        df_train, validation_df=df_val, freq="D", epochs=EPOCHS, learning_rate=LR, early_stopping=True, progress=False
    )
    log.debug(f"Metrics: {metrics}")
def test_reg_func_abs():
    """reg_func_abs should return the mean absolute value of its input tensor."""
    log.info("testing: reg func abs")
    # (expected mean-abs, tensor values) pairs covering positive, zero,
    # negative, and mixed-sign inputs of various lengths.
    cases = [
        (1, [1]),
        (0, [0]),
        (1, [-1]),
        (1, [1, 1, 1]),
        (0, [0, 0, 0]),
        (1, [-1, -1, -1]),
        (0.6666666, [-1, 0, 1]),
        (20, [-12, 4, 0, -1, 1, 102]),
    ]
    for expected, values in cases:
        assert pytest.approx(expected) == reg_func_abs(torch.Tensor(values))
def test_regularization_lagged_regressor():
    """Regularization should suppress weights of uninformative lagged regressors.

    Uses a synthetic dataset with 4 noise-based lagged regressors (a, b, c, d).
    Regressors a and d drive y strongly and are expected to keep an average
    weight close to 1; b and c barely contribute and should be regularized
    towards 0. All other model components are turned off to avoid side effects.
    """
    log.info("testing: regularization lagged regressors")
    df, lagged_regressors = generate_lagged_regressor_dataset(periods=100)
    df, _, _, id_list = df_utils.check_multiple_series_id(df)
    df, _, _ = df_utils.check_dataframe(df, check_y=False)

    m = NeuralProphet(
        epochs=30,
        batch_size=8,
        learning_rate=0.1,
        yearly_seasonality=False,
        weekly_seasonality=False,
        daily_seasonality=False,
        growth="off",
        normalize="off",
    )
    m = m.add_lagged_regressor(
        n_lags=3,
        names=[name for name, _ in lagged_regressors],
        regularization=0.1,
    )
    m.fit(df, freq="D")

    # Map regressor name -> the weight used to generate the synthetic y.
    expected_weights = dict(lagged_regressors)

    weights = m.model.get_covar_weights()
    for name in m.config_lagged_regressors.regressors.keys():
        weight_average = np.average(weights[name].detach().numpy())

        if expected_weights[name] > 0.9:
            assert weight_average > 0.5
        else:
            # Note: this should be < 0.1, but due to fitting issues, relaxed temporarily.
            assert weight_average < 0.35

        log.info(
            "Lagged regressor: %s, average weight: %f, expected weight: %f",
            name,
            weight_average,
            expected_weights[name],
        )
epochs=EPOCHS, 62 | batch_size=BATCH_SIZE, 63 | learning_rate=LR, 64 | n_lags=6, 65 | n_forecasts=3, 66 | n_changepoints=0, 67 | ) 68 | _ = m.fit(df, freq="D") 69 | future = m.make_future_dataframe(df, periods=3) 70 | forecast = m.predict(df=future) 71 | 72 | # Save the model to an in-memory buffer 73 | log.info("testing: save to buffer") 74 | buffer = io.BytesIO() 75 | save(m, buffer) 76 | buffer.seek(0) # Reset buffer position to the beginning 77 | 78 | log.info("testing: load from buffer") 79 | m2 = load(buffer) 80 | forecast2 = m2.predict(df=future) 81 | 82 | buffer.seek(0) # Reset buffer position to the beginning for another load 83 | m3 = load(buffer, map_location="cpu") 84 | forecast3 = m3.predict(df=future) 85 | 86 | # Check that the forecasts are the same 87 | pd.testing.assert_frame_equal(forecast, forecast2) 88 | pd.testing.assert_frame_equal(forecast, forecast3) 89 | 90 | 91 | # def test_continue_training_checkpoint(): 92 | # df = pd.read_csv(PEYTON_FILE, nrows=NROWS) 93 | # m = NeuralProphet( 94 | # epochs=EPOCHS, 95 | # batch_size=BATCH_SIZE, 96 | # learning_rate=LR, 97 | # n_lags=6, 98 | # n_forecasts=3, 99 | # n_changepoints=0, 100 | # ) 101 | # metrics = m.fit(df, checkpointing=True, freq="D") 102 | # metrics2 = m.fit(df, freq="D", continue_training=True, epochs=ADDITIONAL_EPOCHS) 103 | # assert metrics["Loss"].min() >= metrics2["Loss"].min() 104 | 105 | 106 | # def test_continue_training_with_scheduler_selection(): 107 | # df = pd.read_csv(PEYTON_FILE, nrows=NROWS) 108 | # m = NeuralProphet( 109 | # epochs=EPOCHS, 110 | # batch_size=BATCH_SIZE, 111 | # learning_rate=LR, 112 | # n_lags=6, 113 | # n_forecasts=3, 114 | # n_changepoints=0, 115 | # ) 116 | # metrics = m.fit(df, checkpointing=True, freq="D") 117 | # # Continue training with StepLR 118 | # metrics2 = m.fit(df, freq="D", continue_training=True, epochs=ADDITIONAL_EPOCHS, scheduler="StepLR") 119 | # assert metrics["Loss"].min() >= metrics2["Loss"].min() 120 | 121 | 122 | # def 
def generate_config_train_params(overrides=None):
    """Return a default training-config dict, optionally overridden.

    Parameters
    ----------
    overrides : dict, optional
        Key/value pairs that replace (or extend) the default entries.

    Returns
    -------
    dict
        A fresh dict on every call; the caller may mutate it freely.
    """
    # Use a None sentinel instead of a mutable default argument: a shared
    # `{}` default is a classic Python pitfall (state leaks between calls
    # if any caller ever mutates it).
    config_train_params = {
        "learning_rate": None,
        "epochs": None,
        "batch_size": None,
        "loss_func": "SmoothL1Loss",
        "optimizer": "AdamW",
    }
    config_train_params.update(overrides or {})
    return config_train_params
def test_create_dummy_datestamps():
    """create_dummy_datestamps should synthesize `ds` and reject an all-NA `ds`."""
    df = pd.read_csv(PEYTON_FILE, nrows=NROWS)

    # A frame without a `ds` column gets dummy datestamps generated.
    df_dummy = df_utils.create_dummy_datestamps(df.drop("ds", axis=1))

    # A frame whose `ds` column exists but is entirely NA must be rejected.
    df["ds"] = pd.NA
    with pytest.raises(ValueError):
        _ = df_utils.create_dummy_datestamps(df)

    # The generated datestamps are usable end to end.
    m = NeuralProphet(epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR)
    _ = m.fit(df_dummy)
    _ = m.make_future_dataframe(df_dummy, periods=365, n_historic_predictions=True)
def test_wrapper_plots():
    """TorchProphet plotting works for forecasts and components on both backends."""
    log.info("testing: Wrapper plots")
    df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
    m = Prophet(epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR)
    m.fit(df)

    forecast = m.predict(m.make_future_dataframe(periods=50))

    # Exercise both plot types on the default (matplotlib) and plotly backends.
    figures = [
        m.plot(forecast),
        m.plot(forecast, plotting_backend="plotly"),
        m.plot_components(forecast),
        m.plot_components(forecast, plotting_backend="plotly"),
    ]

    if PLOT:
        for fig in figures:
            fig.show()
def generate_event_dataset(
    events=None,
    periods=31,
    y_default=1,
    y_event=100,
    y_events_override=None,
):
    """Generate dataset with regular y value and special y value for events.

    Parameters
    ----------
    events : list of str, optional
        Event dates in ISO format. Defaults to six dates in January 2022.
    periods : int
        Number of daily rows, starting at the earliest event date.
    y_default : number
        y value for non-event days.
    y_event : number
        y value for event days (unless overridden).
    y_events_override : dict, optional
        Maps an event date string to a custom y value.

    Returns
    -------
    (pandas.DataFrame, list of str)
        The generated dataset and the event dates, sorted ascending.
    """
    # None sentinels instead of mutable default arguments, and a sorted *copy*
    # instead of in-place `events.sort()` — the original mutated the caller's
    # list (and the shared default list) as a side effect.
    if events is None:
        events = ["2022-01-01", "2022-01-10", "2022-01-13", "2022-01-14", "2022-01-15", "2022-01-31"]
    events = sorted(events)
    if y_events_override is None:
        y_events_override = {}

    dates = pd.date_range(events[0], periods=periods, freq="D")
    df = pd.DataFrame({"ds": dates, "y": y_default}, index=dates)

    for event in events:
        df.loc[event, "y"] = y_events_override.get(event, y_event)

    return df, events